/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <libcfs/kp30.h>
#include <libcfs/libcfs.h>

/* XXX move things up to the top, comment */
union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char tracefile[TRACEFILE_NAME_SIZE];
long long tracefile_size = TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
struct semaphore trace_thread_sem;
static int thread_running = 0;

atomic_t tage_allocated = ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct trace_cpu_data *tcd);

static inline struct trace_page *tage_from_list(struct list_head *list)
{
        return list_entry(list, struct trace_page, linkage);
}

static struct trace_page *tage_alloc(int gfp)
{
        cfs_page_t        *page;
        struct trace_page *tage;

        /* My caller is trying to free memory */
        if (!cfs_in_interrupt() && libcfs_memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= CFS_ALLOC_NOWARN;
        page = cfs_alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = cfs_alloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                cfs_free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&tage_allocated);
        return tage;
}

static void tage_free(struct trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        cfs_free_page(tage->page);
        cfs_free(tage);
        atomic_dec(&tage_allocated);
}

static void tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}

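/* Top up 'stock' with freshly allocated trace pages until this CPU's stock
 * would hold TCD_STOCK_PAGES pages; returns the number of pages added. */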
int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
                       struct list_head *stock)
{
        int i;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
                struct trace_page *tage;

                tage = tage_alloc(gfp);
                if (tage == NULL)
                        break;
                list_add_tail(&tage->linkage, stock);
        }
        return i;
}

/* return a page that has 'len' bytes left at the end */
static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
                                             unsigned long len)
{
        struct trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= CFS_PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(CFS_ALLOC_ATOMIC);
                        if (tage == NULL) {
                                if (printk_ratelimit())
                                        printk(KERN_WARNING
                                               "failure to allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        cfs_waitq_signal(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}

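/* The trace buffer has overflowed: move the oldest ~10% of this CPU's pages
 * onto the daemon ring buffer so that new messages can still be logged;
 * a rate-limited warning is printed when this happens. */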
static void tcd_shrink(struct trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(KERN_WARNING "debug daemon buffer overflowed; "
                       "discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd,
                                         unsigned long len)
{
        struct trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > CFS_PAGE_SIZE) {
                printk(KERN_ERR
                       "cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}

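/* Core entry point behind the CDEBUG()/CERROR() macros: formats the message
 * into a per-CPU trace page (prefixed by a ptldebug_header, the source file
 * name and the function name) and optionally echoes it to the console,
 * subject to the 'cdls' rate limiter.  Returns 1 when console output was
 * suppressed or not requested, 0 once the message has been printed. */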
int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
                       const char *file, const char *fn, const int line,
                       const char *format1, va_list args,
                       const char *format2, ...)
{
        struct trace_cpu_data *tcd = NULL;
        struct ptldebug_header header;
        struct trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int depth;
        int i;
        int remain;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = trace_get_tcd();

        /* trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU. This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled. */
        set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());

        if (tcd == NULL)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_shutting_down) {
                trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        depth = __current_nesting_level();
        known_size = strlen(file) + 1 + depth;
        if (fn)
                known_size += strlen(fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * '2' in the loop bound below: vsnprintf() returns the real size
         * required for the output _without_ the terminating NUL, so a second
         * pass is needed if 'needed' turned out to be too small for this
         * format.
         */
        for (i = 0; i < 2; i++) {
                tage = trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > CFS_PAGE_SIZE)
                                mask |= D_ERROR;

                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)cfs_page_address(tage->page) +
                             tage->used + known_size;

                max_nob = CFS_PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
                        mask |= D_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf + needed, remain,
                                            format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        if (*(string_buf + needed - 1) != '\n')
                printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
                       file, line, fn);

        header.ph_len = known_size + needed;
        debug_buf = (char *)cfs_page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        /* indent message according to the nesting level */
        while (depth-- > 0) {
                *(debug_buf++) = '.';
                ++tage->used;
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (fn) {
                strcpy(debug_buf, fn);
                tage->used += strlen(fn) + 1;
                debug_buf += strlen(fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= CFS_PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                trace_put_tcd(tcd);
                        return 1;
                }

                if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
                                   libcfs_console_max_delay +
                                   cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;

                        if (cdls->cdls_delay < libcfs_console_min_delay)
                                cdls->cdls_delay = libcfs_console_min_delay;
                        else if (cdls->cdls_delay > libcfs_console_max_delay)
                                cdls->cdls_delay = libcfs_console_max_delay;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                print_to_console(&header, mask, string_buf, needed, file, fn);
                trace_put_tcd(tcd);
        } else {
                string_buf = trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf,
                                           TRACE_CONSOLE_BUFFER_SIZE,
                                           format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf + needed,
                                                    remain, format2, ap);
                                va_end(ap);
                        }
                }
                print_to_console(&header, mask,
                                 string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = trace_get_console_buffer();

                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                print_to_console(&header, mask,
                                 string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);

void
libcfs_assertion_failed(const char *expr, const char *file,
                        const char *func, const int line)
{
        libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
                         "ASSERTION(%s) failed\n", expr);
        lbug_with_loc(file, func, line);
}
EXPORT_SYMBOL(libcfs_assertion_failed);

void
trace_assertion_failed(const char *str,
                       const char *fn, const char *file, int line)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        mb();

        set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
                            CDEBUG_STACK());

        print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);

        LIBCFS_PANIC("Lustre debug assertion failure\n");

        /* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch. */
        int                    i;
        int                    j;
        struct trace_cpu_data *tcd;

        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}

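/* Gather every CPU's queued trace pages (and, if requested, its daemon
 * pages) onto the single collection 'pc', under pc_lock. */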
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, cpu;

        spin_lock(&pc->pc_lock);
        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                 &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void collect_pages(struct page_collection *pc)
{
        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}

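/* Return collected pages to the per-CPU queues they came from, matching on
 * the cpu and type recorded in each trace page. */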
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        spin_lock(&pc->pc_lock);
        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct trace_cpu_data *tcd)
{
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock(&pc->pc_lock);
        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}

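/* Walk every collected trace page and print each record (header, file name,
 * function name, message body) straight to the console, freeing the pages
 * as they are consumed. */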
void trace_debug_print(void)
{
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock_init(&pc.pc_lock);

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                cfs_page_t *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = cfs_page_address(page);
                while (p < ((char *)cfs_page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;

                        len = hdr->ph_len - (p - (char *)hdr);

                        print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                tage_free(tage);
        }
}

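/* Write all collected trace pages to 'filename' (created exclusively), then
 * fsync and close it; pages that could not be written are put back on the
 * per-CPU queues. */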
int tracefile_dump_all_pages(char *filename)
{
        struct page_collection pc;
        cfs_file_t *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        int rc;

        CFS_DECL_MMSPACE;

        tracefile_write_lock();

        filp = cfs_filp_open(filename,
                             O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc);
        if (filp == NULL) {
                if (rc != -EEXIST)
                        printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
                               filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        CFS_MMSPACE_OPEN;
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                rc = cfs_filp_write(filp, cfs_page_address(tage->page),
                                    tage->used, cfs_filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "wanted to write %u but wrote "
                               "%d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }
        CFS_MMSPACE_CLOSE;

        rc = cfs_filp_fsync(filp);
        if (rc)
                printk(KERN_ERR "sync returns %d\n", rc);
close:
        cfs_filp_close(filp);
out:
        tracefile_write_unlock();
        return rc;
}

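/* Discard all collected trace pages, daemon pages included, freeing them
 * without writing them anywhere. */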
void trace_flush_pages(void)
{
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock_init(&pc.pc_lock);

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                tage_free(tage);
        }
}

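/* Copy a string in from user space, strip trailing whitespace and guarantee
 * NUL termination; returns 0 on success or a negative errno. */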
int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                        const char *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user((void *)knl_buffer,
                           (void *)usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (nob-- >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}

int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
                         const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n", for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(trace_copyout_string);

int trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * CFS_PAGE_SIZE)            /* string must be "sensible" */
                return -EINVAL;

        *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

void trace_free_string_buffer(char *str, int nob)
{
        cfs_free(str);
}

int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = trace_copyin_string(str, usr_str_nob + 1,
                                 usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

#if !defined(__WINNT__)
        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
#endif
        rc = tracefile_dump_all_pages(str);
out:
        trace_free_string_buffer(str, usr_str_nob + 1);
        return rc;
}

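/* Parse a debug-daemon control string: "stop" halts the daemon, "size=N"
 * sets the maximum tracefile size in MB, and an absolute path starts the
 * daemon writing to that file. */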
int trace_daemon_command(char *str)
{
        int rc = 0;

        tracefile_write_lock();

        if (strcmp(str, "stop") == 0) {
                tracefile_write_unlock();
                trace_stop_thread();
                tracefile_write_lock();
                memset(tracefile, 0, sizeof(tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                tracefile_size = simple_strtoul(str + 5, NULL, 0);
                if (tracefile_size < 10 || tracefile_size > 20480)
                        tracefile_size = TRACEFILE_SIZE;
                else
                        tracefile_size <<= 20;

        } else if (strlen(str) >= sizeof(tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(tracefile, str);

                printk(KERN_INFO "Lustre: debug daemon will attempt to start writing "
                       "to %s (%lukB max)\n", tracefile,
                       (long)(tracefile_size >> 10));

                trace_start_thread();
        }

        tracefile_write_unlock();
        return rc;
}

int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = trace_copyin_string(str, usr_str_nob + 1,
                                 usr_str, usr_str_nob);
        if (rc == 0)
                rc = trace_daemon_command(str);

        trace_free_string_buffer(str, usr_str_nob + 1);
        return rc;
}

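/* Resize the in-memory debug buffer to 'mb' megabytes, split evenly across
 * the possible CPUs and then among the TCD types according to their
 * percentage factors. */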
int trace_set_debug_mb(int mb)
{
        int i;
        int j;
        int pages;
        int limit = trace_max_debug_mb();
        struct trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_ERR "Cannot set debug_mb to %d, "
                       "the value should be >= %d\n",
                       mb, num_possible_cpus());
                return -EINVAL;
        }

        if (mb > limit) {
                printk(KERN_ERR "Lustre: Refusing to set debug buffer size to "
                       "%dMB - limit is %d\n", mb, limit);
                return -EINVAL;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - CFS_PAGE_SHIFT);

        tracefile_write_lock();

        tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        tracefile_write_unlock();

        return 0;
}

int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
{
        char str[32];
        int  rc;

        rc = trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
        if (rc < 0)
                return rc;

        return trace_set_debug_mb(simple_strtoul(str, NULL, 0));
}

int trace_get_debug_mb(void)
{
        int i;
        int j;
        struct trace_cpu_data *tcd;
        int total_pages = 0;

        tracefile_read_lock();

        tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        tracefile_read_unlock();

        return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
}

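/* Main loop of the debug daemon ("ktracefiled"): repeatedly collect the
 * per-CPU trace pages and append them to the configured tracefile, wrapping
 * at tracefile_size, until asked to shut down. */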
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct trace_page *tage;
        struct trace_page *tmp;
        struct ptldebug_header *hdr;
        cfs_file_t *filp;
        int last_loop = 0;
        int rc;

        CFS_DECL_MMSPACE;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml? what on earth is going on? */
        cfs_daemonize("ktracefiled");

        spin_lock_init(&pc.pc_lock);
        complete(&tctl->tctl_start);

        while (1) {
                cfs_waitlink_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                tracefile_read_lock();
                if (tracefile[0] != 0) {
                        filp = cfs_filp_open(tracefile,
                                             O_CREAT | O_RDWR | O_LARGEFILE,
                                             0600, &rc);
                        if (filp == NULL)
                                printk(KERN_WARNING "couldn't open %s: %d\n",
                                       tracefile, rc);
                }
                tracefile_read_unlock();
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                CFS_MMSPACE_OPEN;

                /* mark the first header, so we can sort in chunks */
                tage = tage_from_list(pc.pc_pages.next);
                __LASSERT_TAGE_INVARIANT(tage);

                hdr = cfs_page_address(tage->page);
                hdr->ph_flags |= PH_FLAG_FIRST_RECORD;

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)tracefile_size)
                                f_pos = 0;
                        else if (f_pos > cfs_filp_size(filp))
                                f_pos = cfs_filp_size(filp);

                        rc = cfs_filp_write(filp, cfs_page_address(tage->page),
                                            tage->used, &f_pos);
                        if (rc != (int)tage->used) {
                                printk(KERN_WARNING "wanted to write %u but "
                                       "wrote %d\n", tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                        }
                }
                CFS_MMSPACE_CLOSE;

                cfs_filp_close(filp);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
                        printk(KERN_ALERT "total cpus(%d): ", num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        printk(KERN_ALERT "%d(on) ", i);
                                else
                                        printk(KERN_ALERT "%d(off) ", i);
                        printk(KERN_ALERT "\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                printk(KERN_ALERT "page %d belongs to cpu %d\n",
                                       ++i, tage->cpu);
                        printk(KERN_ALERT "There are %d pages unwritten\n", i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                cfs_waitlink_init(&__wait);
                cfs_waitq_add(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
                cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
                                    cfs_time_seconds(1));
                cfs_waitq_del(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}

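/* Start the debug daemon thread if it is not already running; serialised by
 * trace_thread_sem. */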
int trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_down(&trace_thread_sem);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        cfs_waitq_init(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (cfs_kernel_thread(tracefiled, tctl, 0) < 0) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_up(&trace_thread_sem);
        return rc;
}

void trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_down(&trace_thread_sem);
        if (thread_running) {
                printk(KERN_INFO "Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_up(&trace_thread_sem);
}

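/* Initialise the per-CPU trace data: arch-specific setup first, then empty
 * page lists and a per-type share of 'max_pages'. */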
int tracefile_init(int max_pages)
{
        struct trace_cpu_data *tcd;
        int i;
        int j;
        int rc;
        int factor;

        rc = tracefile_init_arch();
        if (rc != 0)
                return rc;

        tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in tracefile_init_arch(). */
                factor = tcd->tcd_pages_factor;
                CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }

        return 0;
}

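/* Mark every TCD as shutting down and free any trace pages still queued
 * on it. */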
static void trace_cleanup_on_all_cpus(void)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                tage_free(tage);
                        }

                        tcd->tcd_cur_pages = 0;
                }
        }
}

static void trace_cleanup(void)
{
        struct page_collection pc;

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);

        trace_cleanup_on_all_cpus();

        tracefile_fini_arch();
}

void tracefile_exit(void)
{
        trace_stop_thread();
        trace_cleanup();
}