1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2004 Cluster File Systems, Inc.
5 * Author: Zach Brown <zab@clusterfs.com>
6 * Author: Phil Schwan <phil@clusterfs.com>
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * Lustre is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with Lustre; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define DEBUG_SUBSYSTEM S_LNET
26 #define LUSTRE_TRACEFILE_PRIVATE
27 #include "tracefile.h"
29 #include <libcfs/kp30.h>
30 #include <libcfs/libcfs.h>
32 /* XXX move things up to the top, comment */
33 union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
35 char tracefile[TRACEFILE_NAME_SIZE];
36 long long tracefile_size = TRACEFILE_SIZE;
37 static struct tracefiled_ctl trace_tctl;
38 struct semaphore trace_thread_sem;
39 static int thread_running = 0;
41 atomic_t tage_allocated = ATOMIC_INIT(0);
43 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
44 struct trace_cpu_data *tcd);
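/*
 * Rough map of the machinery below: messages are formatted into per-CPU,
 * per-type page lists (tcd_pages); a small stock of preallocated pages
 * (tcd_stock_pages) lets atomic contexts avoid allocation, and a bounded
 * list of already-handled pages (tcd_daemon_pages) keeps recent history
 * around for an LBUG dump.  The tracefiled thread drains pages into the
 * file named by 'tracefile' once the debug daemon has been started.
 */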
46 static inline struct trace_page *tage_from_list(struct list_head *list)
48 return list_entry(list, struct trace_page, linkage);
51 static struct trace_page *tage_alloc(int gfp)
54 struct trace_page *tage;
57 * Don't spam the console with allocation failures: they will be reported
58 * by the upper layer anyway.
60 gfp |= CFS_ALLOC_NOWARN;
61 page = cfs_alloc_page(gfp);
65 tage = cfs_alloc(sizeof(*tage), gfp);
72 atomic_inc(&tage_allocated);
76 static void tage_free(struct trace_page *tage)
78 __LASSERT(tage != NULL);
79 __LASSERT(tage->page != NULL);
81 cfs_free_page(tage->page);
83 atomic_dec(&tage_allocated);
86 static void tage_to_tail(struct trace_page *tage, struct list_head *queue)
88 __LASSERT(tage != NULL);
89 __LASSERT(queue != NULL);
91 list_move_tail(&tage->linkage, queue);
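/*
 * Refill the per-CPU stock up to TCD_STOCK_PAGES so that later calls in
 * atomic context can take a ready page instead of allocating one.
 */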
94 int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
95 struct list_head *stock)
100 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
101 * from here: this will lead to infinite recursion.
104 for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) {
105 struct trace_page *tage;
107 tage = tage_alloc(gfp);
110 list_add_tail(&tage->linkage, stock);
115 /* return a page that has 'len' bytes left at the end */
116 static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
119 struct trace_page *tage;
121 if (tcd->tcd_cur_pages > 0) {
122 __LASSERT(!list_empty(&tcd->tcd_pages));
123 tage = tage_from_list(tcd->tcd_pages.prev);
124 if (tage->used + len <= CFS_PAGE_SIZE)
128 if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
129 if (tcd->tcd_cur_stock_pages > 0) {
130 tage = tage_from_list(tcd->tcd_stock_pages.prev);
131 -- tcd->tcd_cur_stock_pages;
132 list_del_init(&tage->linkage);
134 tage = tage_alloc(CFS_ALLOC_ATOMIC);
137 "failure to allocate a tage (%ld)\n",
144 tage->cpu = smp_processor_id();
145 tage->type = tcd->tcd_type;
146 list_add_tail(&tage->linkage, &tcd->tcd_pages);
147 tcd->tcd_cur_pages++;
149 if (tcd->tcd_cur_pages > 8 && thread_running) {
150 struct tracefiled_ctl *tctl = &trace_tctl;
152 * wake up tracefiled to process some pages.
154 cfs_waitq_signal(&tctl->tctl_waitq);
161 static void tcd_shrink(struct trace_cpu_data *tcd)
163 int pgcount = tcd->tcd_cur_pages / 10;
164 struct page_collection pc;
165 struct trace_page *tage;
166 struct trace_page *tmp;
169 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
170 * from here: this will lead to infinite recursion.
173 printk(KERN_WARNING "debug daemon buffer overflowed; discarding"
174 " 10%% of pages (%d of %ld)\n", pgcount + 1, tcd->tcd_cur_pages);
176 CFS_INIT_LIST_HEAD(&pc.pc_pages);
177 spin_lock_init(&pc.pc_lock);
179 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
183 list_move_tail(&tage->linkage, &pc.pc_pages);
184 tcd->tcd_cur_pages--;
186 put_pages_on_tcd_daemon_list(&pc, tcd);
189 /* return a page that has 'len' bytes left at the end */
190 static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd,
193 struct trace_page *tage;
196 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
197 * from here: this will lead to infinite recursion.
200 if (len > CFS_PAGE_SIZE) {
202 "cowardly refusing to write %lu bytes in a page\n", len);
206 tage = trace_get_tage_try(tcd, len);
211 if (tcd->tcd_cur_pages > 0) {
212 tage = tage_from_list(tcd->tcd_pages.next);
214 tage_to_tail(tage, &tcd->tcd_pages);
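/*
 * A trace record, as assembled below, is laid out as: an optional binary
 * ptldebug_header (when libcfs_debug_binary is set), one '.' per nesting
 * level, the NUL-terminated file name, the NUL-terminated function name,
 * and finally the formatted message; 'known_size' covers everything but
 * the message text itself.
 */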
219 int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
220 const char *file, const char *fn, const int line,
221 const char *format1, va_list args,
222 const char *format2, ...)
224 struct trace_cpu_data *tcd = NULL;
225 struct ptldebug_header header;
226 struct trace_page *tage;
227 /* string_buf is used only if tcd != NULL, and is always set then */
228 char *string_buf = NULL;
231 int needed = 85; /* average message length */
238 if (strchr(file, '/'))
239 file = strrchr(file, '/') + 1;
242 set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());
244 tcd = trace_get_tcd();
245 if (tcd == NULL) /* arch may not log in IRQ context */
248 if (tcd->tcd_shutting_down) {
254 depth = __current_nesting_level();
255 known_size = strlen(file) + 1 + depth;
257 known_size += strlen(fn) + 1;
259 if (libcfs_debug_binary)
260 known_size += sizeof(header);
263 * '2' is used because vsnprintf returns the real size required for the
264 * output _without_ the terminating NUL, so a second pass can retry with
265 * the correct size if 'needed' was too small for this format.
268 tage = trace_get_tage(tcd, needed + known_size + 1);
270 if (needed + known_size > CFS_PAGE_SIZE)
278 string_buf = (char *)cfs_page_address(tage->page)+tage->used+known_size;
280 max_nob = CFS_PAGE_SIZE - tage->used - known_size;
282 printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
292 needed = vsnprintf(string_buf, max_nob, format1, ap);
298 remain = max_nob - needed;
302 va_start(ap, format2);
303 needed += vsnprintf(string_buf+needed, remain, format2, ap);
307 if (needed < max_nob) /* the formatted message fit */
311 if (*(string_buf+needed-1) != '\n')
312 printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
315 header.ph_len = known_size + needed;
316 debug_buf = (char *)cfs_page_address(tage->page) + tage->used;
318 if (libcfs_debug_binary) {
319 memcpy(debug_buf, &header, sizeof(header));
320 tage->used += sizeof(header);
321 debug_buf += sizeof(header);
324 /* indent message according to the nesting level */
325 while (depth-- > 0) {
326 *(debug_buf++) = '.';
330 strcpy(debug_buf, file);
331 tage->used += strlen(file) + 1;
332 debug_buf += strlen(file) + 1;
335 strcpy(debug_buf, fn);
336 tage->used += strlen(fn) + 1;
337 debug_buf += strlen(fn) + 1;
340 __LASSERT(debug_buf == string_buf);
342 tage->used += needed;
343 __LASSERT (tage->used <= CFS_PAGE_SIZE);
346 if ((mask & libcfs_printk) == 0) {
347 /* no console output requested */
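/*
 * Console output is rate-limited per call site through 'cdls': while the
 * current time is before cdls_next the message is only counted as skipped;
 * each message that does reach the console multiplies cdls_delay by
 * libcfs_console_backoff, a long quiet spell shrinks it again, and the
 * delay is clamped to [libcfs_console_min_delay, libcfs_console_max_delay].
 */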
354 if (libcfs_console_ratelimit &&
355 cdls->cdls_next != 0 && /* not first time ever */
356 !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
357 /* skipping a console message */
364 if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
365 libcfs_console_max_delay
366 + cfs_time_seconds(10))) {
367 /* last timeout was a long time ago */
368 cdls->cdls_delay /= libcfs_console_backoff * 4;
370 cdls->cdls_delay *= libcfs_console_backoff;
372 if (cdls->cdls_delay < libcfs_console_min_delay)
373 cdls->cdls_delay = libcfs_console_min_delay;
374 else if (cdls->cdls_delay > libcfs_console_max_delay)
375 cdls->cdls_delay = libcfs_console_max_delay;
378 /* ensure cdls_next is never zero after it's been seen */
379 cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
383 print_to_console(&header, mask, string_buf, needed, file, fn);
386 string_buf = trace_get_console_buffer();
389 if (format1 != NULL) {
391 needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap);
394 if (format2 != NULL) {
395 remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
397 va_start(ap, format2);
398 needed += vsnprintf(string_buf+needed, remain, format2, ap);
402 print_to_console(&header, mask,
403 string_buf, needed, file, fn);
405 trace_put_console_buffer(string_buf);
408 if (cdls != NULL && cdls->cdls_count != 0) {
409 string_buf = trace_get_console_buffer();
411 needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
412 "Skipped %d previous similar message%s\n",
413 cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
415 print_to_console(&header, mask,
416 string_buf, needed, file, fn);
418 trace_put_console_buffer(string_buf);
419 cdls->cdls_count = 0;
424 EXPORT_SYMBOL(libcfs_debug_vmsg2);
427 libcfs_assertion_failed(const char *expr, const char *file,
428 const char *func, const int line)
430 libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
431 "ASSERTION(%s) failed\n", expr);
434 EXPORT_SYMBOL(libcfs_assertion_failed);
437 trace_assertion_failed(const char *str,
438 const char *fn, const char *file, int line)
440 struct ptldebug_header hdr;
442 libcfs_panic_in_progress = 1;
443 libcfs_catastrophe = 1;
446 set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
449 print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);
451 LIBCFS_PANIC("Lustre debug assertion failure\n");
457 panic_collect_pages(struct page_collection *pc)
459 /* Do the collect_pages job on a single CPU: this assumes that all other
460 * CPUs have been stopped during a panic. If that isn't true for some
461 * architecture, this will have to be implemented separately there. */
464 struct trace_cpu_data *tcd;
466 CFS_INIT_LIST_HEAD(&pc->pc_pages);
468 tcd_for_each(tcd, i, j) {
469 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
470 tcd->tcd_cur_pages = 0;
472 if (pc->pc_want_daemon_pages) {
473 list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
474 tcd->tcd_cur_daemon_pages = 0;
479 static void collect_pages_on_cpu(void *info)
481 struct trace_cpu_data *tcd;
482 struct page_collection *pc = info;
485 spin_lock(&pc->pc_lock);
486 tcd_for_each_type_lock(tcd, i) {
487 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
488 tcd->tcd_cur_pages = 0;
489 if (pc->pc_want_daemon_pages) {
490 list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
491 tcd->tcd_cur_daemon_pages = 0;
494 spin_unlock(&pc->pc_lock);
497 static void collect_pages(struct page_collection *pc)
499 CFS_INIT_LIST_HEAD(&pc->pc_pages);
501 if (libcfs_panic_in_progress)
502 panic_collect_pages(pc);
504 trace_call_on_all_cpus(collect_pages_on_cpu, pc);
507 static void put_pages_back_on_cpu(void *info)
509 struct page_collection *pc = info;
510 struct trace_cpu_data *tcd;
511 struct list_head *cur_head;
512 struct trace_page *tage;
513 struct trace_page *tmp;
516 spin_lock(&pc->pc_lock);
517 tcd_for_each_type_lock(tcd, i) {
518 cur_head = tcd->tcd_pages.next;
520 list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
522 __LASSERT_TAGE_INVARIANT(tage);
524 if (tage->cpu != smp_processor_id() || tage->type != i)
527 tage_to_tail(tage, cur_head);
528 tcd->tcd_cur_pages++;
531 spin_unlock(&pc->pc_lock);
534 static void put_pages_back(struct page_collection *pc)
536 if (!libcfs_panic_in_progress)
537 trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
540 /* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
541 * we have a good amount of data at all times for dumping during an LBUG, even
542 * if we have been steadily writing (and otherwise discarding) pages via the debug daemon. */
544 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
545 struct trace_cpu_data *tcd)
547 struct trace_page *tage;
548 struct trace_page *tmp;
550 spin_lock(&pc->pc_lock);
551 list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
553 __LASSERT_TAGE_INVARIANT(tage);
555 if (tage->cpu != smp_processor_id() ||
556 tage->type != tcd->tcd_type)
559 tage_to_tail(tage, &tcd->tcd_daemon_pages);
560 tcd->tcd_cur_daemon_pages++;
562 if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
563 struct trace_page *victim;
565 __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
566 victim = tage_from_list(tcd->tcd_daemon_pages.next);
568 __LASSERT_TAGE_INVARIANT(victim);
570 list_del(&victim->linkage);
572 tcd->tcd_cur_daemon_pages--;
575 spin_unlock(&pc->pc_lock);
578 static void put_pages_on_daemon_list_on_cpu(void *info)
580 struct trace_cpu_data *tcd;
583 tcd_for_each_type_lock(tcd, i)
584 put_pages_on_tcd_daemon_list(info, tcd);
587 static void put_pages_on_daemon_list(struct page_collection *pc)
589 trace_call_on_all_cpus(put_pages_on_daemon_list_on_cpu, pc);
592 void trace_debug_print(void)
594 struct page_collection pc;
595 struct trace_page *tage;
596 struct trace_page *tmp;
598 spin_lock_init(&pc.pc_lock);
600 pc.pc_want_daemon_pages = 1;
602 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
606 __LASSERT_TAGE_INVARIANT(tage);
609 p = cfs_page_address(page);
610 while (p < ((char *)cfs_page_address(page) + tage->used)) {
611 struct ptldebug_header *hdr;
616 p += strlen(file) + 1;
619 len = hdr->ph_len - (p - (char *)hdr);
621 print_to_console(hdr, D_EMERG, p, len, file, fn);
626 list_del(&tage->linkage);
631 int tracefile_dump_all_pages(char *filename)
633 struct page_collection pc;
635 struct trace_page *tage;
636 struct trace_page *tmp;
641 tracefile_write_lock();
643 filp = cfs_filp_open(filename,
644 O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc);
647 printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
652 spin_lock_init(&pc.pc_lock);
653 pc.pc_want_daemon_pages = 1;
655 if (list_empty(&pc.pc_pages)) {
660 /* OK, for now just write the pages. In the future we'll be building
661 * iobufs with the pages and calling generic_direct_IO. */
663 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
665 __LASSERT_TAGE_INVARIANT(tage);
667 rc = cfs_filp_write(filp, cfs_page_address(tage->page),
668 tage->used, cfs_filp_poff(filp));
669 if (rc != (int)tage->used) {
670 printk(KERN_WARNING "wanted to write %u but wrote "
671 "%d\n", tage->used, rc);
673 __LASSERT(list_empty(&pc.pc_pages));
676 list_del(&tage->linkage);
680 rc = cfs_filp_fsync(filp);
682 printk(KERN_ERR "sync returns %d\n", rc);
684 cfs_filp_close(filp);
686 tracefile_write_unlock();
690 void trace_flush_pages(void)
692 struct page_collection pc;
693 struct trace_page *tage;
694 struct trace_page *tmp;
696 spin_lock_init(&pc.pc_lock);
698 pc.pc_want_daemon_pages = 1;
700 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
702 __LASSERT_TAGE_INVARIANT(tage);
704 list_del(&tage->linkage);
709 int trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
710 const char *usr_buffer, int usr_buffer_nob)
714 if (usr_buffer_nob > knl_buffer_nob)
717 if (copy_from_user((void *)knl_buffer,
718 (void *)usr_buffer, usr_buffer_nob))
721 nob = strnlen(knl_buffer, usr_buffer_nob);
722 while (--nob >= 0) /* strip trailing whitespace */
723 if (!isspace(knl_buffer[nob]))
726 if (nob < 0) /* empty string */
729 if (nob == knl_buffer_nob) /* no space to terminate */
732 knl_buffer[nob + 1] = 0; /* terminate */
736 int trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
737 const char *knl_buffer, char *append)
739 /* NB if 'append' != NULL, it's a single character to append to the
740 * copied-out string: usually "\n" for /proc entries and "" (i.e. a
741 * terminating zero byte) for sysctl entries. */
742 int nob = strlen(knl_buffer);
744 if (nob > usr_buffer_nob)
745 nob = usr_buffer_nob;
747 if (copy_to_user(usr_buffer, knl_buffer, nob))
750 if (append != NULL && nob < usr_buffer_nob) {
751 if (copy_to_user(usr_buffer + nob, append, 1))
760 int trace_allocate_string_buffer(char **str, int nob)
762 if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */
765 *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
772 void trace_free_string_buffer(char *str, int nob)
777 int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
782 rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
786 rc = trace_copyin_string(str, usr_str_nob + 1,
787 usr_str, usr_str_nob);
791 #if !defined(__WINNT__)
797 rc = tracefile_dump_all_pages(str);
799 trace_free_string_buffer(str, usr_str_nob + 1);
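/*
 * Commands understood here: "stop" clears the tracefile name, "size=N"
 * sets the tracefile size in MB (values outside 10..20480 fall back to
 * TRACEFILE_SIZE), and anything else must be an absolute path to start
 * writing trace pages to.
 */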
803 int trace_daemon_command(char *str)
807 tracefile_write_lock();
809 if (strcmp(str, "stop") == 0) {
811 memset(tracefile, 0, sizeof(tracefile));
813 } else if (strncmp(str, "size=", 5) == 0) {
814 tracefile_size = simple_strtoul(str + 5, NULL, 0);
815 if (tracefile_size < 10 || tracefile_size > 20480)
816 tracefile_size = TRACEFILE_SIZE;
818 tracefile_size <<= 20;
820 } else if (strlen(str) >= sizeof(tracefile)) {
823 } else if (str[0] != '/') {
827 strcpy(tracefile, str);
829 printk(KERN_INFO "Lustre: debug daemon will attempt to start writing "
830 "to %s (%lukB max)\n", tracefile,
831 (long)(tracefile_size >> 10));
833 trace_start_thread();
836 tracefile_write_unlock();
840 int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
845 rc = trace_allocate_string_buffer(&str, usr_str_nob + 1);
849 rc = trace_copyin_string(str, usr_str_nob + 1,
850 usr_str, usr_str_nob);
852 rc = trace_daemon_command(str);
854 trace_free_string_buffer(str, usr_str_nob + 1);
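/*
 * The requested total is divided evenly across possible CPUs and converted
 * to pages (1 MB is 1 << (20 - CFS_PAGE_SHIFT) pages); each trace type then
 * gets its tcd_pages_factor percentage of that per-CPU budget.
 */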
858 int trace_set_debug_mb(int mb)
863 int limit = trace_max_debug_mb();
864 struct trace_cpu_data *tcd;
866 if (mb < num_possible_cpus())
870 printk(KERN_ERR "Lustre: Refusing to set debug buffer size to "
871 "%dMB - limit is %d\n", mb, limit);
875 mb /= num_possible_cpus();
876 pages = mb << (20 - CFS_PAGE_SHIFT);
878 tracefile_write_lock();
880 tcd_for_each(tcd, i, j)
881 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
883 tracefile_write_unlock();
888 int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
893 rc = trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
897 return trace_set_debug_mb(simple_strtoul(str, NULL, 0));
900 int trace_get_debug_mb(void)
904 struct trace_cpu_data *tcd;
907 tracefile_read_lock();
909 tcd_for_each(tcd, i, j)
910 total_pages += tcd->tcd_max_pages;
912 tracefile_read_unlock();
914 return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
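/*
 * tracefiled() wakes up at least once a second (or when poked through
 * tctl_waitq), collects the currently filled pages, appends them to the
 * configured tracefile while keeping f_pos within tracefile_size, and then
 * recycles the written pages onto the per-CPU daemon lists.
 */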
917 static int tracefiled(void *arg)
919 struct page_collection pc;
920 struct tracefiled_ctl *tctl = arg;
921 struct trace_page *tage;
922 struct trace_page *tmp;
923 struct ptldebug_header *hdr;
929 /* we're started late enough that we pick up init's fs context */
930 /* this is so broken in uml? what on earth is going on? */
931 cfs_daemonize("ktracefiled");
933 spin_lock_init(&pc.pc_lock);
934 complete(&tctl->tctl_start);
937 cfs_waitlink_t __wait;
939 cfs_waitlink_init(&__wait);
940 cfs_waitq_add(&tctl->tctl_waitq, &__wait);
941 set_current_state(TASK_INTERRUPTIBLE);
942 cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
943 cfs_time_seconds(1));
944 cfs_waitq_del(&tctl->tctl_waitq, &__wait);
946 if (atomic_read(&tctl->tctl_shutdown))
949 pc.pc_want_daemon_pages = 0;
951 if (list_empty(&pc.pc_pages))
955 tracefile_read_lock();
956 if (tracefile[0] != 0) {
957 filp = cfs_filp_open(tracefile,
958 O_CREAT | O_RDWR | O_LARGEFILE,
961 printk(KERN_WARNING "couldn't open %s: %d\n",
964 tracefile_read_unlock();
966 put_pages_on_daemon_list(&pc);
967 __LASSERT(list_empty(&pc.pc_pages));
973 /* mark the first header, so we can sort in chunks */
974 tage = tage_from_list(pc.pc_pages.next);
975 __LASSERT_TAGE_INVARIANT(tage);
977 hdr = cfs_page_address(tage->page);
978 hdr->ph_flags |= PH_FLAG_FIRST_RECORD;
980 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
983 __LASSERT_TAGE_INVARIANT(tage);
985 if (f_pos >= (off_t)tracefile_size)
987 else if (f_pos > cfs_filp_size(filp))
988 f_pos = cfs_filp_size(filp);
990 rc = cfs_filp_write(filp, cfs_page_address(tage->page),
992 if (rc != (int)tage->used) {
993 printk(KERN_WARNING "wanted to write %u but "
994 "wrote %d\n", tage->used, rc);
996 __LASSERT(list_empty(&pc.pc_pages));
1001 cfs_filp_close(filp);
1002 put_pages_on_daemon_list(&pc);
1003 __LASSERT(list_empty(&pc.pc_pages));
1005 complete(&tctl->tctl_stop);
1009 int trace_start_thread(void)
1011 struct tracefiled_ctl *tctl = &trace_tctl;
1014 mutex_down(&trace_thread_sem);
1018 init_completion(&tctl->tctl_start);
1019 init_completion(&tctl->tctl_stop);
1020 cfs_waitq_init(&tctl->tctl_waitq);
1021 atomic_set(&tctl->tctl_shutdown, 0);
1023 if (cfs_kernel_thread(tracefiled, tctl, 0) < 0) {
1028 wait_for_completion(&tctl->tctl_start);
1031 mutex_up(&trace_thread_sem);
1035 void trace_stop_thread(void)
1037 struct tracefiled_ctl *tctl = &trace_tctl;
1039 mutex_down(&trace_thread_sem);
1040 if (thread_running) {
1041 printk(KERN_INFO "Lustre: shutting down debug daemon thread...\n");
1042 atomic_set(&tctl->tctl_shutdown, 1);
1043 wait_for_completion(&tctl->tctl_stop);
1046 mutex_up(&trace_thread_sem);
1049 int tracefile_init(void)
1051 struct trace_cpu_data *tcd;
1057 rc = tracefile_init_arch();
1061 tcd_for_each(tcd, i, j) {
1062 /* tcd_pages_factor is initialized in tracefile_init_arch(). */
1063 factor = tcd->tcd_pages_factor;
1064 CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
1065 CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1066 CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
1067 tcd->tcd_cur_pages = 0;
1068 tcd->tcd_cur_stock_pages = 0;
1069 tcd->tcd_cur_daemon_pages = 0;
1070 tcd->tcd_max_pages = (TCD_MAX_PAGES * factor) / 100;
1071 LASSERT(tcd->tcd_max_pages > 0);
1072 tcd->tcd_shutting_down = 0;
1078 static void trace_cleanup_on_cpu(void *info)
1080 struct trace_cpu_data *tcd;
1081 struct trace_page *tage;
1082 struct trace_page *tmp;
1085 tcd_for_each_type_lock(tcd, i) {
1086 tcd->tcd_shutting_down = 1;
1088 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
1089 __LASSERT_TAGE_INVARIANT(tage);
1091 list_del(&tage->linkage);
1094 tcd->tcd_cur_pages = 0;
1098 static void trace_cleanup(void)
1100 struct page_collection pc;
1102 CFS_INIT_LIST_HEAD(&pc.pc_pages);
1103 spin_lock_init(&pc.pc_lock);
1105 trace_call_on_all_cpus(trace_cleanup_on_cpu, &pc);
1107 tracefile_fini_arch();
1110 void tracefile_exit(void)
1112 trace_stop_thread();