/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>

/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}

static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && memory_pressure_get())
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (page == NULL)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (tage == NULL) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__LASSERT(tage != NULL);
	__LASSERT(tage->page != NULL);

	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	__LASSERT(tage != NULL);
	__LASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}
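
/*
 * Pre-allocate up to TCD_STOCK_PAGES trace pages onto 'stock' so that
 * later message writes can take a page without having to allocate one
 * in a context where that may be unsafe.  Returns the number of pages
 * added; a short count just means cfs_tage_alloc() failed part-way.
 */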
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
			   struct list_head *stock)
{
	int i;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
		struct cfs_trace_page *tage;

		tage = cfs_tage_alloc(gfp);
		if (tage == NULL)
			break;
		list_add_tail(&tage->linkage, stock);
	}
	return i;
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(tage == NULL)) {
				if ((!memory_pressure_get() ||
				     in_interrupt()) && printk_ratelimit())
					printk(KERN_WARNING
					       "cannot allocate a tage (%ld)\n",
					       tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}
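
/*
 * The daemon cannot keep up: drop the oldest tenth of the pages queued
 * on this CPU.  The victims are moved onto the daemon ringbuffer via
 * put_pages_on_tcd_daemon_list() rather than freed outright, so they
 * can still be dumped on an LBUG.
 */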
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (printk_ratelimit())
		printk(KERN_WARNING "debug daemon buffer overflowed; "
		       "discarding 10%% of pages (%d of %ld)\n",
		       pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (len > PAGE_SIZE) {
		printk(KERN_ERR
		       "cowardly refusing to write %lu bytes in a page\n", len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage != NULL)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}

int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
	va_end(args);

	return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
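
/*
 * Format a debug message into the current CPU's trace page and, subject
 * to 'msg_mask' and the rate limiter state in 'msgdata->msg_cdls', echo
 * it to the console.  'format1'/'args' carry the primary format string;
 * 'format2' plus the trailing varargs, when non-NULL, are appended to
 * the same record.  Returns 1 when console output is suppressed and 0
 * once a message has been printed.
 */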
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
		       const char *format1, va_list args,
		       const char *format2, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header header = {0};
	struct cfs_trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85; /* average message length */
	int max_nob;
	va_list ap;
	int i;
	int remain;
	int mask = msgdata->msg_mask;
	char *file = (char *)msgdata->msg_file;
	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

	if (strchr(file, '/'))
		file = strrchr(file, '/') + 1;

	tcd = cfs_trace_get_tcd();

	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
	 * pins us to a particular CPU.  This avoids an smp_processor_id()
	 * warning on Linux when debugging is enabled. */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

	if (tcd == NULL)		/* arch may not log in IRQ context */
		goto console;

	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;

	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	known_size = strlen(file) + 1;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;

	if (libcfs_debug_binary)
		known_size += sizeof(header);

	/*
	 * Try the print twice: vsnprintf() returns the size really required
	 * for the output (without the terminating NUL), so if 'needed' was
	 * too small for this format the first time around, the second pass
	 * grabs a tage sized from what the first pass reported.
	 */
	for (i = 0; i < 2; i++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (tage == NULL) {
			if (needed + known_size > PAGE_SIZE)
				mask |= D_ERROR;

			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
			     tage->used + known_size;

		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %d\n",
			       max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}

		if (format2) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;

			va_start(ap, format2);
			needed += vsnprintf(string_buf + needed, remain,
					    format2, ap);
			va_end(ap);
		}

		if (needed < max_nob) /* well. printing ok.. */
			break;
	}

	if (*(string_buf + needed - 1) != '\n')
		printk(KERN_INFO "format at %s:%d:%s doesn't end in "
		       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (msgdata->msg_fn) {
		strcpy(debug_buf, msgdata->msg_fn);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}

	__LASSERT(debug_buf == string_buf);

	tage->used += needed;
	__LASSERT(tage->used <= PAGE_SIZE);

console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			cfs_trace_put_tcd(tcd);
		return 1;
	}

	if (cdls != NULL) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 &&	/* not first time ever */
		    !time_after(jiffies, cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				cfs_trace_put_tcd(tcd);
			return 1;
		}

		if (time_after(jiffies, cdls->cdls_next +
					libcfs_console_max_delay +
					cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}

		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();

		needed = 0;
		if (format1 != NULL) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf,
					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
					   format1, ap);
			va_end(ap);
		}
		if (format2 != NULL) {
			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf + needed, remain,
						    format2, ap);
				va_end(ap);
			}
		}
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
	}

	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();

		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				  "Skipped %d previous similar message%s\n",
				  cdls->cdls_count,
				  (cdls->cdls_count > 1) ? "s" : "");

		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);

void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	mb();

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");

	/* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic.  If this isn't true for
	 * some arch, this will have to be implemented separately in each
	 * arch. */
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages,
					 &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;

			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}

static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}

static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure
 * that we have a good amount of data at all times for dumping during an
 * LBUG, even if we have been steadily writing (and otherwise discarding)
 * pages via the debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;

		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;

		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;

			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

			__LASSERT_TAGE_INVARIANT(victim);

			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}
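
/*
 * Decode and dump every collected trace page (daemon ringbuffer
 * included) straight to the console, then free the pages.  Intended
 * for emergency use, e.g. from the LBUG/panic path.
 */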
void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;

		__LASSERT_TAGE_INVARIANT(tage);

		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;

			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;
			len = hdr->ph_len - (int)(p - (char *)hdr);

			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

			p += len;
		}

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
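
/*
 * Write all collected trace pages to 'filename' (created O_EXCL, so an
 * existing file is refused), fsync, and free the pages.  Returns 0 on
 * success or a negative errno from open/sync.
 */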
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection pc;
	struct file *filp;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	char *buf;
	int rc;

	cfs_tracefile_write_lock();

	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
		       filename, rc);
		goto out;
	}

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}

	/* ok, for now, just write the pages.  in the future we'll be building
	 * iobufs with the pages and calling generic_direct_IO */
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		buf = kmap(tage->page);
		rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
		kunmap(tage->page);
		if (rc != (int)tage->used) {
			printk(KERN_WARNING "wanted to write %u but wrote "
			       "%d\n", tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}

	rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		printk(KERN_ERR "sync returns %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	cfs_tracefile_write_unlock();
	return rc;
}

void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
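
/*
 * Copy a string in from userspace, strip trailing whitespace and make
 * sure the result is NUL-terminated.  Returns 0 on success, -EOVERFLOW
 * when the string cannot fit or cannot be terminated, -EFAULT on a bad
 * user pointer, or -EINVAL if the stripped string is empty.
 */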
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (--nob >= 0)			/* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0)				/* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob)		/* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0;		/* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries */
	int nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_SIZE)	/* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (*str == NULL)
		return -ENOMEM;

	return 0;
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;

	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	kfree(str);
	return rc;
}
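
/*
 * Apply a debug-daemon control string: "stop" shuts the daemon down and
 * forgets the output file, "size=<n>" sets the maximum tracefile size
 * to <n> MB (values outside 10..20480 fall back to CFS_TRACEFILE_SIZE),
 * and an absolute pathname starts the daemon writing to that file.
 * Anything else is rejected (-ENAMETOOLONG/-EINVAL).  Normally reached
 * from userspace, e.g. through "lctl debug_daemon", by way of
 * cfs_trace_daemon_command_usrstr() below.
 */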
int cfs_trace_daemon_command(char *str)
{
	int rc = 0;

	cfs_tracefile_write_lock();

	if (strcmp(str, "stop") == 0) {
		cfs_tracefile_write_unlock();
		cfs_trace_stop_thread();
		cfs_tracefile_write_lock();
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
		if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
			cfs_tracefile_size = CFS_TRACEFILE_SIZE;
		else
			cfs_tracefile_size <<= 20;

	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		printk(KERN_INFO
		       "Lustre: debug daemon will attempt to start writing "
		       "to %s (%lukB max)\n", cfs_tracefile,
		       (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	cfs_tracefile_write_unlock();
	return rc;
}

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);

	kfree(str);
	return rc;
}
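
/*
 * Resize the trace buffers to 'mb' megabytes in total.  The value is
 * clamped to [num_possible_cpus(), cfs_trace_max_debug_mb()], divided
 * evenly among the possible CPUs and then split per TCD type according
 * to tcd_pages_factor.  Always returns 0.
 */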
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	int pages;
	int limit = cfs_trace_max_debug_mb();
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too small for debug buffer size, "
		       "setting it to %d MB.\n", mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too large for debug buffer size, "
		       "setting it to %d MB.\n", mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);

	cfs_tracefile_write_lock();

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	cfs_tracefile_write_unlock();

	return 0;
}

int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	cfs_tracefile_read_lock();

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	cfs_tracefile_read_unlock();

	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
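
/*
 * Main loop of the "ktracefiled" daemon: once a second, or sooner when
 * woken by cfs_trace_get_tage_try(), collect the filled trace pages,
 * append them to cfs_tracefile, and recycle them onto the per-CPU
 * daemon ringbuffers.  On shutdown it makes one final pass before
 * completing tctl_stop.
 */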
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	struct file *filp;
	char *buf;
	int last_loop = 0;
	int rc;

	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml?  what on earth is going on? */

	complete(&tctl->tctl_start);

	while (1) {
		wait_queue_entry_t __wait;

		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;

		filp = NULL;
		cfs_tracefile_read_lock();
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				printk(KERN_WARNING "couldn't open %s: "
				       "%d\n", cfs_tracefile, rc);
			}
		}
		cfs_tracefile_read_unlock();
		if (filp == NULL) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}

		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
			struct dentry *de = file_dentry(filp);
			static loff_t f_pos;

			__LASSERT_TAGE_INVARIANT(tage);

			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > i_size_read(de->d_inode))
				f_pos = i_size_read(de->d_inode);

			buf = kmap(tage->page);
			rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
			kunmap(tage->page);
			if (rc != (int)tage->used) {
				printk(KERN_WARNING "wanted to write %u "
				       "but wrote %d\n", tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}

		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;

			printk(KERN_ALERT "Lustre: trace pages aren't "
			       "empty\n");
			printk(KERN_ERR "total cpus(%d): ",
			       num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					printk(KERN_ERR "%d(on) ", i);
				else
					printk(KERN_ERR "%d(off) ", i);
			printk(KERN_ERR "\n");

			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				printk(KERN_ERR "page %d belongs to cpu "
				       "%d\n", ++i, tage->cpu);
			printk(KERN_ERR "There are %d pages unwritten\n",
			       i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}

int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
		rc = -ECHILD;
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}

void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		printk(KERN_INFO
		       "Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}
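
/*
 * One-time setup: let the arch code allocate the per-CPU trace data
 * (which also sets tcd_pages_factor), then size each TCD so that all
 * the factors together consume 'max_pages'.
 */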
int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int j;
	int rc;
	int factor;

	rc = cfs_tracefile_init_arch();
	if (rc != 0)
		return rc;

	cfs_tcd_for_each(tcd, i, j) {
		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch. */
		factor = tcd->tcd_pages_factor;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}
	return 0;
}

static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}
			tcd->tcd_cur_pages = 0;
		}
	}
}

static void cfs_trace_cleanup(void)
{
	struct page_collection pc;

	INIT_LIST_HEAD(&pc.pc_pages);

	trace_cleanup_on_all_cpus();

	cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}