/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;

static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
        struct page *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);

        return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
                           struct list_head *stock)
{
        int i;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
                struct cfs_trace_page *tage;

                tage = cfs_tage_alloc(gfp);
                if (tage == NULL)
                        break;
                list_add_tail(&tage->linkage, stock);
        }

        return i;
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
                                if ((!memory_pressure_get() ||
                                     in_interrupt()) && printk_ratelimit())
                                        printk(KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }

        return NULL;
}
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(KERN_WARNING "debug daemon buffer overflowed; "
                       "discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}
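
/*
 * Worked example for cfs_tcd_shrink() above (illustrative numbers, not taken
 * from the original source): if a CPU's buffer has overflowed with
 * tcd_cur_pages == 100, pgcount becomes 100 / 10 = 10, the rate-limited
 * warning reports pgcount + 1 of tcd_cur_pages pages, and the 10 oldest
 * pages at the head of tcd_pages are moved onto a local page_collection and
 * handed to put_pages_on_tcd_daemon_list(), so they are recycled into the
 * debug daemon ringbuffer rather than being dropped outright.
 */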
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_SIZE) {
                printk(KERN_ERR
                       "cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header header = {0};
        struct cfs_trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* average message length */
        int max_nob;
        va_list ap;
        int i;
        int remain;
        int mask = msgdata->msg_mask;
        char *file = (char *)msgdata->msg_file;
        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU. This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled. */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (tcd == NULL)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        known_size = strlen(file) + 1;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * '2' is used because vsnprintf returns the real size required for
         * the output _without_ the terminating NUL, so a second pass is
         * needed if 'needed' was too small for this format.
         */
        for (i = 0; i < 2; i++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %d\n",
                               max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                remain = max_nob - needed;
                if (remain < 0)
                        remain = 0;

                va_start(ap, format);
                needed += vsnprintf(string_buf + needed, remain,
                                    format, ap);
                va_end(ap);

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        if (*(string_buf + needed - 1) != '\n') {
                printk(KERN_INFO "format at %s:%d:%s doesn't end in "
                       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);
        } else if (mask & D_TTY) {
                /* TTY needs '\r\n' to move carriage to leftmost position */
                if (needed < 2 || *(string_buf + needed - 2) != '\r')
                        printk(KERN_INFO "format at %s:%d:%s doesn't end in "
                               "'\\r\\n'\n", file, msgdata->msg_line,
                               msgdata->msg_fn);
        }

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strcpy(debug_buf, msgdata->msg_fn);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    time_before(jiffies, cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (time_after(jiffies, cdls->cdls_next +
                                        libcfs_console_max_delay +
                                        cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                needed = 0;
                remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
                if (remain > 0) {
                        va_start(ap, format);
                        needed += vsnprintf(string_buf + needed, remain,
                                            format, ap);
                        va_end(ap);
                }

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                put_cpu();
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                /* Do not allow this to be printed to the TTY */
                cfs_print_to_console(&header, mask & ~D_TTY, string_buf,
                                     needed, file, msgdata->msg_fn);

                put_cpu();
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_msg);
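
/*
 * Usage sketch for libcfs_debug_msg() (illustrative only): callers normally
 * go through the CDEBUG()-style macros, which build the libcfs_debug_msg_data
 * for them, e.g.:
 *
 *      CDEBUG(D_NET, "sending %d bytes to %s\n", nob, name);
 *      CDEBUG_LIMIT(D_WARNING, "rate-limited warning: rc = %d\n", rc);
 *
 * A direct call would look roughly like:
 *
 *      LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_NET, NULL);
 *      libcfs_debug_msg(&msgdata, "sending %d bytes to %s\n", nob, name);
 *
 * The exact macro names live in libcfs_debug.h and may differ between Lustre
 * releases; 'nob' and 'name' above are assumed caller variables.
 */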
void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");

        /* not reached */
}
static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch. */
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages,
                                         &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                 &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}
static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}
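
/*
 * Illustrative sketch of the ringbuffer behaviour above (assumed numbers):
 * with tcd_max_pages == 4, adding a fifth page to tcd_daemon_pages pushes
 * tcd_cur_daemon_pages to 5, so the oldest entry at the head of the list is
 * unlinked and released with cfs_tage_free(), keeping the daemon list bounded
 * while always holding the most recent trace pages for an LBUG dump.
 */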
static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}
void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        char *buf;
        int rc;

        cfs_tracefile_write_lock();

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
                       filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                buf = kmap(tage->page);
                rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
                kunmap(tage->page);
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "wanted to write %u but wrote "
                               "%d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }

        rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
        if (rc)
                printk(KERN_ERR "sync returns %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        cfs_tracefile_write_unlock();
        return rc;
}
void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char __user *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (--nob >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n" for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
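
/*
 * Usage sketch for the two helpers above (hypothetical caller; the buffer
 * and helper names are assumptions, not from this file). A /proc or ioctl
 * handler typically round-trips a string like this:
 *
 *      char knl[256];
 *      int rc;
 *
 *      rc = cfs_trace_copyin_string(knl, sizeof(knl), usr_buf, usr_nob);
 *      if (rc == 0)
 *              rc = handle_setting(knl);        // hypothetical
 *      ...
 *      rc = cfs_trace_copyout_string(usr_buf, usr_nob, knl, "\n");
 *
 * cfs_trace_copyin_string() guarantees NUL termination and strips trailing
 * whitespace; cfs_trace_copyout_string() returns the number of bytes copied,
 * including the optional appended character.
 */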
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_SIZE)        /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
        rc = cfs_tracefile_dump_all_pages(str);
out:
        kfree(str);
        return rc;
}
int cfs_trace_daemon_command(char *str)
{
        int rc = 0;

        cfs_tracefile_write_lock();

        if (strcmp(str, "stop") == 0) {
                cfs_tracefile_write_unlock();
                cfs_trace_stop_thread();
                cfs_tracefile_write_lock();
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                unsigned long tmp;

                rc = kstrtoul(str + 5, 10, &tmp);
                if (!rc) {
                        if (tmp < 10 || tmp > 20480)
                                cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                        else
                                cfs_tracefile_size = tmp << 20;
                }
        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                printk(KERN_INFO
                       "Lustre: debug daemon will attempt to start writing "
                       "to %s (%lukB max)\n", cfs_tracefile,
                       (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        cfs_tracefile_write_unlock();
        return rc;
}
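
/*
 * Command strings accepted by cfs_trace_daemon_command() above, following the
 * branches in the code (the concrete values are examples):
 *
 *      "stop"            - stop the debug daemon and forget the file name
 *      "size=40"         - set cfs_tracefile_size to 40 MB; values outside
 *                          10..20480 fall back to CFS_TRACEFILE_SIZE
 *      "/tmp/lustre.dbg" - an absolute path starts the daemon writing there;
 *                          relative paths and over-long names are rejected
 */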
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        kfree(str);
        return rc;
}
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        int pages;
        int limit = cfs_trace_max_debug_mb();
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too small for debug buffer size, "
                       "setting it to %d MB.\n", mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too large for debug buffer size, "
                       "setting it to %d MB.\n", mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        cfs_tracefile_write_lock();

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        cfs_tracefile_write_unlock();

        return 0;
}
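
/*
 * Worked example for the sizing arithmetic above (assumed configuration):
 * with mb == 256, 8 possible CPUs and 4 KiB pages, the per-CPU share is
 * 256 / 8 = 32 MB, so pages = 32 << (20 - 12) = 8192 pages per CPU; each
 * trace context then gets its tcd_pages_factor percentage of that, e.g. a
 * context with a factor of 80 ends up with tcd_max_pages == 6553.
 */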
int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        cfs_tracefile_read_lock();

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        cfs_tracefile_read_unlock();

        return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        struct file *filp;
        char *buf;
        int last_loop = 0;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml? what on earth is going on? */

        complete(&tctl->tctl_start);

        while (1) {
                wait_queue_entry_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                cfs_tracefile_read_lock();
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                printk(KERN_WARNING "couldn't open %s: "
                                       "%d\n", cfs_tracefile, rc);
                        }
                }
                cfs_tracefile_read_unlock();
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        struct dentry *de = file_dentry(filp);
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > i_size_read(de->d_inode))
                                f_pos = i_size_read(de->d_inode);

                        buf = kmap(tage->page);
                        rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
                        kunmap(tage->page);
                        if (rc != (int)tage->used) {
                                printk(KERN_WARNING "wanted to write %u "
                                       "but wrote %d\n", tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        printk(KERN_ALERT "Lustre: trace pages aren't "
                               "written to %s\n", cfs_tracefile);
                        printk(KERN_ERR "total cpus(%d): ",
                               num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        printk(KERN_ERR "%d(on) ", i);
                                else
                                        printk(KERN_ERR "%d(off) ", i);
                        printk(KERN_ERR "\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                printk(KERN_ERR "page %d belongs to cpu "
                                       "%d\n", ++i, tage->cpu);
                        printk(KERN_ERR "There are %d pages unwritten\n",
                               i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}
int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}
void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                printk(KERN_INFO
                       "Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;
        int rc;
        int factor;

        rc = cfs_tracefile_init_arch();
        if (rc != 0)
                return rc;

        cfs_tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in tracefile_init_arch. */
                factor = tcd->tcd_pages_factor;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }
        return 0;
}
static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}
static void cfs_trace_cleanup(void)
{
        struct page_collection pc;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}