/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}
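
/*
 * Allocate a trace page ("tage"): the backing page plus its bookkeeping
 * struct.  Deliberately returns NULL instead of trying harder when the
 * caller is itself freeing memory, so tracing never makes a reclaim
 * path worse.
 */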
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && memory_pressure_get())
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (page == NULL)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (tage == NULL) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__LASSERT(tage != NULL);
	__LASSERT(tage->page != NULL);

	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	__LASSERT(tage != NULL);
	__LASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}
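
/*
 * Preallocate pages onto 'stock' until this CPU's stock would reach
 * TCD_STOCK_PAGES, using the given gfp mask.  The stock lets the hot
 * logging path take a ready page instead of falling back to a
 * GFP_ATOMIC allocation.  Returns the number of pages added.
 */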
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
			   struct list_head *stock)
{
	int i;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
		struct cfs_trace_page *tage;

		tage = cfs_tage_alloc(gfp);
		if (tage == NULL)
			break;
		list_add_tail(&tage->linkage, stock);
	}
	return i;
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_CACHE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(tage == NULL)) {
				if ((!memory_pressure_get() ||
				     in_interrupt()) && printk_ratelimit())
					printk(KERN_WARNING
					       "cannot allocate a tage (%ld)\n",
					       tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}
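
/*
 * The tracefile daemon has fallen behind: discard roughly the oldest
 * 10% of this CPU's trace pages, moving them onto the daemon
 * ringbuffer so they remain available for an LBUG dump.
 */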
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (printk_ratelimit())
		printk(KERN_WARNING "debug daemon buffer overflowed; "
		       "discarding 10%% of pages (%d of %ld)\n",
		       pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (len > PAGE_CACHE_SIZE) {
		printk(KERN_ERR
		       "cowardly refusing to write %lu bytes in a page\n", len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage != NULL)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}
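
/*
 * Varargs front end for libcfs_debug_vmsg2().  Callers normally reach
 * this through the CDEBUG()/CWARN()/CERROR() macros rather than calling
 * it directly; a direct call would look roughly like the sketch below
 * (the exact macro expansion varies between libcfs versions, so treat
 * this as illustrative, not authoritative):
 *
 *	static cfs_debug_limit_state_t cdls;
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, &cdls);
 *
 *	libcfs_debug_msg(&msgdata, "operation failed: rc = %d\n", rc);
 */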
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
	va_end(args);

	return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
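
/*
 * Core debug-message formatter.  Formats 'format1' (with 'args') and
 * optionally 'format2' (with the trailing varargs) into the current
 * CPU's trace page, then decides, subject to the console mask and the
 * cdls rate limiter, whether to echo the message to the console as
 * well.  Returns 1 when console output was suppressed, 0 after the
 * message has also been printed to the console.
 */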
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
		       const char *format1, va_list args,
		       const char *format2, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header header = {0};
	struct cfs_trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85;	/* average message length */
	int max_nob;
	va_list ap;
	int i;
	int remain;
	int mask = msgdata->msg_mask;
	char *file = (char *)msgdata->msg_file;
	cfs_debug_limit_state_t *cdls = msgdata->msg_cdls;

	if (strchr(file, '/'))
		file = strrchr(file, '/') + 1;

	tcd = cfs_trace_get_tcd();

	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
	 * pins us to a particular CPU.  This avoids an smp_processor_id()
	 * warning on Linux when debugging is enabled. */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

	if (tcd == NULL)		/* arch may not log in IRQ context */
		goto console;

	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;

	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	known_size = strlen(file) + 1;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;

	if (libcfs_debug_binary)
		known_size += sizeof(header);

	/*
	 * Loop at most twice: vsnprintf() returns the size the output really
	 * requires, _without_ the terminating NUL, so if the first pass
	 * guessed 'needed' too small we retry once with the exact size.
	 */
	for (i = 0; i < 2; i++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (tage == NULL) {
			if (needed + known_size > PAGE_CACHE_SIZE)
				mask |= D_ERROR;

			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
			     tage->used + known_size;

		max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %d\n",
			       max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format1) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}

		if (format2) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;

			va_start(ap, format2);
			needed += vsnprintf(string_buf + needed, remain,
					    format2, ap);
			va_end(ap);
		}

		if (needed < max_nob)	/* well. printing ok.. */
			break;
	}

	if (*(string_buf + needed - 1) != '\n')
		printk(KERN_INFO "format at %s:%d:%s doesn't end in "
		       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (msgdata->msg_fn) {
		strcpy(debug_buf, msgdata->msg_fn);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}

	__LASSERT(debug_buf == string_buf);

	tage->used += needed;
	__LASSERT(tage->used <= PAGE_CACHE_SIZE);

console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			cfs_trace_put_tcd(tcd);
		return 1;
	}

	if (cdls != NULL) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 &&	/* not first time ever */
		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				cfs_trace_put_tcd(tcd);
			return 1;
		}

		if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
				   libcfs_console_max_delay +
				   cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}

		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();

		needed = 0;
		if (format1 != NULL) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf,
					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
					   format1, ap);
			va_end(ap);
		}
		if (format2 != NULL) {
			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf + needed,
						    remain, format2, ap);
				va_end(ap);
			}
		}
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file,
				     msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
	}

	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();

		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				  "Skipped %d previous similar message%s\n",
				  cdls->cdls_count,
				  (cdls->cdls_count > 1) ? "s" : "");

		cfs_print_to_console(&header, mask,
				     string_buf, needed, file,
				     msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);
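
/*
 * Called on assertion failure (LASSERT()/LBUG()): flag the catastrophe,
 * print the assertion text straight to the console and panic.  Never
 * returns.
 */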
void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	mb();	/* flags must be visible before any console output */

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");

	/* not reached */
}
static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic.  If this isn't true for some
	 * arch, this will have to be implemented separately in each arch. */
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages,
					 &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}
static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}
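
/*
 * Undo collect_pages_on_all_cpus(): return each collected page to the
 * tcd list of the CPU and type it came from, in front of whatever that
 * CPU has logged since.  Used when a tracefile write fails partway
 * through.
 */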
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}

static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;

		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;

		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;

			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

			__LASSERT_TAGE_INVARIANT(victim);

			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}
void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;

		__LASSERT_TAGE_INVARIANT(tage);

		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;

			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;
			len = hdr->ph_len - (int)(p - (char *)hdr);

			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

			p += len;
		}

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
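
/*
 * Dump every collected trace page (daemon pages included) to 'filename'.
 * The file is created O_EXCL, so an existing file makes the dump fail
 * rather than clobber it.  Returns 0 on success or a negative errno.
 */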
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection pc;
	struct file *filp;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int rc;

	cfs_tracefile_write_lock();

	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
		       filename, rc);
		goto out;
	}

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}

	/* ok, for now, just write the pages.  in the future we'll be building
	 * iobufs with the pages and calling generic_direct_IO */
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		rc = filp_write(filp, page_address(tage->page),
				tage->used, filp_poff(filp));
		if (rc != (int)tage->used) {
			printk(KERN_WARNING "wanted to write %u but wrote "
			       "%d\n", tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}

	rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		printk(KERN_ERR "sync returns %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	cfs_tracefile_write_unlock();
	return rc;
}
void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
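
/*
 * Copy a string from userspace, strip trailing whitespace and guarantee
 * NUL termination.  Returns 0 on success or a negative errno when the
 * user buffer is too large, unreadable, empty, or leaves no room for
 * the terminator.
 */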
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (--nob >= 0)			/* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0)				/* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob)		/* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0;		/* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n", for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries */
	int nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_CACHE_SIZE)	/* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
	if (*str == NULL)
		return -ENOMEM;

	return 0;
}

void cfs_trace_free_string_buffer(char *str, int nob)
{
	kfree(str);
}
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;

	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	cfs_trace_free_string_buffer(str, usr_str_nob + 1);
	return rc;
}
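
/*
 * Control the debug daemon.  'str' is either "stop" (shut the daemon
 * down and forget the tracefile), "size=<MB>" (resize the tracefile,
 * reset to CFS_TRACEFILE_SIZE when outside the 10..20480 MB range), or
 * an absolute path to start writing to.
 */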
int cfs_trace_daemon_command(char *str)
{
	int rc = 0;

	cfs_tracefile_write_lock();

	if (strcmp(str, "stop") == 0) {
		cfs_tracefile_write_unlock();
		cfs_trace_stop_thread();
		cfs_tracefile_write_lock();
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
		if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
			cfs_tracefile_size = CFS_TRACEFILE_SIZE;
		else
			cfs_tracefile_size <<= 20;

	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		printk(KERN_INFO
		       "Lustre: debug daemon will attempt to start writing "
		       "to %s (%lukB max)\n", cfs_tracefile,
		       (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	cfs_tracefile_write_unlock();
	return rc;
}
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);

	cfs_trace_free_string_buffer(str, usr_str_nob + 1);
	return rc;
}
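
/*
 * Resize the in-memory trace buffers to 'mb' megabytes in total,
 * clamped between num_possible_cpus() and cfs_trace_max_debug_mb().
 * The budget is divided evenly between CPUs, then between TCD types
 * according to each tcd_pages_factor percentage.
 */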
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	int pages;
	int limit = cfs_trace_max_debug_mb();
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too small for debug buffer size, "
		       "setting it to %d MB.\n", mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too large for debug buffer size, "
		       "setting it to %d MB.\n", mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_CACHE_SHIFT);

	cfs_tracefile_write_lock();

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	cfs_tracefile_write_unlock();

	return 0;
}
int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob)
{
	char str[32];
	int rc;

	rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
	if (rc < 0)
		return rc;

	return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
}
int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	cfs_tracefile_read_lock();

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	cfs_tracefile_read_unlock();

	return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
}
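
/*
 * The debug daemon ("ktracefiled"): once a second, or when woken by
 * cfs_trace_get_tage_try(), collect the trace pages queued on every CPU
 * and append them to cfs_tracefile, wrapping at cfs_tracefile_size.
 * Pages that cannot be written are pushed back so nothing is lost.
 */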
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	struct file *filp;
	int last_loop = 0;
	int rc;

	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml?  what on earth is going on? */

	complete(&tctl->tctl_start);

	while (1) {
		wait_queue_t __wait;

		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;

		filp = NULL;
		cfs_tracefile_read_lock();
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				printk(KERN_WARNING "couldn't open %s: "
				       "%d\n", cfs_tracefile, rc);
			}
		}
		cfs_tracefile_read_unlock();
		if (filp == NULL) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}

		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
			static loff_t f_pos;

			__LASSERT_TAGE_INVARIANT(tage);

			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > (off_t)filp_size(filp))
				f_pos = filp_size(filp);

			rc = filp_write(filp, page_address(tage->page),
					tage->used, &f_pos);
			if (rc != (int)tage->used) {
				printk(KERN_WARNING "wanted to write %u "
				       "but wrote %d\n", tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}

		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;

			printk(KERN_ALERT "Lustre: trace pages aren't "
			       "empty\n");
			printk(KERN_ERR "total cpus(%d): ",
			       num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					printk(KERN_ERR "%d(on) ", i);
				else
					printk(KERN_ERR "%d(off) ", i);
			printk(KERN_ERR "\n");

			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				printk(KERN_ERR "page %d belongs to cpu "
				       "%d\n", ++i, tage->cpu);
			printk(KERN_ERR "There are %d pages unwritten\n",
			       i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry_current(&__wait);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		set_current_state(TASK_INTERRUPTIBLE);
		waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
				cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}
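
/*
 * Start the trace daemon if it is not already running.  Serialized by
 * cfs_trace_thread_mutex; waits for the daemon to signal tctl_start
 * before returning.
 */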
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
		rc = -ECHILD;
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}
void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		printk(KERN_INFO
		       "Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}
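
/*
 * Allocate per-CPU trace state and size each TCD at the arch-supplied
 * percentage (tcd_pages_factor) of 'max_pages'.  Called once during
 * libcfs startup, before any CDEBUG() can fire.
 */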
int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int j;
	int rc;
	int factor;

	rc = cfs_tracefile_init_arch();
	if (rc != 0)
		return rc;

	cfs_tcd_for_each(tcd, i, j) {
		/* tcd_pages_factor is initialized in tracefile_init_arch. */
		factor = tcd->tcd_pages_factor;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}
	return 0;
}
static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}
			tcd->tcd_cur_pages = 0;
		}
	}
}
static void cfs_trace_cleanup(void)
{
	struct page_collection pc;

	INIT_LIST_HEAD(&pc.pc_pages);

	trace_cleanup_on_all_cpus();

	cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}