/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <libcfs/libcfs.h>
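
/*
 * Overview (summary of the code below): trace records accumulate in per-CPU,
 * per-context (process, soft-IRQ, IRQ) page buffers ("tages").  The kernel
 * thread started by cfs_trace_start_thread() drains filled pages to the file
 * named by cfs_tracefile; drained pages are kept on per-CPU "daemon"
 * ringbuffers so recent history is still available for dumping when an LBUG
 * fires.
 */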
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;

cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);

static inline struct cfs_trace_page *
cfs_tage_from_list(cfs_list_t *list)
{
        return cfs_list_entry(list, struct cfs_trace_page, linkage);
}
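
/* Allocate a trace page and its bookkeeping struct.  Backs off when the
 * caller is under memory pressure, so tracing never makes reclaim worse. */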
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
        cfs_page_t            *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!cfs_in_interrupt() && cfs_memory_pressure_get())
                return NULL;

        /* Don't spam console with allocation failures: they will be reported
         * by upper layer anyway. */
        gfp |= CFS_ALLOC_NOWARN;
        page = cfs_alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = cfs_alloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                cfs_free_page(page);
                return NULL;
        }

        tage->page = page;
        cfs_atomic_inc(&cfs_tage_allocated);
        return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        cfs_free_page(tage->page);
        cfs_free(tage);
        cfs_atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             cfs_list_t *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        cfs_list_move_tail(&tage->linkage, queue);
}
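
/* Top up the per-context stock of pre-allocated trace pages to
 * TCD_STOCK_PAGES, so that cfs_trace_get_tage_try() can later take a page
 * without calling the allocator.  Returns the number of pages added to
 * 'stock'. */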
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
                           cfs_list_t *stock)
{
        int i;

        /* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion. */
        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
                struct cfs_trace_page *tage;

                tage = cfs_tage_alloc(gfp);
                if (tage == NULL)
                        break;
                cfs_list_add_tail(&tage->linkage, stock);
        }
        return i;
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!cfs_list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= CFS_PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        cfs_list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
                        if (tage == NULL) {
                                if (printk_ratelimit())
                                        printk(CFS_KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = cfs_smp_processor_id();
                tage->type = tcd->tcd_type;
                cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /* wake up tracefiled to process some pages. */
                        cfs_waitq_signal(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}
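
/* This context's trace buffer is full: move the oldest tenth of its pages
 * onto the per-CPU daemon ringbuffer, where the oldest entries are
 * recycled. */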
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion. */

        if (printk_ratelimit())
                printk(CFS_KERN_WARNING "debug daemon buffer overflowed; "
                       "discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);

        cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
                                           struct cfs_trace_page, linkage) {
                if (pgcount-- == 0)
                        break;

                cfs_list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /* XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion. */

        if (len > CFS_PAGE_SIZE) {
                printk(CFS_KERN_ERR
                       "cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}
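
/* printf()-style wrapper: logs through libcfs_debug_vmsg2() with no
 * secondary format string. */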
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        va_list args;
        int     rc;

        va_start(args, format);
        rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
        va_end(args);

        return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
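
/* Core logging routine: format the message from 'format1' (with 'args') and
 * optionally 'format2' (with trailing varargs) into the current CPU's trace
 * page, then mirror it to the console, subject to the per-call-site rate
 * limit state in msgdata->msg_cdls. */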
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                       const char *format1, va_list args,
                       const char *format2, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header     header = {0};
        struct cfs_trace_page     *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char                      *string_buf = NULL;
        char                      *debug_buf;
        int                        known_size;
        int                        needed = 85; /* average message length */
        int                        max_nob;
        va_list                    ap;
        int                        depth;
        int                        i;
        int                        remain;
        int                        mask = msgdata->msg_mask;
        char                      *file = (char *)msgdata->msg_file;
        cfs_debug_limit_state_t   *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU. This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled. */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (tcd == NULL)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        depth = __current_nesting_level();
        known_size = strlen(file) + 1 + depth;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /* The loop below runs at most twice: vsnprintf() returns the real
         * size required for the output _without_ the terminating NUL, so a
         * second pass is taken when the initial 'needed' guess is too small
         * for this format. */
        for (i = 0; i < 2; i++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > CFS_PAGE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)cfs_page_address(tage->page) +
                             tage->used + known_size;

                max_nob = CFS_PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(CFS_KERN_EMERG "negative max_nob: %d\n",
                               max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf + needed, remain,
                                            format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        if (*(string_buf + needed - 1) != '\n')
                printk(CFS_KERN_INFO "format at %s:%d:%s doesn't end in "
                       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);

        header.ph_len = known_size + needed;
        debug_buf = (char *)cfs_page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        /* indent message according to the nesting level */
        while (depth-- > 0) {
                *(debug_buf++) = '.';
                ++tage->used;
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strcpy(debug_buf, msgdata->msg_fn);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= CFS_PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (cfs_time_after(cfs_time_current(),
                                   cdls->cdls_next + libcfs_console_max_delay +
                                   cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;

                        if (cdls->cdls_delay < libcfs_console_min_delay)
                                cdls->cdls_delay = libcfs_console_min_delay;
                        else if (cdls->cdls_delay > libcfs_console_max_delay)
                                cdls->cdls_delay = libcfs_console_max_delay;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf,
                                           CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                           format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf + needed,
                                                    remain, format2, ap);
                                va_end(ap);
                        }
                }
                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file,
                                     msgdata->msg_fn);

                cfs_trace_put_console_buffer(string_buf);
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file,
                                     msgdata->msg_fn);

                cfs_trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);

void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        cfs_mb();

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        LIBCFS_PANIC("Lustre debug assertion failure\n");

        /* not reached */
}
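
/* The collect_pages()/put_pages_back() helpers below move trace pages
 * between the per-CPU contexts and a private page_collection; on the panic
 * path a single-CPU variant that takes no per-CPU locks is used instead. */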
static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch. */
        int                        i;
        int                        j;
        struct cfs_trace_cpu_data *tcd;

        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        cfs_list_splice_init(&tcd->tcd_daemon_pages,
                                             &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        spin_lock(&pc->pc_lock);
        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                cfs_list_splice_init(&tcd->tcd_daemon_pages,
                                                     &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void collect_pages(struct page_collection *pc)
{
        /* note: pc is a pointer here, so the list head is pc->pc_pages */
        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        cfs_list_t *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        spin_lock(&pc->pc_lock);
        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        cfs_list_for_each_entry_safe_typed(tage, tmp,
                                                           &pc->pc_pages,
                                                           struct cfs_trace_page,
                                                           linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        spin_lock(&pc->pc_lock);
        cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
                                           struct cfs_trace_page, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!cfs_list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        cfs_list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
        spin_unlock(&pc->pc_lock);
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}
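
/* Decode every collected trace page, daemon ringbuffer included, and replay
 * the records straight to the console at D_EMERG; pages are freed as they
 * are printed. */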
void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        spin_lock_init(&pc.pc_lock);

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                           struct cfs_trace_page, linkage) {
                char *p, *file, *fn;
                cfs_page_t *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = cfs_page_address(page);
                while (p < ((char *)cfs_page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;
                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;

                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                cfs_list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
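
/* Write all collected trace pages (including the daemon ringbuffer) to
 * 'filename'.  The file is opened O_EXCL, so an existing dump is never
 * overwritten. */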
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection pc;
        cfs_file_t *filp;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int rc;

        CFS_DECL_MMSPACE;

        cfs_tracefile_write_lock();

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
                       filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (cfs_list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        CFS_MMSPACE_OPEN;
        cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                           struct cfs_trace_page, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                rc = filp_write(filp, cfs_page_address(tage->page),
                                tage->used, filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(CFS_KERN_WARNING "wanted to write %u but wrote "
                               "%d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(cfs_list_empty(&pc.pc_pages));
                        break;
                }
                cfs_list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
        CFS_MMSPACE_CLOSE;
        rc = filp_fsync(filp);
        if (rc)
                printk(CFS_KERN_ERR "sync returns %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        cfs_tracefile_write_unlock();
        return rc;
}

void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        spin_lock_init(&pc.pc_lock);

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                           struct cfs_trace_page, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                cfs_list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
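
/* Copy a string from userspace, strip trailing whitespace and guarantee NUL
 * termination.  Returns 0 on success, -EOVERFLOW if the user string cannot
 * fit (or cannot be terminated), -EFAULT on copy failure and -EINVAL for an
 * empty or all-whitespace string. */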
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (cfs_copy_from_user((void *)knl_buffer,
                               (void *)usr_buffer, usr_buffer_nob))
                return -EFAULT;

        /* note: '> 0' (not '>= 0') avoids reading knl_buffer[-1] when the
         * string is entirely whitespace */
        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (nob-- > 0)                       /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n", for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (cfs_copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * CFS_PAGE_SIZE)            /* string must be "sensible" */
                return -EINVAL;

        *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

void cfs_trace_free_string_buffer(char *str, int nob)
{
        cfs_free(str);
}

int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

#if !defined(__WINNT__)
        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
#endif
        rc = cfs_tracefile_dump_all_pages(str);
out:
        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
        return rc;
}

int cfs_trace_daemon_command(char *str)
{
        int rc = 0;

        cfs_tracefile_write_lock();

        if (strcmp(str, "stop") == 0) {
                cfs_tracefile_write_unlock();
                cfs_trace_stop_thread();
                cfs_tracefile_write_lock();
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
                if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
                        cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                else
                        cfs_tracefile_size <<= 20;

        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                printk(CFS_KERN_INFO
                       "Lustre: debug daemon will attempt to start writing "
                       "to %s (%lukB max)\n", cfs_tracefile,
                       (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        cfs_tracefile_write_unlock();
        return rc;
}

int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
        return rc;
}
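
/* Resize the trace buffers to 'mb' megabytes in total.  The value is clamped
 * to [number of possible CPUs, cfs_trace_max_debug_mb()], divided evenly
 * between CPUs, then apportioned per context via tcd_pages_factor. */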
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        int pages;
        int limit = cfs_trace_max_debug_mb();
        struct cfs_trace_cpu_data *tcd;

        if (mb < cfs_num_possible_cpus()) {
                printk(CFS_KERN_WARNING
                       "Lustre: %d MB is too small for debug buffer size, "
                       "setting it to %d MB.\n", mb, cfs_num_possible_cpus());
                mb = cfs_num_possible_cpus();
        }

        if (mb > limit) {
                printk(CFS_KERN_WARNING
                       "Lustre: %d MB is too large for debug buffer size, "
                       "setting it to %d MB.\n", mb, limit);
                mb = limit;
        }

        mb /= cfs_num_possible_cpus();
        pages = mb << (20 - CFS_PAGE_SHIFT);

        cfs_tracefile_write_lock();

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        cfs_tracefile_write_unlock();

        return 0;
}

int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
{
        char str[32];
        int  rc;

        rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
        if (rc < 0)
                return rc;

        return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
}

int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        cfs_tracefile_read_lock();

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        cfs_tracefile_read_unlock();

        return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
}
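
/* Main loop of the trace daemon thread ("ktracefiled"): collect filled trace
 * pages roughly once per second (or when woken via tctl_waitq), append them
 * to cfs_tracefile, then recycle them onto the per-CPU daemon lists.  Runs
 * until tctl_shutdown is set, with one last pass to drain remaining pages. */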
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        cfs_file_t *filp;
        int last_loop = 0;
        int rc;

        CFS_DECL_MMSPACE;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml? what on earth is going on? */
        cfs_daemonize("ktracefiled");

        spin_lock_init(&pc.pc_lock);
        complete(&tctl->tctl_start);

        while (1) {
                cfs_waitlink_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (cfs_list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                cfs_tracefile_read_lock();
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                printk(CFS_KERN_WARNING "couldn't open %s: "
                                       "%d\n", cfs_tracefile, rc);
                        }
                }
                cfs_tracefile_read_unlock();
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(cfs_list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                CFS_MMSPACE_OPEN;

                cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                                   struct cfs_trace_page,
                                                   linkage) {
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > (off_t)filp_size(filp))
                                f_pos = filp_size(filp);

                        rc = filp_write(filp, cfs_page_address(tage->page),
                                        tage->used, &f_pos);
                        if (rc != (int)tage->used) {
                                printk(CFS_KERN_WARNING "wanted to write %u "
                                       "but wrote %d\n", tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(cfs_list_empty(&pc.pc_pages));
                        }
                }

                CFS_MMSPACE_CLOSE;

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!cfs_list_empty(&pc.pc_pages)) {
                        int i;

                        printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
                               "empty\n");
                        printk(CFS_KERN_ERR "total cpus(%d): ",
                               cfs_num_possible_cpus());
                        for (i = 0; i < cfs_num_possible_cpus(); i++)
                                if (cfs_cpu_online(i))
                                        printk(CFS_KERN_ERR "%d(on) ", i);
                                else
                                        printk(CFS_KERN_ERR "%d(off) ", i);
                        printk(CFS_KERN_ERR "\n");

                        i = 0;
                        cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                     linkage)
                                printk(CFS_KERN_ERR "page %d belongs to cpu "
                                       "%d\n", ++i, tage->cpu);
                        printk(CFS_KERN_ERR "There are %d pages unwritten\n",
                               i);
                }
                __LASSERT(cfs_list_empty(&pc.pc_pages));
end_loop:
                if (cfs_atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                /* take one final pass to drain the pages */
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                cfs_waitlink_init(&__wait);
                cfs_waitq_add(&tctl->tctl_waitq, &__wait);
                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
                                    cfs_time_seconds(1));
                cfs_waitq_del(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}

int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        cfs_waitq_init(&tctl->tctl_waitq);
        cfs_atomic_set(&tctl->tctl_shutdown, 0);

        if (cfs_create_thread(tracefiled, tctl, 0) < 0) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}

void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                printk(CFS_KERN_INFO
                       "Lustre: shutting down debug daemon thread...\n");
                cfs_atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}
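
/* Initialize each per-CPU trace context and set its page quota: 'max_pages'
 * is split between contexts according to the tcd_pages_factor percentages
 * established by cfs_tracefile_init_arch(). */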
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;
        int rc;
        int factor;

        rc = cfs_tracefile_init_arch();
        if (rc != 0)
                return rc;

        cfs_tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in tracefile_init_arch. */
                factor = tcd->tcd_pages_factor;
                CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }
        return 0;
}

static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        cfs_list_for_each_entry_safe_typed(tage, tmp,
                                                           &tcd->tcd_pages,
                                                           struct cfs_trace_page,
                                                           linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                cfs_list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}

static void cfs_trace_cleanup(void)
{
        struct page_collection pc;

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);

        trace_cleanup_on_all_cpus();

        cfs_tracefile_fini_arch();
}

void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}