/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <libcfs/kp30.h>
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */

union trace_data_union trace_data[NR_CPUS] __cacheline_aligned;

char *tracefile = NULL;
int64_t tracefile_size = TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
struct semaphore trace_thread_sem;
static int thread_running = 0;

atomic_t tage_allocated = ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct trace_cpu_data *tcd);
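
/*
 * A "tage" (trace page) wraps a single cfs_page_t of formatted debug
 * messages plus the list linkage and CPU ownership needed to keep it on
 * the per-CPU page lists.  The helpers below allocate, free and move them.
 */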
static inline struct trace_page *tage_from_list(struct list_head *list)
{
        return list_entry(list, struct trace_page, linkage);
}
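
/* Allocate a tage and its backing page; returns NULL on failure without
 * leaking either allocation. */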
static struct trace_page *tage_alloc(int gfp)
{
        cfs_page_t *page;
        struct trace_page *tage;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= CFS_ALLOC_NOWARN;
        page = cfs_alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = cfs_alloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                cfs_free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&tage_allocated);
        return tage;
}
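
/* Free a tage, its backing page, and drop the global allocation counter. */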
static void tage_free(struct trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        cfs_free_page(tage->page);
        cfs_free(tage);
        atomic_dec(&tage_allocated);
}
static void tage_to_tail(struct trace_page *tage, struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
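
/* Refill the caller-supplied 'stock' list up to TCD_STOCK_PAGES, so that
 * later page grabs in atomic context can avoid the page allocator. */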
int trace_refill_stock(struct trace_cpu_data *tcd, int gfp,
                       struct list_head *stock)
{
        int i;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) {
                struct trace_page *tage;

                tage = tage_alloc(gfp);
                if (tage == NULL)
                        break;
                list_add_tail(&tage->linkage, stock);
        }
        return i;
}
/* return a page that has 'len' bytes left at the end */
static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd,
                                             unsigned long len)
{
        struct trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= CFS_PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = tage_from_list(tcd->tcd_stock_pages.prev);
                        -- tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = tage_alloc(CFS_ALLOC_ATOMIC);
                        if (tage == NULL) {
                                printk(KERN_WARNING
                                       "failure to allocate a tage (%ld)\n",
                                       tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        cfs_waitq_signal(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}
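
/* The trace buffer is full: move the oldest ~10% of pages onto the per-CPU
 * daemon ring (so they can still be dumped on an LBUG) and drop them from
 * the active list. */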
static void tcd_shrink(struct trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        printk(KERN_WARNING "debug daemon buffer overflowed; discarding"
               " 10%% of pages (%d of %ld)\n", pgcount + 1, tcd->tcd_cur_pages);

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd,
                                         unsigned long len)
{
        struct trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > CFS_PAGE_SIZE) {
                printk(KERN_ERR
                       "cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}
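
/*
 * Format one debug message into the current CPU's trace page and, when the
 * mask requires it, echo it to the console, rate-limited through 'cdls'.
 * The two printf-style formats are simply concatenated into one record.
 */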
int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask,
                       const char *file, const char *fn, const int line,
                       const char *format1, va_list args,
                       const char *format2, ...)
{
        struct trace_cpu_data   *tcd = NULL;
        struct ptldebug_header   header;
        struct trace_page       *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char                    *string_buf = NULL;
        char                    *debug_buf;
        int                      known_size;
        int                      needed = 85; /* average message length */
        int                      max_nob;
        va_list                  ap;
        int                      depth;
        int                      i;
        int                      remain;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK());

        tcd = trace_get_tcd();
        if (tcd == NULL)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_shutting_down) {
                trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        depth = __current_nesting_level();
        known_size = strlen(file) + 1 + depth;
        if (fn)
                known_size += strlen(fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * '2' used because vsnprintf returns the real size required for the
         * output _without_ the terminating NUL; retry once if 'needed' turns
         * out to be too small for this format.
         */
        for (i = 0; i < 2; i++) {
                tage = trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > CFS_PAGE_SIZE)
                                mask |= D_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)cfs_page_address(tage->page)+tage->used+known_size;

                max_nob = CFS_PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
                        mask |= D_ERROR;
                        trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf+needed, remain, format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        if (*(string_buf+needed-1) != '\n')
                printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
                       file, line, fn);

        header.ph_len = known_size + needed;
        debug_buf = (char *)cfs_page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        /* indent message according to the nesting level */
        while (depth-- > 0) {
                *(debug_buf++) = '.';
                ++ tage->used;
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (fn) {
                strcpy(debug_buf, fn);
                tage->used += strlen(fn) + 1;
                debug_buf += strlen(fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT (tage->used <= CFS_PAGE_SIZE);

console:
        if (!((mask & D_CANTMASK) != 0 || (mask & libcfs_printk) != 0)) {
                /* no console output requested */
                if (tcd != NULL)
                        trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                cfs_time_t t = cdls->cdls_next +
                               cfs_time_seconds(CDEBUG_MAX_LIMIT + 10);
                cfs_duration_t dmax = cfs_time_seconds(CDEBUG_MAX_LIMIT);

                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                trace_put_tcd(tcd);
                        return 1;
                }

                if (cfs_time_after(cfs_time_current(), t)) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= 8;
                } else {
                        cdls->cdls_delay *= 2;

                        if (cdls->cdls_delay < CFS_TICK)
                                cdls->cdls_delay = CFS_TICK;
                        else if (cdls->cdls_delay > dmax)
                                cdls->cdls_delay = dmax;
                }

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                print_to_console(&header, mask, string_buf, needed, file, fn);
                trace_put_tcd(tcd);
        } else {
                string_buf = trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf,
                                           TRACE_CONSOLE_BUFFER_SIZE,
                                           format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf + needed,
                                                    remain, format2, ap);
                                va_end(ap);
                        }
                }
                print_to_console(&header, mask,
                                 string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = trace_get_console_buffer();

                needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                print_to_console(&header, mask,
                                 string_buf, needed, file, fn);

                trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);
void
libcfs_assertion_failed(const char *expr, const char *file,
                        const char *func, const int line)
{
        libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
                         "ASSERTION(%s) failed\n", expr);
        LBUG();
}
EXPORT_SYMBOL(libcfs_assertion_failed);
void
trace_assertion_failed(const char *str,
                       const char *fn, const char *file, int line)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;

        set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
                            CDEBUG_STACK());

        print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);

        LIBCFS_PANIC("Lustre debug assertion failure\n");

        /* not reached */
}
static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch. */
        int i;
        struct trace_cpu_data *tcd;

        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        for (i = 0; i < NR_CPUS; i++) {
                tcd = &trace_data[i].tcd;

                list_splice(&tcd->tcd_pages, &pc->pc_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice(&tcd->tcd_daemon_pages, &pc->pc_pages);
                        CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}
static void collect_pages_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;
        struct page_collection *pc = info;

        tcd = trace_get_tcd();
        __LASSERT (tcd != NULL);

        spin_lock(&pc->pc_lock);
        list_splice(&tcd->tcd_pages, &pc->pc_pages);
        CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
        tcd->tcd_cur_pages = 0;
        if (pc->pc_want_daemon_pages) {
                list_splice(&tcd->tcd_daemon_pages, &pc->pc_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages = 0;
        }
        spin_unlock(&pc->pc_lock);

        trace_put_tcd(tcd);
}
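
/* Gather every CPU's trace pages onto pc->pc_pages: directly on this CPU
 * during a panic, otherwise by running collect_pages_on_cpu() on each CPU. */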
static void collect_pages(struct page_collection *pc)
{
        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                trace_call_on_all_cpus(collect_pages_on_cpu, pc);
}
static void put_pages_back_on_cpu(void *info)
{
        struct page_collection *pc = info;
        struct trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct trace_page *tage;
        struct trace_page *tmp;

        tcd = trace_get_tcd();
        __LASSERT (tcd != NULL);

        cur_head = tcd->tcd_pages.next;

        spin_lock(&pc->pc_lock);
        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != smp_processor_id())
                        continue;

                tage_to_tail(tage, cur_head);
                tcd->tcd_cur_pages++;
        }
        spin_unlock(&pc->pc_lock);

        trace_put_tcd(tcd);
}
static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                trace_call_on_all_cpus(put_pages_back_on_cpu, pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct trace_cpu_data *tcd)
{
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock(&pc->pc_lock);
        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != smp_processor_id())
                        continue;

                tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
        spin_unlock(&pc->pc_lock);
}
static void put_pages_on_daemon_list_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;

        tcd = trace_get_tcd();
        __LASSERT (tcd != NULL);

        put_pages_on_tcd_daemon_list(info, tcd);

        trace_put_tcd(tcd);
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
        trace_call_on_all_cpus(put_pages_on_daemon_list_on_cpu, pc);
}
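
/* Decode every collected trace page and print its records straight to the
 * console at D_EMERG, freeing the pages as they are consumed. */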
void trace_debug_print(void)
{
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock_init(&pc.pc_lock);

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                cfs_page_t *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = cfs_page_address(page);
                while (p < ((char *)cfs_page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (p - (char *)hdr);

                        print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                tage_free(tage);
        }
}
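
/* Collect all trace pages (including the daemon ring) and write them out to
 * 'filename', which must not already exist (O_CREAT|O_EXCL). */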
int tracefile_dump_all_pages(char *filename)
{
        struct page_collection pc;
        cfs_file_t *filp;
        struct trace_page *tage;
        struct trace_page *tmp;
        int rc;

        tracefile_write_lock();

        filp = cfs_filp_open(filename,
                             O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc);
        if (filp == NULL) {
                printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
                       filename, rc);
                goto out;
        }

        spin_lock_init(&pc.pc_lock);
        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                rc = cfs_filp_write(filp, cfs_page_address(tage->page),
                                    tage->used, cfs_filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "wanted to write %u but wrote "
                               "%d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                tage_free(tage);
        }

        rc = cfs_filp_fsync(filp);
        if (rc)
                printk(KERN_ERR "sync returns %d\n", rc);
close:
        cfs_filp_close(filp);
out:
        tracefile_write_unlock();
        return rc;
}
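
/* Collect and discard all trace pages without writing them anywhere. */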
void trace_flush_pages(void)
{
        struct page_collection pc;
        struct trace_page *tage;
        struct trace_page *tmp;

        spin_lock_init(&pc.pc_lock);

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                tage_free(tage);
        }
}
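
/* Write handler for the debug control file: copy an absolute path from
 * userspace and dump all trace pages to it via tracefile_dump_all_pages(). */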
int trace_dk(struct file *file, const char *buffer, unsigned long count,
             void *data)
{
        char *name;
        unsigned long off;
        int rc;

        name = cfs_alloc(count + 1, CFS_ALLOC_STD);
        if (name == NULL)
                return -ENOMEM;

        if (copy_from_user((void *)name, (void *)buffer, count)) {
                cfs_free(name);
                return -EFAULT;
        }

#if !defined(__WINNT__)
        if (name[0] != '/') {
                cfs_free(name);
                return -EINVAL;
        }
#endif

        /* be nice and strip out trailing '\n' */
        for (off = count ; off > 2 && isspace(name[off - 1]); off--)
                ;
        name[off] = '\0';

        rc = tracefile_dump_all_pages(name);

        cfs_free(name);
        return rc ? rc : count;
}
EXPORT_SYMBOL(trace_dk);
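
/*
 * The tracefile daemon: wakes up roughly once a second (or when signalled
 * from trace_get_tage_try()), collects the per-CPU pages and appends them to
 * the configured 'tracefile', wrapping at 'tracefile_size'.  Pages are then
 * parked on the per-CPU daemon ring so an LBUG can still dump recent history.
 */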
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct trace_page *tage;
        struct trace_page *tmp;
        struct ptldebug_header *hdr;
        cfs_file_t *filp;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml?  what on earth is going on? */
        cfs_daemonize("ktracefiled");

        spin_lock_init(&pc.pc_lock);
        complete(&tctl->tctl_start);

        while (1) {
                cfs_waitlink_t __wait;

                cfs_waitlink_init(&__wait);
                cfs_waitq_add(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
                cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
                                    cfs_time_seconds(1));
                cfs_waitq_del(&tctl->tctl_waitq, &__wait);

                if (atomic_read(&tctl->tctl_shutdown))
                        break;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        continue;

                filp = NULL;
                tracefile_read_lock();
                if (tracefile != NULL) {
                        filp = cfs_filp_open(tracefile,
                                             O_CREAT | O_RDWR | O_LARGEFILE,
                                             0600, &rc);
                        if (filp == NULL)
                                printk("couldn't open %s: %d\n", tracefile, rc);
                }
                tracefile_read_unlock();
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        continue;
                }

                /* mark the first header, so we can sort in chunks */
                tage = tage_from_list(pc.pc_pages.next);
                __LASSERT_TAGE_INVARIANT(tage);

                hdr = cfs_page_address(tage->page);
                hdr->ph_flags |= PH_FLAG_FIRST_RECORD;

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)tracefile_size)
                                f_pos = 0;
                        else if (f_pos > cfs_filp_size(filp))
                                f_pos = cfs_filp_size(filp);

                        rc = cfs_filp_write(filp, cfs_page_address(tage->page),
                                            tage->used, &f_pos);
                        if (rc != (int)tage->used) {
                                printk(KERN_WARNING "wanted to write %u but "
                                       "wrote %d\n", tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                        }
                }

                cfs_filp_close(filp);
                put_pages_on_daemon_list(&pc);
                __LASSERT(list_empty(&pc.pc_pages));
        }
        complete(&tctl->tctl_stop);
        return 0;
}
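
/* Start the tracefiled daemon if it is not already running; serialized with
 * trace_stop_thread() by trace_thread_sem. */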
int trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_down(&trace_thread_sem);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        cfs_waitq_init(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (cfs_kernel_thread(tracefiled, tctl, 0) < 0) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_up(&trace_thread_sem);
        return rc;
}
void trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_down(&trace_thread_sem);
        if (thread_running) {
                printk(KERN_INFO "Shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_up(&trace_thread_sem);
}
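
/* Initialize the arch-specific tracefile state and every CPU's trace buffer
 * bookkeeping; called once before tracing starts. */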
int tracefile_init(void)
{
        struct trace_cpu_data *tcd;
        int i;
        int rc;

        rc = tracefile_init_arch();
        if (rc != 0)
                return rc;

        for (i = 0; i < NR_CPUS; i++) {
                tcd = &trace_data[i].tcd;
                CFS_INIT_LIST_HEAD(&tcd->tcd_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = TCD_MAX_PAGES;
                tcd->tcd_shutting_down = 0;
        }

        return 0;
}
static void trace_cleanup_on_cpu(void *info)
{
        struct trace_cpu_data *tcd;
        struct trace_page *tage;
        struct trace_page *tmp;

        tcd = trace_get_tcd();
        __LASSERT (tcd != NULL);

        tcd->tcd_shutting_down = 1;

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                tage_free(tage);
        }
        tcd->tcd_cur_pages = 0;

        trace_put_tcd(tcd);
}
static void trace_cleanup(void)
{
        struct page_collection pc;

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        spin_lock_init(&pc.pc_lock);

        trace_call_on_all_cpus(trace_cleanup_on_cpu, &pc);

        tracefile_fini_arch();
}

void tracefile_exit(void)