/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>

#define CFS_TRACE_CONSOLE_BUFFER_SIZE	1024
enum cfs_trace_buf_type {
	CFS_TCD_TYPE_PROC = 0,
	CFS_TCD_TYPE_SOFTIRQ,
	CFS_TCD_TYPE_IRQ,
	CFS_TCD_TYPE_CNT
};

union cfs_trace_data_union (*cfs_trace_data[CFS_TCD_TYPE_CNT])[NR_CPUS] __cacheline_aligned;

char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_CNT];
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static DECLARE_RWSEM(cfs_tracefile_sem);
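/*
 * cfs_trace_data is indexed by buffer type and then by CPU, while
 * cfs_trace_console_buffers is indexed by CPU and then by buffer type;
 * cfs_tracefile_sem serialises access to cfs_tracefile and
 * cfs_tracefile_size.
 */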
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);

/* trace file lock routines */
/* The walking argument indicates that the locking comes from the all-tcd-types
 * iterator, so we must take the lock and disable local IRQs to avoid deadlocks
 * with other interrupt locks that might be held. See LU-1311.
 */
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
	__acquires(&tcd->tcd_lock)
{
	__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
	if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
		spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
	else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
		spin_lock_bh(&tcd->tcd_lock);
	else if (unlikely(walking))
		spin_lock_irq(&tcd->tcd_lock);
	else
		spin_lock(&tcd->tcd_lock);
	return 1;
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
	__releases(&tcd->tcd_lock)
{
	__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
	if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
		spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
	else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
		spin_unlock_bh(&tcd->tcd_lock);
	else if (unlikely(walking))
		spin_unlock_irq(&tcd->tcd_lock);
	else
		spin_unlock(&tcd->tcd_lock);
}
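/*
 * cfs_tcd_for_each() walks every allocated tcd: the outer loop runs over
 * buffer types, the inner loop over all possible CPUs.
 * cfs_tcd_for_each_type_lock() visits the tcd of each type for one CPU,
 * taking and releasing the tcd lock in "walking" mode around each iteration.
 */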
#define cfs_tcd_for_each(tcd, i, j)					\
	for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++)	\
		for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);	\
		     j < num_possible_cpus();				\
		     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)

#define cfs_tcd_for_each_type_lock(tcd, i, cpu)			\
	for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i] &&	\
	     (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&			\
	     cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
	if (in_irq())
		return CFS_TCD_TYPE_IRQ;
	if (in_softirq())
		return CFS_TCD_TYPE_SOFTIRQ;
	return CFS_TCD_TYPE_PROC;
}
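/*
 * Return the per-cpu console buffer for the current context (process,
 * softirq or irq); get_cpu() pins the caller to this CPU until the
 * matching put_cpu() is done.
 */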
static inline char *cfs_trace_get_console_buffer(void)
{
	unsigned int i = get_cpu();
	unsigned int j = cfs_trace_buf_idx_get();

	return cfs_trace_console_buffers[i][j];
}
static inline struct cfs_trace_cpu_data *
cfs_trace_get_tcd(void)
{
	struct cfs_trace_cpu_data *tcd =
		&(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;

	cfs_trace_lock_tcd(tcd, 0);

	return tcd;
}

static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
	cfs_trace_unlock_tcd(tcd, 0);

	put_cpu();
}
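/*
 * A "tage" (trace page) couples one kernel page of formatted debug records
 * with the list linkage, owning CPU and buffer type used to route it back
 * to the right tcd.
 */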
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && (current->flags & PF_MEMALLOC))
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (page == NULL)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (tage == NULL) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__LASSERT(tage != NULL);
	__LASSERT(tage->page != NULL);

	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	__LASSERT(tage != NULL);
	__LASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(tage == NULL)) {
				if ((!(current->flags & PF_MEMALLOC) ||
				     in_interrupt()) && printk_ratelimit())
					pr_warn("Lustre: cannot allocate a tage (%ld)\n",
						tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}
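/*
 * Drop roughly 10% of the oldest pages of a tcd when the write-out daemon
 * cannot keep up; the victims are handed to the per-cpu daemon ring via
 * put_pages_on_tcd_daemon_list() so they may still show up in a crash dump.
 */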
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (printk_ratelimit())
		pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
			pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (len > PAGE_SIZE) {
		pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
		       len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage != NULL)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}
static void cfs_set_ptldebug_header(struct ptldebug_header *header,
				    struct libcfs_debug_msg_data *msgdata,
				    unsigned long stack)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	header->ph_subsys = msgdata->msg_subsys;
	header->ph_mask = msgdata->msg_mask;
	header->ph_cpu_id = smp_processor_id();
	header->ph_type = cfs_trace_buf_idx_get();
	/* y2038 safe since all user space treats this as unsigned, but
	 * will overflow in 2106
	 */
	header->ph_sec = (u32)ts.tv_sec;
	header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
	header->ph_stack = stack;
	header->ph_pid = current->pid;
	header->ph_line_num = msgdata->msg_line;
	header->ph_extern_pid = 0;
}
/**
 * tty_write_msg - write a message to a certain tty, not just the console.
 * @tty: the destination tty_struct
 * @msg: the message to write
 *
 * tty_write_message() is not exported, so write a similar function here.
 */
static void tty_write_msg(struct tty_struct *tty, const char *msg)
{
	mutex_lock(&tty->atomic_write_lock);
	if (tty->ops->write && tty->count > 0)
		tty->ops->write(tty, msg, strlen(msg));
	mutex_unlock(&tty->atomic_write_lock);
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}
static void cfs_tty_write_message(const char *prefix, int mask, const char *msg)
{
	struct tty_struct *tty;

	tty = get_current_tty();
	if (!tty)
		return;

	tty_write_msg(tty, prefix);
	if ((mask & D_EMERG) || (mask & D_ERROR))
		tty_write_msg(tty, "Error");
	tty_write_msg(tty, ": ");
	tty_write_msg(tty, msg);
	tty_kref_put(tty);
}
static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
				 const char *buf, int len, const char *file,
				 const char *fn)
{
	char *prefix = "Lustre";

	if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
		prefix = "LNet";

	if (mask & D_CONSOLE) {
		if (mask & D_EMERG)
			pr_emerg("%sError: %.*s", prefix, len, buf);
		else if (mask & D_ERROR)
			pr_err("%sError: %.*s", prefix, len, buf);
		else if (mask & D_WARNING)
			pr_warn("%s: %.*s", prefix, len, buf);
		else if (mask & libcfs_printk)
			pr_info("%s: %.*s", prefix, len, buf);
	} else {
		if (mask & D_EMERG)
			pr_emerg("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
				 hdr->ph_pid, hdr->ph_extern_pid, file,
				 hdr->ph_line_num, fn, len, buf);
		else if (mask & D_ERROR)
			pr_err("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
			       hdr->ph_pid, hdr->ph_extern_pid, file,
			       hdr->ph_line_num, fn, len, buf);
		else if (mask & D_WARNING)
			pr_warn("%s: %d:%d:(%s:%d:%s()) %.*s", prefix,
				hdr->ph_pid, hdr->ph_extern_pid, file,
				hdr->ph_line_num, fn, len, buf);
		else if (mask & (D_CONSOLE | libcfs_printk))
			pr_info("%s: %.*s", prefix, len, buf);
	}

	if (mask & D_TTY)
		cfs_tty_write_message(prefix, mask, buf);
}
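/*
 * Format one debug message into the current CPU's trace buffer and, when
 * the mask asks for console output, echo it to the console as well,
 * ratelimited through the per-site cfs_debug_limit_state (cdls).
 */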
412 int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
413 const char *format, ...)
415 struct cfs_trace_cpu_data *tcd = NULL;
416 struct ptldebug_header header = {0};
417 struct cfs_trace_page *tage;
418 /* string_buf is used only if tcd != NULL, and is always set then */
419 char *string_buf = NULL;
422 int needed = 85; /* seeded with average message length */
426 int mask = msgdata->msg_mask;
427 char *file = (char *)msgdata->msg_file;
428 struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
430 if (strchr(file, '/'))
431 file = strrchr(file, '/') + 1;
433 tcd = cfs_trace_get_tcd();
435 /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
436 * pins us to a particular CPU. This avoids an smp_processor_id()
437 * warning on Linux when debugging is enabled.
439 cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
441 if (!tcd) /* arch may not log in IRQ context */
444 if (tcd->tcd_cur_pages == 0)
445 header.ph_flags |= PH_FLAG_FIRST_RECORD;
447 if (tcd->tcd_shutting_down) {
448 cfs_trace_put_tcd(tcd);
453 known_size = strlen(file) + 1;
455 known_size += strlen(msgdata->msg_fn) + 1;
457 if (libcfs_debug_binary)
458 known_size += sizeof(header);
	/*
	 * The first pass may find the tage buffer too small, in which case
	 * 'needed' is updated from the size reported by vsnprintf() and a
	 * larger tage is requested. On the second pass (retry == 1) use
	 * vscnprintf(), which returns the number of bytes actually written
	 * (not counting the terminating NUL), so that 'needed' means "bytes
	 * written" for the remainder of this function.
	 */
468 for (retry = 0; retry < 2; retry++) {
469 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
471 if (needed + known_size > PAGE_SIZE)
474 cfs_trace_put_tcd(tcd);
479 string_buf = (char *)page_address(tage->page) +
480 tage->used + known_size;
482 max_nob = PAGE_SIZE - tage->used - known_size;
484 pr_emerg("LustreError: negative max_nob: %d\n",
487 cfs_trace_put_tcd(tcd);
492 va_start(ap, format);
494 needed = vscnprintf(string_buf, max_nob, format, ap);
496 needed = vsnprintf(string_buf, max_nob, format, ap);
		if (needed < max_nob) /* printing fit within the buffer */
			break;
503 /* `needed` is actual bytes written to string_buf */
504 if (*(string_buf + needed - 1) != '\n') {
505 pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
506 file, msgdata->msg_line, msgdata->msg_fn);
507 } else if (mask & D_TTY) {
508 /* TTY needs '\r\n' to move carriage to leftmost position */
509 if (needed < 2 || *(string_buf + needed - 2) != '\r')
510 pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n",
511 file, msgdata->msg_line, msgdata->msg_fn);
514 header.ph_len = known_size + needed;
515 debug_buf = (char *)page_address(tage->page) + tage->used;
517 if (libcfs_debug_binary) {
518 memcpy(debug_buf, &header, sizeof(header));
519 tage->used += sizeof(header);
520 debug_buf += sizeof(header);
523 strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
524 tage->used += strlen(file) + 1;
525 debug_buf += strlen(file) + 1;
527 if (msgdata->msg_fn) {
528 strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
529 tage->used += strlen(msgdata->msg_fn) + 1;
530 debug_buf += strlen(msgdata->msg_fn) + 1;
533 __LASSERT(debug_buf == string_buf);
535 tage->used += needed;
536 __LASSERT(tage->used <= PAGE_SIZE);
539 if ((mask & libcfs_printk) == 0) {
540 /* no console output requested */
542 cfs_trace_put_tcd(tcd);
547 if (libcfs_console_ratelimit &&
548 cdls->cdls_next != 0 && /* not first time ever */
549 time_before(jiffies, cdls->cdls_next)) {
550 /* skipping a console message */
553 cfs_trace_put_tcd(tcd);
557 if (time_after(jiffies, cdls->cdls_next +
558 libcfs_console_max_delay +
559 cfs_time_seconds(10))) {
560 /* last timeout was a long time ago */
561 cdls->cdls_delay /= libcfs_console_backoff * 4;
563 cdls->cdls_delay *= libcfs_console_backoff;
566 if (cdls->cdls_delay < libcfs_console_min_delay)
567 cdls->cdls_delay = libcfs_console_min_delay;
568 else if (cdls->cdls_delay > libcfs_console_max_delay)
569 cdls->cdls_delay = libcfs_console_max_delay;
571 /* ensure cdls_next is never zero after it's been seen */
572 cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
576 cfs_print_to_console(&header, mask, string_buf, needed, file,
578 cfs_trace_put_tcd(tcd);
580 string_buf = cfs_trace_get_console_buffer();
582 va_start(ap, format);
583 needed = vscnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
587 cfs_print_to_console(&header, mask,
588 string_buf, needed, file, msgdata->msg_fn);
593 if (cdls != NULL && cdls->cdls_count != 0) {
594 string_buf = cfs_trace_get_console_buffer();
596 needed = scnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
597 "Skipped %d previous similar message%s\n",
599 (cdls->cdls_count > 1) ? "s" : "");
		/* Do not allow this to be printed to the TTY */
602 cfs_print_to_console(&header, mask & ~D_TTY, string_buf,
603 needed, file, msgdata->msg_fn);
606 cdls->cdls_count = 0;
611 EXPORT_SYMBOL(libcfs_debug_msg);
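/*
 * Called when an assertion fails: mark the panic as in progress, print the
 * assertion text straight to the console and panic the machine.
 */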
void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");
}
static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic. If this isn't true for
	 * some arch, this will have to be implemented separately in each
	 * arch.
	 */
	struct cfs_trace_cpu_data *tcd;
	int i;
	int j;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages,
					 &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;

			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}
static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}

static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon.
 */
722 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
723 struct cfs_trace_cpu_data *tcd)
725 struct cfs_trace_page *tage;
726 struct cfs_trace_page *tmp;
728 list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
729 __LASSERT_TAGE_INVARIANT(tage);
731 if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
734 cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
735 tcd->tcd_cur_daemon_pages++;
737 if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
738 struct cfs_trace_page *victim;
740 __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
741 victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
743 __LASSERT_TAGE_INVARIANT(victim);
745 list_del(&victim->linkage);
746 cfs_tage_free(victim);
747 tcd->tcd_cur_daemon_pages--;
static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}
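/*
 * Print every collected trace page (including the daemon ring) straight to
 * the console and free the pages.
 */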
763 void cfs_trace_debug_print(void)
765 struct page_collection pc;
766 struct cfs_trace_page *tage;
767 struct cfs_trace_page *tmp;
769 pc.pc_want_daemon_pages = 1;
771 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
775 __LASSERT_TAGE_INVARIANT(tage);
778 p = page_address(page);
779 while (p < ((char *)page_address(page) + tage->used)) {
780 struct ptldebug_header *hdr;
785 p += strlen(file) + 1;
788 len = hdr->ph_len - (int)(p - (char *)hdr);
790 cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
795 list_del(&tage->linkage);
800 int cfs_tracefile_dump_all_pages(char *filename)
802 struct page_collection pc;
804 struct cfs_trace_page *tage;
805 struct cfs_trace_page *tmp;
809 down_write(&cfs_tracefile_sem);
811 filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
815 pr_err("LustreError: can't open %s for dump: rc = %d\n",
820 pc.pc_want_daemon_pages = 1;
822 if (list_empty(&pc.pc_pages)) {
827 /* ok, for now, just write the pages. in the future we'll be building
828 * iobufs with the pages and calling generic_direct_IO */
829 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
831 __LASSERT_TAGE_INVARIANT(tage);
833 buf = kmap(tage->page);
834 rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
836 if (rc != (int)tage->used) {
837 pr_warn("Lustre: wanted to write %u but wrote %d\n",
840 __LASSERT(list_empty(&pc.pc_pages));
843 list_del(&tage->linkage);
847 rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
849 pr_err("LustreError: sync returns: rc = %d\n", rc);
851 filp_close(filp, NULL);
853 up_write(&cfs_tracefile_sem);
857 void cfs_trace_flush_pages(void)
859 struct page_collection pc;
860 struct cfs_trace_page *tage;
862 pc.pc_want_daemon_pages = 1;
864 while (!list_empty(&pc.pc_pages)) {
865 tage = list_first_entry(&pc.pc_pages,
866 struct cfs_trace_page, linkage);
867 __LASSERT_TAGE_INVARIANT(tage);
869 list_del(&tage->linkage);
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (--nob >= 0)			/* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0)				/* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob)		/* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0;		/* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries
	 */
	int nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_SIZE)	/* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (*str == NULL)
		return -ENOMEM;

	return 0;
}
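/*
 * Copy the target filename in from userspace, NUL-terminate it and dump
 * every trace page to that file via cfs_tracefile_dump_all_pages().
 */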
938 int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
943 rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
947 rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
948 usr_str, usr_str_nob);
956 rc = cfs_tracefile_dump_all_pages(str);
int cfs_trace_daemon_command(char *str)
{
	int rc = 0;

	down_write(&cfs_tracefile_sem);

	if (strcmp(str, "stop") == 0) {
		up_write(&cfs_tracefile_sem);
		cfs_trace_stop_thread();
		down_write(&cfs_tracefile_sem);
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		unsigned long tmp;

		rc = kstrtoul(str + 5, 10, &tmp);
		if (!rc) {
			if (tmp < 10 || tmp > 20480)
				cfs_tracefile_size = CFS_TRACEFILE_SIZE;
			else
				cfs_tracefile_size = tmp << 20;
		}
	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
			cfs_tracefile, (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	up_write(&cfs_tracefile_sem);
	return rc;
}
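/*
 * Userspace wrapper: copy the command string in from usr_str and hand it
 * to cfs_trace_daemon_command().
 */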
1001 int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
1006 rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
1010 rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
1011 usr_str, usr_str_nob);
1013 rc = cfs_trace_daemon_command(str);
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	unsigned long pages;
	unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
	unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
			mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
			mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);

	down_write(&cfs_tracefile_sem);

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	up_write(&cfs_tracefile_sem);

	return mb;
}
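/* Report the current total debug buffer size, rounded up to whole MB. */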
int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	down_read(&cfs_tracefile_sem);

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	up_read(&cfs_tracefile_sem);

	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
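/*
 * tracefiled() is the body of the "ktracefiled" kthread: roughly once a
 * second (or when woken) it collects the filled trace pages and appends
 * them to cfs_tracefile, putting them on the daemon ring afterwards.
 */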
1070 static int tracefiled(void *arg)
1072 struct page_collection pc;
1073 struct tracefiled_ctl *tctl = arg;
1074 struct cfs_trace_page *tage;
1075 struct cfs_trace_page *tmp;
1081 /* we're started late enough that we pick up init's fs context */
1082 /* this is so broken in uml? what on earth is going on? */
1084 complete(&tctl->tctl_start);
1086 pc.pc_want_daemon_pages = 0;
1088 while (!last_loop) {
1089 wait_event_timeout(tctl->tctl_waitq,
1090 ({ collect_pages(&pc);
1091 !list_empty(&pc.pc_pages); }) ||
1092 atomic_read(&tctl->tctl_shutdown),
1093 cfs_time_seconds(1));
1094 if (atomic_read(&tctl->tctl_shutdown))
1096 if (list_empty(&pc.pc_pages))
1100 down_read(&cfs_tracefile_sem);
1101 if (cfs_tracefile[0] != 0) {
1102 filp = filp_open(cfs_tracefile,
1103 O_CREAT | O_RDWR | O_LARGEFILE,
1108 pr_warn("Lustre: couldn't open %s: rc = %d\n",
1112 up_read(&cfs_tracefile_sem);
1114 put_pages_on_daemon_list(&pc);
1115 __LASSERT(list_empty(&pc.pc_pages));
1119 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1120 struct dentry *de = file_dentry(filp);
1121 static loff_t f_pos;
1123 __LASSERT_TAGE_INVARIANT(tage);
1125 if (f_pos >= (off_t)cfs_tracefile_size)
1127 else if (f_pos > i_size_read(de->d_inode))
1128 f_pos = i_size_read(de->d_inode);
1130 buf = kmap(tage->page);
1131 rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
1133 if (rc != (int)tage->used) {
1134 pr_warn("Lustre: wanted to write %u but wrote %d\n",
1136 put_pages_back(&pc);
1137 __LASSERT(list_empty(&pc.pc_pages));
1142 filp_close(filp, NULL);
1143 put_pages_on_daemon_list(&pc);
1144 if (!list_empty(&pc.pc_pages)) {
1147 pr_alert("Lustre: trace pages aren't empty\n");
1148 pr_err("Lustre: total cpus(%d): ", num_possible_cpus());
1149 for (i = 0; i < num_possible_cpus(); i++)
1151 pr_cont("%d(on) ", i);
1153 pr_cont("%d(off) ", i);
1157 list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
1159 pr_err("Lustre: page %d belongs to cpu %d\n",
1161 pr_err("Lustre: There are %d pages unwritten\n", i);
1163 __LASSERT(list_empty(&pc.pc_pages));
1165 complete(&tctl->tctl_stop);
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
		rc = -ECHILD;
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}
void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		pr_info("Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}
/* percentage of the total debug memory reserved for each tcd type */
static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = {
	80,	/* 80% pages for CFS_TCD_TYPE_PROC */
	10,	/* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
	10	/* 10% pages for CFS_TCD_TYPE_IRQ */
};
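/*
 * Allocate the per-cpu trace data for every buffer type and the per-cpu
 * console buffers, splitting max_pages between the types according to
 * pages_factor; everything is torn down again on failure.
 */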
1216 int cfs_tracefile_init(int max_pages)
1218 struct cfs_trace_cpu_data *tcd;
1222 /* initialize trace_data */
1223 memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
1224 for (i = 0; i < CFS_TCD_TYPE_CNT; i++) {
1226 kmalloc_array(num_possible_cpus(),
1227 sizeof(union cfs_trace_data_union),
1229 if (!cfs_trace_data[i])
1230 goto out_trace_data;
1233 /* arch related info initialized */
1234 cfs_tcd_for_each(tcd, i, j) {
1235 int factor = pages_factor[i];
1237 spin_lock_init(&tcd->tcd_lock);
1238 tcd->tcd_pages_factor = factor;
1242 INIT_LIST_HEAD(&tcd->tcd_pages);
1243 INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1244 INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
1245 tcd->tcd_cur_pages = 0;
1246 tcd->tcd_cur_stock_pages = 0;
1247 tcd->tcd_cur_daemon_pages = 0;
1248 tcd->tcd_max_pages = (max_pages * factor) / 100;
1249 LASSERT(tcd->tcd_max_pages > 0);
1250 tcd->tcd_shutting_down = 0;
1253 for (i = 0; i < num_possible_cpus(); i++)
1254 for (j = 0; j < 3; j++) {
1255 cfs_trace_console_buffers[i][j] =
1256 kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
1258 if (!cfs_trace_console_buffers[i][j])
1265 for (i = 0; i < num_possible_cpus(); i++)
1266 for (j = 0; j < 3; j++) {
1267 kfree(cfs_trace_console_buffers[i][j]);
1268 cfs_trace_console_buffers[i][j] = NULL;
1271 for (i = 0; cfs_trace_data[i]; i++) {
1272 kfree(cfs_trace_data[i]);
1273 cfs_trace_data[i] = NULL;
1275 pr_err("lnet: Not enough memory\n");
1279 static void trace_cleanup_on_all_cpus(void)
1281 struct cfs_trace_cpu_data *tcd;
1282 struct cfs_trace_page *tage;
1285 for_each_possible_cpu(cpu) {
1286 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
1287 if (!tcd->tcd_pages_factor)
1288 /* Not initialised */
1290 tcd->tcd_shutting_down = 1;
1292 while (!list_empty(&tcd->tcd_pages)) {
1293 tage = list_first_entry(&tcd->tcd_pages,
1294 struct cfs_trace_page,
1296 __LASSERT_TAGE_INVARIANT(tage);
1298 list_del(&tage->linkage);
1299 cfs_tage_free(tage);
1301 tcd->tcd_cur_pages = 0;
1306 static void cfs_trace_cleanup(void)
1308 struct page_collection pc;
1312 INIT_LIST_HEAD(&pc.pc_pages);
1314 trace_cleanup_on_all_cpus();
1316 for (i = 0; i < num_possible_cpus(); i++)
1317 for (j = 0; j < 3; j++) {
1318 kfree(cfs_trace_console_buffers[i][j]);
1319 cfs_trace_console_buffers[i][j] = NULL;
1322 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) {
1323 kfree(cfs_trace_data[i]);
1324 cfs_trace_data[i] = NULL;
1328 void cfs_tracefile_exit(void)
1330 cfs_trace_stop_thread();
1331 cfs_trace_cleanup();