/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/tty.h>
#include <linux/uaccess.h>

#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>

#define CFS_TRACE_CONSOLE_BUFFER_SIZE	1024
#define TCD_MAX_TYPES			8

enum cfs_trace_buf_type {
        CFS_TCD_TYPE_PROC = 0,
        CFS_TCD_TYPE_SOFTIRQ,
        CFS_TCD_TYPE_IRQ,
        CFS_TCD_TYPE_CNT
};

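/*
 * Trace data is kept per context type and per CPU: cfs_trace_data[type]
 * points at an NR_CPUS-sized array of cfs_trace_data_union, so each CPU
 * owns a private buffer for every context it can log from and writers on
 * different CPUs never contend.
 */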
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_CNT];
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static DECLARE_RWSEM(cfs_tracefile_sem);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);

/* trace file lock routines */

/* The walking argument indicates the locking came from the all-tcd-types
 * iterator; in that case we must take the lock with local IRQs disabled
 * to avoid deadlocking against locks taken in interrupt context.
 * See LU-1311.
 */
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
        __acquires(&tcd->tcd_lock)
{
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
                spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
                spin_lock_bh(&tcd->tcd_lock);
        else if (unlikely(walking))
                spin_lock_irq(&tcd->tcd_lock);
        else
                spin_lock(&tcd->tcd_lock);
        return 1;
}

void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
        __releases(&tcd->tcd_lock)
{
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
                spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
                spin_unlock_bh(&tcd->tcd_lock);
        else if (unlikely(walking))
                spin_unlock_irq(&tcd->tcd_lock);
        else
                spin_unlock(&tcd->tcd_lock);
}

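/*
 * cfs_trace_lock_tcd() cannot fail and always returns 1; the nonzero
 * return lets it sit in the loop condition of cfs_tcd_for_each_type_lock()
 * below, so each tcd stays locked for exactly one iteration.
 */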
#define cfs_tcd_for_each(tcd, i, j)					\
        for (i = 0; cfs_trace_data[i]; i++)				\
                for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);	\
                     j < num_possible_cpus();				\
                     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)

#define cfs_tcd_for_each_type_lock(tcd, i, cpu)			\
        for (i = 0; cfs_trace_data[i] &&				\
             (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&			\
             cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)

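/* Map the current execution context to the matching trace buffer type. */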
enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
        if (in_irq())
                return CFS_TCD_TYPE_IRQ;
        if (in_softirq())
                return CFS_TCD_TYPE_SOFTIRQ;
        return CFS_TCD_TYPE_PROC;
}

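/*
 * Return this CPU's console staging buffer for the current context.
 * Note that get_cpu() disables preemption here, so callers are expected
 * to issue a matching put_cpu() once they are done with the buffer.
 */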
static inline char *cfs_trace_get_console_buffer(void)
{
        unsigned int i = get_cpu();
        unsigned int j = cfs_trace_buf_idx_get();

        return cfs_trace_console_buffers[i][j];
}

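/*
 * Pick the tcd for the current CPU and context and take its lock; pairs
 * with cfs_trace_put_tcd(), which unlocks the tcd and re-enables
 * preemption via put_cpu().
 */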
static inline struct cfs_trace_cpu_data *
cfs_trace_get_tcd(void)
{
        struct cfs_trace_cpu_data *tcd =
                &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;

        cfs_trace_lock_tcd(tcd, 0);

        return tcd;
}

static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
        cfs_trace_unlock_tcd(tcd, 0);

        put_cpu();
}

static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}

static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
        struct page *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && (current->flags & PF_MEMALLOC))
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);
        return tage;
}

static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}

static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
                                if ((!(current->flags & PF_MEMALLOC) ||
                                     in_interrupt()) && printk_ratelimit())
                                        pr_warn("Lustre: cannot allocate a tage (%ld)\n",
                                                tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}

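/*
 * Discard roughly 10% of the oldest pages when a tcd fills faster than
 * the daemon can drain it. The victims are parked on the per-tcd daemon
 * list, so they can still be dumped if an LBUG follows.
 */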
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
                        pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}

/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_SIZE) {
                pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
                       len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}

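/* Fill in the per-message header that precedes every record in the trace
 * buffer: subsystem, mask, CPU, context type, wallclock time and source
 * location.
 */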
static void cfs_set_ptldebug_header(struct ptldebug_header *header,
                                    struct libcfs_debug_msg_data *msgdata,
                                    unsigned long stack)
{
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);

        header->ph_subsys = msgdata->msg_subsys;
        header->ph_mask = msgdata->msg_mask;
        header->ph_cpu_id = smp_processor_id();
        header->ph_type = cfs_trace_buf_idx_get();
        /* y2038 safe since all user space treats this as unsigned, but
         * will overflow in 2106
         */
        header->ph_sec = (u32)ts.tv_sec;
        header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
        header->ph_stack = stack;
        header->ph_pid = current->pid;
        header->ph_line_num = msgdata->msg_line;
        header->ph_extern_pid = 0;
}

/**
 * tty_write_msg - write a message to a certain tty, not just the console.
 * @tty: the destination tty_struct
 * @msg: the message to write
 *
 * tty_write_message() is not exported, so provide an equivalent here.
 */
static void tty_write_msg(struct tty_struct *tty, const char *msg)
{
        mutex_lock(&tty->atomic_write_lock);
        tty_lock(tty);
        if (tty->ops->write && tty->count > 0)
                tty->ops->write(tty, msg, strlen(msg));
        tty_unlock(tty);
        mutex_unlock(&tty->atomic_write_lock);
        wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}

static void cfs_tty_write_message(const char *prefix, int mask, const char *msg)
{
        struct tty_struct *tty;

        tty = get_current_tty();
        if (!tty)
                return;

        tty_write_msg(tty, prefix);
        if ((mask & D_EMERG) || (mask & D_ERROR))
                tty_write_msg(tty, "Error");
        tty_write_msg(tty, ": ");
        tty_write_msg(tty, msg);
        tty_kref_put(tty);
}

static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
                                 const char *buf, int len, const char *file,
                                 const char *fn)
{
        char *prefix = "Lustre";

        if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
                prefix = "LNet";

        if (mask & D_CONSOLE) {
                if (mask & D_EMERG)
                        pr_emerg("%sError: %.*s", prefix, len, buf);
                else if (mask & D_ERROR)
                        pr_err("%sError: %.*s", prefix, len, buf);
                else if (mask & D_WARNING)
                        pr_warn("%s: %.*s", prefix, len, buf);
                else if (mask & libcfs_printk)
                        pr_info("%s: %.*s", prefix, len, buf);
        } else {
                if (mask & D_EMERG)
                        pr_emerg("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
                                 hdr->ph_pid, hdr->ph_extern_pid, file,
                                 hdr->ph_line_num, fn, len, buf);
                else if (mask & D_ERROR)
                        pr_err("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
                               hdr->ph_pid, hdr->ph_extern_pid, file,
                               hdr->ph_line_num, fn, len, buf);
                else if (mask & D_WARNING)
                        pr_warn("%s: %d:%d:(%s:%d:%s()) %.*s", prefix,
                                hdr->ph_pid, hdr->ph_extern_pid, file,
                                hdr->ph_line_num, fn, len, buf);
                else if (mask & (D_CONSOLE | libcfs_printk))
                        pr_info("%s: %.*s", prefix, len, buf);
        }

        if (mask & D_TTY)
                cfs_tty_write_message(prefix, mask, buf);
}

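/*
 * Core entry point for the CDEBUG()/CERROR() macros: format the message
 * into the current CPU's trace page and, if the mask warrants console
 * output, emit a rate-limited copy via cfs_print_to_console().
 */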
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header header = {0};
        struct cfs_trace_page *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char *string_buf = NULL;
        char *debug_buf;
        int known_size;
        int needed = 85; /* seeded with average message length */
        int max_nob;
        va_list ap;
        int retry;
        int mask = msgdata->msg_mask;
        char *file = (char *)msgdata->msg_file;
        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU. This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled.
         */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (!tcd)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        known_size = strlen(file) + 1;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * May perform an additional pass to update 'needed' and increase
         * tage buffer size to match vsnprintf reported size required
         * On the second pass (retry=1) use vscnprintf [which returns
         * number of bytes written not including the terminating nul]
         * to clarify `needed` is used as number of bytes written
         * for the remainder of this function
         */
        for (retry = 0; retry < 2; retry++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (!tage) {
                        if (needed + known_size > PAGE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        pr_emerg("LustreError: negative max_nob: %d\n",
                                 max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                va_start(ap, format);
                if (retry)
                        needed = vscnprintf(string_buf, max_nob, format, ap);
                else
                        needed = vsnprintf(string_buf, max_nob, format, ap);
                va_end(ap);

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        /* `needed` is actual bytes written to string_buf */
        if (*(string_buf + needed - 1) != '\n') {
                pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
                        file, msgdata->msg_line, msgdata->msg_fn);
        } else if (mask & D_TTY) {
                /* TTY needs '\r\n' to move carriage to leftmost position */
                if (needed < 2 || *(string_buf + needed - 2) != '\r')
                        pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n",
                                file, msgdata->msg_line, msgdata->msg_fn);
        }

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    time_before(jiffies, cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (time_after(jiffies, cdls->cdls_next +
                               libcfs_console_max_delay +
                               cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                va_start(ap, format);
                needed = vscnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                    format, ap);
                va_end(ap);

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                put_cpu();
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = scnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                   "Skipped %d previous similar message%s\n",
                                   cdls->cdls_count,
                                   (cdls->cdls_count > 1) ? "s" : "");

                /* Do not print this one to the TTY */
                cfs_print_to_console(&header, mask & ~D_TTY, string_buf,
                                     needed, file, msgdata->msg_fn);

                put_cpu();
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_msg);

void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        mb();

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");

        /* not reached */
}

static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch.
         */
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages,
                                         &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}

static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;

                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                 &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}

static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}

static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}

static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}

/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon.
 */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}

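/* Replay every collected trace page (daemon pages included) to the
 * console at D_EMERG priority, then free the pages.
 */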
void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

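/* Collect all trace pages, daemon pages included, and write them to
 * @filename, which must not already exist (the file is opened O_EXCL).
 */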
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection pc;
        struct file *filp;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        char *buf;
        int rc;

        down_write(&cfs_tracefile_sem);

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                pr_err("LustreError: can't open %s for dump: rc = %d\n",
                       filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                buf = kmap(tage->page);
                rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
                kunmap(tage->page);
                if (rc != (int)tage->used) {
                        pr_warn("Lustre: wanted to write %u but wrote %d\n",
                                tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }

        rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
        if (rc)
                pr_err("LustreError: sync returns: rc = %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        up_write(&cfs_tracefile_sem);
        return rc;
}

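/* Collect and free every trace page without writing it anywhere. */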
void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}

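/* Copy a string in from user space, strip trailing whitespace and make
 * sure the result fits into the kernel buffer and is NUL-terminated.
 */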
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char __user *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (--nob >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);

int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n" for /proc entries and
         * "" (i.e. a terminating zero byte) for sysctl entries
         */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);

int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_SIZE)        /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
        rc = cfs_tracefile_dump_all_pages(str);
out:
        kfree(str);
        return rc;
}

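/*
 * Interpret a debug daemon control string: "stop" shuts the daemon down,
 * "size=<MB>" resizes the trace file (values outside 10..20480 fall back
 * to the default), and an absolute pathname starts the daemon writing
 * there.
 */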
int cfs_trace_daemon_command(char *str)
{
        int rc = 0;

        down_write(&cfs_tracefile_sem);

        if (strcmp(str, "stop") == 0) {
                up_write(&cfs_tracefile_sem);
                cfs_trace_stop_thread();
                down_write(&cfs_tracefile_sem);
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                unsigned long tmp;

                rc = kstrtoul(str + 5, 10, &tmp);
                if (!rc) {
                        if (tmp < 10 || tmp > 20480)
                                cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                        else
                                cfs_tracefile_size = tmp << 20;
                }
        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
        } else if (str[0] != '/') {
                rc = -EINVAL;
        } else {
                strcpy(cfs_tracefile, str);

                pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
                        cfs_tracefile, (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        up_write(&cfs_tracefile_sem);
        return rc;
}

int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
        char *str;
        int rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        kfree(str);
        return rc;
}

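/*
 * Size the trace buffers: the requested total is clamped, split evenly
 * across CPUs, converted to pages, and then divided among the buffer
 * types according to the pages_factor percentages.
 */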
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        unsigned long pages;
        unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
        unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
                        mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
                        mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_SHIFT);

        down_write(&cfs_tracefile_sem);

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        up_write(&cfs_tracefile_sem);

        return mb;
}

int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        down_read(&cfs_tracefile_sem);

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        up_read(&cfs_tracefile_sem);

        return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}

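/*
 * tracefiled() is the debug daemon: once a second, or sooner when woken,
 * it collects the accumulated trace pages, appends them to cfs_tracefile
 * and recycles them onto the per-CPU daemon lists.
 */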
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        struct file *filp;
        char *buf;
        int last_loop = 0;
        int rc;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml?  what on earth is going on? */

        complete(&tctl->tctl_start);

        while (1) {
                wait_queue_entry_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                down_read(&cfs_tracefile_sem);
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                pr_warn("Lustre: couldn't open %s: rc = %d\n",
                                        cfs_tracefile, rc);
                        }
                }
                up_read(&cfs_tracefile_sem);
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        struct dentry *de = file_dentry(filp);
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > i_size_read(de->d_inode))
                                f_pos = i_size_read(de->d_inode);

                        buf = kmap(tage->page);
                        rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
                        kunmap(tage->page);
                        if (rc != (int)tage->used) {
                                pr_warn("Lustre: wanted to write %u but wrote %d\n",
                                        tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        pr_alert("Lustre: trace pages aren't empty\n");
                        pr_err("Lustre: total cpus(%d): ", num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        pr_cont("%d(on) ", i);
                                else
                                        pr_cont("%d(off) ", i);
                        pr_cont("\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                pr_err("Lustre: page %d belongs to cpu %d\n",
                                       ++i, tage->cpu);
                        pr_err("Lustre: There are %d pages unwritten\n", i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                schedule_timeout_interruptible(cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}

int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}

void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                pr_info("Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}

/* percents to share the total debug memory for each type */
static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = {
        80,     /* 80% pages for CFS_TCD_TYPE_PROC */
        10,     /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
        10      /* 10% pages for CFS_TCD_TYPE_IRQ */
};

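/*
 * Allocate the per-CPU trace data for every buffer type plus one console
 * staging buffer per CPU per type; max_pages is shared out according to
 * pages_factor above.
 */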
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;

        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
        for (i = 0; i < CFS_TCD_TYPE_CNT; i++) {
                cfs_trace_data[i] =
                        kmalloc_array(num_possible_cpus(),
                                      sizeof(union cfs_trace_data_union),
                                      GFP_KERNEL);
                if (!cfs_trace_data[i])
                        goto out_trace_data;
        }

        /* arch related info initialized */
        cfs_tcd_for_each(tcd, i, j) {
                int factor = pages_factor[i];

                spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = factor;
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;

                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }

        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        cfs_trace_console_buffers[i][j] =
                                kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                        GFP_KERNEL);
                        if (!cfs_trace_console_buffers[i][j])
                                goto out_buffers;
                }

        return 0;

out_buffers:
        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        kfree(cfs_trace_console_buffers[i][j]);
                        cfs_trace_console_buffers[i][j] = NULL;
                }
out_trace_data:
        for (i = 0; cfs_trace_data[i]; i++) {
                kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
        pr_err("lnet: Not enough memory\n");
        return -ENOMEM;
}

static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        if (!tcd->tcd_pages_factor)
                                /* Not initialised */
                                continue;
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }
                        tcd->tcd_cur_pages = 0;
                }
        }
}

static void cfs_trace_cleanup(void)
{
        struct page_collection pc;
        int i;
        int j;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        for (i = 0; i < num_possible_cpus(); i++)
                for (j = 0; j < 3; j++) {
                        kfree(cfs_trace_console_buffers[i][j]);
                        cfs_trace_console_buffers[i][j] = NULL;
                }

        for (i = 0; cfs_trace_data[i]; i++) {
                kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
}

void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}