/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <libcfs/linux/linux-fs.h>
#include <libcfs/libcfs.h>
#define TCD_MAX_TYPES 8

union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;

static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static DECLARE_RWSEM(cfs_tracefile_sem);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);
/* trace file lock routines */
/* The walking argument indicates that the locking comes from the all-tcd-types
 * iterator, and we must disable local IRQs to avoid deadlocks with other
 * interrupt locks that might be taken. See LU-1311
 */
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
	__acquires(&tcd->tcd_lock)
{
	__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
	if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
		spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
	else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
		spin_lock_bh(&tcd->tcd_lock);
	else if (unlikely(walking))
		spin_lock_irq(&tcd->tcd_lock);
	else
		spin_lock(&tcd->tcd_lock);
	return 1;
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
	__releases(&tcd->tcd_lock)
{
	__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
	if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
		spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
	else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
		spin_unlock_bh(&tcd->tcd_lock);
	else if (unlikely(walking))
		spin_unlock_irq(&tcd->tcd_lock);
	else
		spin_unlock(&tcd->tcd_lock);
}
#define cfs_tcd_for_each(tcd, i, j)					\
	for (i = 0; cfs_trace_data[i]; i++)				\
		for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd);	\
		     j < num_possible_cpus();				\
		     j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)

#define cfs_tcd_for_each_type_lock(tcd, i, cpu)			\
	for (i = 0; cfs_trace_data[i] &&				\
	     (tcd = &(*cfs_trace_data[i])[cpu].tcd) &&			\
	     cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
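
/*
 * A minimal usage sketch of these iterators (illustrative only, not a new
 * API): walk every tcd across all types and possible CPUs, e.g. to sum
 * page counts, assuming the arrays were set up by cfs_tracefile_init():
 *
 *	struct cfs_trace_cpu_data *tcd;
 *	int i, j, pages = 0;
 *
 *	cfs_tcd_for_each(tcd, i, j)
 *		pages += tcd->tcd_cur_pages;
 *
 * cfs_tcd_for_each_type_lock() walks the types for a single CPU and
 * takes/drops the tcd lock in "walking" mode around each step (see
 * cfs_trace_lock_tcd() above).
 */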
enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
	if (in_irq())
		return CFS_TCD_TYPE_IRQ;
	if (in_softirq())
		return CFS_TCD_TYPE_SOFTIRQ;
	return CFS_TCD_TYPE_PROC;
}
static inline struct cfs_trace_cpu_data *
cfs_trace_get_tcd(void)
{
	struct cfs_trace_cpu_data *tcd =
		&(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;

	cfs_trace_lock_tcd(tcd, 0);
	return tcd;
}

static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
	cfs_trace_unlock_tcd(tcd, 0);
	put_cpu();
}
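
/*
 * Illustrative pairing (not a new API): cfs_trace_get_tcd() pins the
 * caller to the current CPU via get_cpu() and takes the tcd lock, so a
 * writer must release with cfs_trace_put_tcd() before doing anything
 * that may sleep:
 *
 *	struct cfs_trace_cpu_data *tcd = cfs_trace_get_tcd();
 *
 *	if (tcd != NULL) {
 *		... stage a message into tcd->tcd_pages ...
 *		cfs_trace_put_tcd(tcd);
 *	}
 */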
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && (current->flags & PF_MEMALLOC))
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (page == NULL)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (tage == NULL) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__LASSERT(tage != NULL);
	__LASSERT(tage->page != NULL);

	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	__LASSERT(tage != NULL);
	__LASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(tage == NULL)) {
				if ((!(current->flags & PF_MEMALLOC) ||
				     in_interrupt()) && printk_ratelimit())
					pr_warn("Lustre: cannot allocate a tage (%ld)\n",
						tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (printk_ratelimit())
		pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
			pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	if (len > PAGE_SIZE) {
		pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
		       len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage != NULL)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}
static void cfs_set_ptldebug_header(struct ptldebug_header *header,
				    struct libcfs_debug_msg_data *msgdata,
				    unsigned long stack)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	header->ph_subsys = msgdata->msg_subsys;
	header->ph_mask = msgdata->msg_mask;
	header->ph_cpu_id = smp_processor_id();
	header->ph_type = cfs_trace_buf_idx_get();
	/* y2038 safe since all user space treats this as unsigned, but
	 * will overflow in 2106
	 */
	header->ph_sec = (u32)ts.tv_sec;
	header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
	header->ph_stack = stack;
	header->ph_pid = current->pid;
	header->ph_line_num = msgdata->msg_line;
	header->ph_extern_pid = 0;
}
/**
 * tty_write_msg - write a message to a certain tty, not just the console.
 * @tty: the destination tty_struct
 * @msg: the message to write
 *
 * tty_write_message is not exported, so provide an equivalent here.
 */
static void tty_write_msg(struct tty_struct *tty, const char *msg)
{
	mutex_lock(&tty->atomic_write_lock);
	tty_lock(tty);
	if (tty->ops->write && tty->count > 0)
		tty->ops->write(tty, msg, strlen(msg));
	tty_unlock(tty);
	mutex_unlock(&tty->atomic_write_lock);
	wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
}
static void cfs_tty_write_message(const char *prefix, int mask, const char *msg)
{
	struct tty_struct *tty;

	tty = get_current_tty();
	if (!tty)
		return;

	tty_write_msg(tty, prefix);
	if ((mask & D_EMERG) || (mask & D_ERROR))
		tty_write_msg(tty, "Error");
	tty_write_msg(tty, ": ");
	tty_write_msg(tty, msg);
	tty_kref_put(tty);
}
static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
				 const char *buf, int len, const char *file,
				 const char *fn)
{
	char *prefix = "Lustre";

	if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
		prefix = "LNet";

	if (mask & D_CONSOLE) {
		if (mask & D_EMERG)
			pr_emerg("%sError: %.*s", prefix, len, buf);
		else if (mask & D_ERROR)
			pr_err("%sError: %.*s", prefix, len, buf);
		else if (mask & D_WARNING)
			pr_warn("%s: %.*s", prefix, len, buf);
		else if (mask & libcfs_printk)
			pr_info("%s: %.*s", prefix, len, buf);
	} else {
		if (mask & D_EMERG)
			pr_emerg("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
				 hdr->ph_pid, hdr->ph_extern_pid, file,
				 hdr->ph_line_num, fn, len, buf);
		else if (mask & D_ERROR)
			pr_err("%sError: %d:%d:(%s:%d:%s()) %.*s", prefix,
			       hdr->ph_pid, hdr->ph_extern_pid, file,
			       hdr->ph_line_num, fn, len, buf);
		else if (mask & D_WARNING)
			pr_warn("%s: %d:%d:(%s:%d:%s()) %.*s", prefix,
				hdr->ph_pid, hdr->ph_extern_pid, file,
				hdr->ph_line_num, fn, len, buf);
		else if (mask & (D_CONSOLE | libcfs_printk))
			pr_info("%s: %.*s", prefix, len, buf);
	}

	if (mask & D_TTY)
		cfs_tty_write_message(prefix, mask, buf);
}
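
/*
 * For reference, the non-D_CONSOLE format strings above render console
 * lines such as the following (values are hypothetical):
 *
 *	LustreError: 4321:0:(tracefile.c:123:cfs_tage_alloc()) message text
 *
 * i.e. prefix, ph_pid:ph_extern_pid, (file:line:function()) and the
 * message body.
 */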
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header header = {0};
	struct cfs_trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85;	/* seeded with average message length */
	int max_nob;
	va_list ap;
	int retry;
	int mask = msgdata->msg_mask;
	char *file = (char *)msgdata->msg_file;
	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

	if (strchr(file, '/'))
		file = strrchr(file, '/') + 1;

	tcd = cfs_trace_get_tcd();

	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
	 * pins us to a particular CPU. This avoids an smp_processor_id()
	 * warning on Linux when debugging is enabled.
	 */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

	if (!tcd)		/* arch may not log in IRQ context */
		goto console;

	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;

	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	known_size = strlen(file) + 1;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;

	if (libcfs_debug_binary)
		known_size += sizeof(header);

	/*
	 * May perform an additional pass to update 'needed' and increase the
	 * tage buffer size to match the size vsnprintf reports as required.
	 * On the second pass (retry=1) use vscnprintf (which returns the
	 * number of bytes written, not including the terminating NUL), so
	 * that 'needed' holds the number of bytes actually written for the
	 * remainder of this function.
	 */
	for (retry = 0; retry < 2; retry++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (!tage) {
			if (needed + known_size > PAGE_SIZE)
				mask |= D_ERROR;

			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
			     tage->used + known_size;

		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			pr_emerg("LustreError: negative max_nob: %d\n",
				 max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		va_start(ap, format);
		if (retry)
			needed = vscnprintf(string_buf, max_nob, format, ap);
		else
			needed = vsnprintf(string_buf, max_nob, format, ap);
		va_end(ap);

		if (needed < max_nob) /* well. printing ok.. */
			break;
	}

	/* `needed` is actual bytes written to string_buf */
	if (*(string_buf + needed - 1) != '\n') {
		pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
			file, msgdata->msg_line, msgdata->msg_fn);
	} else if (mask & D_TTY) {
		/* TTY needs '\r\n' to move carriage to leftmost position */
		if (needed < 2 || *(string_buf + needed - 2) != '\r')
			pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n",
				file, msgdata->msg_line, msgdata->msg_fn);
	}

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (msgdata->msg_fn) {
		strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}

	__LASSERT(debug_buf == string_buf);

	tage->used += needed;
	__LASSERT(tage->used <= PAGE_SIZE);

console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			cfs_trace_put_tcd(tcd);
		return 1;
	}

	if (cdls != NULL) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 &&	/* not first time ever */
		    time_before(jiffies, cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				cfs_trace_put_tcd(tcd);
			return 1;
		}

		if (time_after(jiffies, cdls->cdls_next +
					libcfs_console_max_delay +
					cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}

		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();

		va_start(ap, format);
		needed = vscnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				    format, ap);
		va_end(ap);

		cfs_print_to_console(&header, mask,
				     string_buf, needed, file,
				     msgdata->msg_fn);

		put_cpu();
	}

	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();

		needed = scnprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				   "Skipped %d previous similar message%s\n",
				   cdls->cdls_count,
				   (cdls->cdls_count > 1) ? "s" : "");

		/* Do not allow this to be printed to the TTY */
		cfs_print_to_console(&header, mask & ~D_TTY, string_buf,
				     needed, file, msgdata->msg_fn);

		put_cpu();
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(libcfs_debug_msg);
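
/*
 * libcfs_debug_msg() is normally reached through the CDEBUG()/CERROR()
 * macro family, which fills in a struct libcfs_debug_msg_data for the
 * call site (the exact expansion lives in the libcfs debug headers, so
 * this is a hedged sketch rather than a definitive reference):
 *
 *	CDEBUG(D_WARNING, "flushed %d pages\n", nr);
 *
 * logs the message into the per-CPU trace pages and, mask permitting,
 * mirrors it to the console via cfs_print_to_console().
 */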
void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	mb();

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");
}
static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic. If this isn't true for some
	 * arch, this will have to be implemented separately in each arch.
	 */
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages,
					 &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}
static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i;
	int cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}
static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon.
 */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;

		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;

		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;

			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

			__LASSERT_TAGE_INVARIANT(victim);

			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}
void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;

		__LASSERT_TAGE_INVARIANT(tage);

		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;

			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;

			len = hdr->ph_len - (int)(p - (char *)hdr);

			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

			p += len;
		}

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection pc;
	struct file *filp;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	char *buf;
	int rc;

	down_write(&cfs_tracefile_sem);

	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		pr_err("LustreError: can't open %s for dump: rc = %d\n",
		       filename, rc);
		goto out;
	}

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}

	/* ok, for now, just write the pages. in the future we'll be building
	 * iobufs with the pages and calling generic_direct_IO
	 */
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		buf = kmap(tage->page);
		rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
		kunmap(tage->page);
		if (rc != (int)tage->used) {
			pr_warn("Lustre: wanted to write %u but wrote %d\n",
				tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}

	rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		pr_err("LustreError: sync returns: rc = %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	up_write(&cfs_tracefile_sem);
	return rc;
}
void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (--nob >= 0)		/* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0)			/* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob)	/* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0;	/* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
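
/*
 * Worked example (illustrative): copying the user bytes "stop\n" (nob 5)
 * into a 16-byte kernel buffer strips the trailing newline and returns 0
 * with knl_buffer containing "stop" - which is exactly the form in which
 * the daemon command strings below arrive.
 */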
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n" for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries
	 */
	int nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
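
/*
 * Example (illustrative): with knl_buffer "neterror" and a 64-byte user
 * buffer, cfs_trace_copyout_string(ubuf, 64, knl_buffer, "\n") copies 8
 * bytes, appends '\n', and returns 9.  Note the return value is the
 * number of bytes copied out, not 0 on success.
 */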
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_SIZE)	/* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (*str == NULL)
		return -ENOMEM;

	return 0;
}
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;

	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	kfree(str);
	return rc;
}
int cfs_trace_daemon_command(char *str)
{
	int rc = 0;

	down_write(&cfs_tracefile_sem);

	if (strcmp(str, "stop") == 0) {
		up_write(&cfs_tracefile_sem);
		cfs_trace_stop_thread();
		down_write(&cfs_tracefile_sem);
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		unsigned long tmp;

		rc = kstrtoul(str + 5, 10, &tmp);
		if (!rc) {
			if (tmp < 10 || tmp > 20480)
				cfs_tracefile_size = CFS_TRACEFILE_SIZE;
			else
				cfs_tracefile_size = tmp << 20;
		}
	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
			cfs_tracefile, (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	up_write(&cfs_tracefile_sem);
	return rc;
}
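
/*
 * Command strings accepted above, as parsed by the code (shown here for
 * reference):
 *
 *	"stop"          - stop the daemon thread and forget the file name
 *	"size=<MB>"     - set cfs_tracefile_size; values outside 10..20480
 *	                  fall back to CFS_TRACEFILE_SIZE
 *	"/path/to/file" - absolute path to log to; starts the daemon
 */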
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);

	kfree(str);
	return rc;
}
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	unsigned long pages;
	unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
	unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
			mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
			mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);

	down_write(&cfs_tracefile_sem);

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	up_write(&cfs_tracefile_sem);

	return 0;
}
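
/*
 * Worked example of the sizing math above (hypothetical numbers): with 4
 * possible CPUs, PAGE_SHIFT=12 and mb=40, each CPU gets 10 MB, i.e.
 * pages = 10 << (20 - 12) = 2560 pages, which are then split per type by
 * tcd_pages_factor (80/10/10 below): 2048/256/256 pages per CPU.
 */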
int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	down_read(&cfs_tracefile_sem);

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	up_read(&cfs_tracefile_sem);

	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	struct file *filp;
	char *buf;
	int last_loop = 0;
	int rc;

	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml? what on earth is going on? */

	complete(&tctl->tctl_start);

	while (1) {
		wait_queue_entry_t __wait;

		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;

		filp = NULL;
		down_read(&cfs_tracefile_sem);
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				pr_warn("Lustre: couldn't open %s: rc = %d\n",
					cfs_tracefile, rc);
			}
		}
		up_read(&cfs_tracefile_sem);
		if (filp == NULL) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}

		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
			struct dentry *de = file_dentry(filp);
			static loff_t f_pos;

			__LASSERT_TAGE_INVARIANT(tage);

			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > i_size_read(de->d_inode))
				f_pos = i_size_read(de->d_inode);

			buf = kmap(tage->page);
			rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
			kunmap(tage->page);
			if (rc != (int)tage->used) {
				pr_warn("Lustre: wanted to write %u but wrote %d\n",
					tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}

		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;

			pr_alert("Lustre: trace pages aren't empty\n");
			pr_err("Lustre: total cpus(%d): ",
			       num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					pr_cont("%d(on) ", i);
				else
					pr_cont("%d(off) ", i);
			pr_cont("\n");

			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				pr_err("Lustre: page %d belongs to cpu %d\n",
				       ++i, tage->cpu);
			pr_err("Lustre: There are %d pages unwritten\n", i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		schedule_timeout_interruptible(cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
		rc = -ECHILD;
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}
void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		pr_info("Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}
/* percentage share of the total debug memory for each type */
static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
	80,	/* 80% pages for CFS_TCD_TYPE_PROC */
	10,	/* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
	10	/* 10% pages for CFS_TCD_TYPE_IRQ */
};
int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int j;

	/* initialize trace_data */
	memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
	for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
		cfs_trace_data[i] =
			kmalloc_array(num_possible_cpus(),
				      sizeof(union cfs_trace_data_union),
				      GFP_KERNEL);
		if (!cfs_trace_data[i])
			goto out_trace_data;
	}

	/* arch related info initialized */
	cfs_tcd_for_each(tcd, i, j) {
		int factor = pages_factor[i];

		spin_lock_init(&tcd->tcd_lock);
		tcd->tcd_pages_factor = factor;
		tcd->tcd_type = i;
		tcd->tcd_cpu = j;

		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}

	for (i = 0; i < num_possible_cpus(); i++)
		for (j = 0; j < 3; j++) {
			cfs_trace_console_buffers[i][j] =
				kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
					GFP_KERNEL);
			if (!cfs_trace_console_buffers[i][j])
				goto out_buffers;
		}

	return 0;

out_buffers:
	for (i = 0; i < num_possible_cpus(); i++)
		for (j = 0; j < 3; j++) {
			kfree(cfs_trace_console_buffers[i][j]);
			cfs_trace_console_buffers[i][j] = NULL;
		}
out_trace_data:
	for (i = 0; cfs_trace_data[i]; i++) {
		kfree(cfs_trace_data[i]);
		cfs_trace_data[i] = NULL;
	}
	pr_err("lnet: Not enough memory\n");
	return -ENOMEM;
}
static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i;
	int cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			if (!tcd->tcd_pages_factor)
				/* Not initialised */
				continue;
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
						 linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}
			tcd->tcd_cur_pages = 0;
		}
	}
}
static void cfs_trace_cleanup(void)
{
	struct page_collection pc;
	int i;
	int j;

	INIT_LIST_HEAD(&pc.pc_pages);

	trace_cleanup_on_all_cpus();

	for (i = 0; i < num_possible_cpus(); i++)
		for (j = 0; j < 3; j++) {
			kfree(cfs_trace_console_buffers[i][j]);
			cfs_trace_console_buffers[i][j] = NULL;
		}

	for (i = 0; cfs_trace_data[i]; i++) {
		kfree(cfs_trace_data[i]);
		cfs_trace_data[i] = NULL;
	}
}
void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}