4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * libcfs/libcfs/tracefile.c
34 * Author: Zach Brown <zab@clusterfs.com>
35 * Author: Phil Schwan <phil@clusterfs.com>
38 #define DEBUG_SUBSYSTEM S_LNET
39 #include "tracefile.h"
41 #include <linux/ctype.h>
43 #include <linux/kthread.h>
44 #include <linux/pagemap.h>
45 #include <linux/poll.h>
46 #include <linux/tty.h>
47 #include <linux/uaccess.h>
48 #include <libcfs/linux/linux-fs.h>
49 #include <libcfs/libcfs.h>
52 enum cfs_trace_buf_type {
53 CFS_TCD_TYPE_PROC = 0,
59 union cfs_trace_data_union (*cfs_trace_data[CFS_TCD_TYPE_CNT])[NR_CPUS] __cacheline_aligned;
61 char cfs_tracefile[TRACEFILE_NAME_SIZE];
62 long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
64 struct task_struct *tctl_task;
66 static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
67 static DECLARE_RWSEM(cfs_tracefile_sem);
69 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
70 struct cfs_trace_cpu_data *tcd);
72 /* trace file lock routines */
73 /* The walking argument indicates that the caller is iterating over all
74  * tcd types, so we must take the lock and disable local irqs to avoid
75  * deadlocks with other interrupt locks that might be taken. See LU-1311
78 int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
79 __acquires(&tcd->tcd_lock)
81 __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
82 if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
83 spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
84 else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
85 spin_lock_bh(&tcd->tcd_lock);
86 else if (unlikely(walking))
87 spin_lock_irq(&tcd->tcd_lock);
89 spin_lock(&tcd->tcd_lock);
93 void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
94 __releases(&tcd->tcd_lock)
96 __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
97 if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
98 spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
99 else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
100 spin_unlock_bh(&tcd->tcd_lock);
101 else if (unlikely(walking))
102 spin_unlock_irq(&tcd->tcd_lock);
104 spin_unlock(&tcd->tcd_lock);
107 #define cfs_tcd_for_each(tcd, i, j) \
108 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) \
109 for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
110 j < num_possible_cpus(); \
111 j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
113 #define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
114 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i] && \
115 (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
116 cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
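/* Pick the trace buffer type for the current context so that messages
 * logged from hardirq, softirq and process context use separate buffers.
 */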
118 enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
121 return CFS_TCD_TYPE_IRQ;
123 return CFS_TCD_TYPE_SOFTIRQ;
124 return CFS_TCD_TYPE_PROC;
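/* Return the locked trace buffer matching the current CPU and context;
 * get_cpu() disables preemption, so the caller stays on this CPU until
 * the buffer is released with cfs_trace_put_tcd().
 */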
127 static inline struct cfs_trace_cpu_data *
128 cfs_trace_get_tcd(void)
130 struct cfs_trace_cpu_data *tcd =
131 &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
133 cfs_trace_lock_tcd(tcd, 0);
138 static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
140 cfs_trace_unlock_tcd(tcd, 0);
145 static inline struct cfs_trace_page *
146 cfs_tage_from_list(struct list_head *list)
148 return list_entry(list, struct cfs_trace_page, linkage);
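/* Allocate a trace page ("tage"): a backing page plus its bookkeeping
 * structure. Returns NULL if the caller is reclaiming memory or either
 * allocation fails.
 */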
151 static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
154 struct cfs_trace_page *tage;
156 /* My caller is trying to free memory */
157 if (!in_interrupt() && (current->flags & PF_MEMALLOC))
161 * Don't spam the console with allocation failures: they will be
162 * reported by the upper layer anyway.
165 page = alloc_page(gfp);
169 tage = kmalloc(sizeof(*tage), gfp);
176 atomic_inc(&cfs_tage_allocated);
180 static void cfs_tage_free(struct cfs_trace_page *tage)
182 __LASSERT(tage != NULL);
183 __LASSERT(tage->page != NULL);
185 __free_page(tage->page);
187 atomic_dec(&cfs_tage_allocated);
190 static void cfs_tage_to_tail(struct cfs_trace_page *tage,
191 struct list_head *queue)
193 __LASSERT(tage != NULL);
194 __LASSERT(queue != NULL);
196 list_move_tail(&tage->linkage, queue);
199 /* return a page that has 'len' bytes left at the end */
200 static struct cfs_trace_page *
201 cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
203 struct cfs_trace_page *tage;
204 struct task_struct *tsk;
206 if (tcd->tcd_cur_pages > 0) {
207 __LASSERT(!list_empty(&tcd->tcd_pages));
208 tage = cfs_tage_from_list(tcd->tcd_pages.prev);
209 if (tage->used + len <= PAGE_SIZE)
213 if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
214 if (tcd->tcd_cur_stock_pages > 0) {
215 tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
216 --tcd->tcd_cur_stock_pages;
217 list_del_init(&tage->linkage);
219 tage = cfs_tage_alloc(GFP_ATOMIC);
220 if (unlikely(tage == NULL)) {
221 if ((!(current->flags & PF_MEMALLOC) ||
222 in_interrupt()) && printk_ratelimit())
223 pr_warn("Lustre: cannot allocate a tage (%ld)\n",
230 tage->cpu = smp_processor_id();
231 tage->type = tcd->tcd_type;
232 list_add_tail(&tage->linkage, &tcd->tcd_pages);
233 tcd->tcd_cur_pages++;
236 if (tcd->tcd_cur_pages > 8 && tsk)
238 * wake up tracefiled to process some pages.
240 wake_up_process(tsk);
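/* Buffer overflow: move about 10% of the oldest pages from tcd_pages
 * onto the per-CPU daemon list so that new messages can still be
 * recorded.
 */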
247 static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
249 int pgcount = tcd->tcd_cur_pages / 10;
250 struct page_collection pc;
251 struct cfs_trace_page *tage;
252 struct cfs_trace_page *tmp;
255 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
256 * from here: this will lead to infinite recursion.
259 if (printk_ratelimit())
260 pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
261 pgcount + 1, tcd->tcd_cur_pages);
263 INIT_LIST_HEAD(&pc.pc_pages);
265 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
269 list_move_tail(&tage->linkage, &pc.pc_pages);
270 tcd->tcd_cur_pages--;
272 put_pages_on_tcd_daemon_list(&pc, tcd);
275 /* return a page that has 'len' bytes left at the end */
276 static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
279 struct cfs_trace_page *tage;
282 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
283 * from here: this will lead to infinite recursion.
286 if (len > PAGE_SIZE) {
287 pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
292 tage = cfs_trace_get_tage_try(tcd, len);
297 if (tcd->tcd_cur_pages > 0) {
298 tage = cfs_tage_from_list(tcd->tcd_pages.next);
300 cfs_tage_to_tail(tage, &tcd->tcd_pages);
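/* Fill in the fixed header written in front of every trace record:
 * subsystem, mask, CPU, wall-clock time, stack usage, pid and line.
 */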
305 static void cfs_set_ptldebug_header(struct ptldebug_header *header,
306 struct libcfs_debug_msg_data *msgdata,
309 struct timespec64 ts;
311 ktime_get_real_ts64(&ts);
313 header->ph_subsys = msgdata->msg_subsys;
314 header->ph_mask = msgdata->msg_mask;
315 header->ph_cpu_id = smp_processor_id();
316 header->ph_type = cfs_trace_buf_idx_get();
317 /* y2038 safe since all user space treats this as unsigned, but
318 * will overflow in 2106
320 header->ph_sec = (u32)ts.tv_sec;
321 header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
322 header->ph_stack = stack;
323 header->ph_pid = current->pid;
324 header->ph_line_num = msgdata->msg_line;
325 header->ph_extern_pid = 0;
329 * tty_write_msg - write a message to a certain tty, not just the console.
330 * @tty: the destination tty_struct
331 * @msg: the message to write
333 * tty_write_message is not exported, so provide an equivalent here
336 static void tty_write_msg(struct tty_struct *tty, const char *msg)
338 mutex_lock(&tty->atomic_write_lock);
340 if (tty->ops->write && tty->count > 0)
341 tty->ops->write(tty, msg, strlen(msg));
343 mutex_unlock(&tty->atomic_write_lock);
344 wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
347 static void cfs_tty_write_message(const char *prefix, int mask, const char *msg)
349 struct tty_struct *tty;
351 tty = get_current_tty();
355 tty_write_msg(tty, prefix);
356 if ((mask & D_EMERG) || (mask & D_ERROR))
357 tty_write_msg(tty, "Error");
358 tty_write_msg(tty, ": ");
359 tty_write_msg(tty, msg);
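/* Route a formatted message to the console at a severity derived from
 * the debug mask, and optionally echo it to the current tty.
 */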
363 static void cfs_vprint_to_console(struct ptldebug_header *hdr, int mask,
364 struct va_format *vaf, const char *file,
367 char *prefix = "Lustre";
369 if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
372 if (mask & D_CONSOLE) {
374 pr_emerg("%sError: %pV", prefix, vaf);
375 else if (mask & D_ERROR)
376 pr_err("%sError: %pV", prefix, vaf);
377 else if (mask & D_WARNING)
378 pr_warn("%s: %pV", prefix, vaf);
379 else if (mask & libcfs_printk)
380 pr_info("%s: %pV", prefix, vaf);
383 pr_emerg("%sError: %d:%d:(%s:%d:%s()) %pV", prefix,
384 hdr->ph_pid, hdr->ph_extern_pid, file,
385 hdr->ph_line_num, fn, vaf);
386 else if (mask & D_ERROR)
387 pr_err("%sError: %d:%d:(%s:%d:%s()) %pV", prefix,
388 hdr->ph_pid, hdr->ph_extern_pid, file,
389 hdr->ph_line_num, fn, vaf);
390 else if (mask & D_WARNING)
391 pr_warn("%s: %d:%d:(%s:%d:%s()) %pV", prefix,
392 hdr->ph_pid, hdr->ph_extern_pid, file,
393 hdr->ph_line_num, fn, vaf);
394 else if (mask & (D_CONSOLE | libcfs_printk))
395 pr_info("%s: %pV", prefix, vaf);
399 /* tty_write_msg doesn't handle formatting */
400 cfs_tty_write_message(prefix, mask, vaf->fmt);
403 static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
404 const char *file, const char *fn,
405 const char *fmt, ...)
407 struct va_format vaf;
413 cfs_vprint_to_console(hdr, mask, &vaf, file, fn);
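/* Main logging entry point: format the message into a page of the
 * current CPU's trace buffer (retrying once with a size hint from
 * vsnprintf) and, when the mask asks for it, echo it to the console
 * under cdls rate limiting.
 */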
416 int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
417 const char *format, ...)
419 struct cfs_trace_cpu_data *tcd = NULL;
420 struct ptldebug_header header = {0};
421 struct cfs_trace_page *tage;
422 /* string_buf is used only if tcd != NULL, and is always set then */
423 char *string_buf = NULL;
426 int needed = 85; /* seeded with average message length */
430 int mask = msgdata->msg_mask;
431 char *file = (char *)msgdata->msg_file;
432 struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
434 if (strchr(file, '/'))
435 file = strrchr(file, '/') + 1;
437 tcd = cfs_trace_get_tcd();
439 /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
440 * pins us to a particular CPU. This avoids an smp_processor_id()
441 * warning on Linux when debugging is enabled.
443 cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
445 if (!tcd) /* arch may not log in IRQ context */
448 if (tcd->tcd_cur_pages == 0)
449 header.ph_flags |= PH_FLAG_FIRST_RECORD;
451 if (tcd->tcd_shutting_down) {
452 cfs_trace_put_tcd(tcd);
457 known_size = strlen(file) + 1;
459 known_size += strlen(msgdata->msg_fn) + 1;
461 if (libcfs_debug_binary)
462 known_size += sizeof(header);
465 * May make a second pass: the first pass uses vsnprintf, whose return
466 * value is the size the message would need, so 'needed' can be updated
467 * and a larger tage obtained. The second pass (retry=1) uses
468 * vscnprintf, which returns the number of bytes actually written (not
469 * including the terminating nul), so that for the remainder of this
470 * function 'needed' is the number of bytes written.
472 for (retry = 0; retry < 2; retry++) {
473 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
475 if (needed + known_size > PAGE_SIZE)
478 cfs_trace_put_tcd(tcd);
483 string_buf = (char *)page_address(tage->page) +
484 tage->used + known_size;
486 max_nob = PAGE_SIZE - tage->used - known_size;
488 pr_emerg("LustreError: negative max_nob: %d\n",
491 cfs_trace_put_tcd(tcd);
496 va_start(ap, format);
498 needed = vscnprintf(string_buf, max_nob, format, ap);
500 needed = vsnprintf(string_buf, max_nob, format, ap);
503 if (needed < max_nob) /* the message fit in the buffer */
507 /* `needed` is the actual number of bytes written to string_buf */
508 if (*(string_buf + needed - 1) != '\n') {
509 pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
510 file, msgdata->msg_line, msgdata->msg_fn);
511 } else if (mask & D_TTY) {
512 /* TTY needs '\r\n' to move carriage to leftmost position */
513 if (needed < 2 || *(string_buf + needed - 2) != '\r')
514 pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n",
515 file, msgdata->msg_line, msgdata->msg_fn);
516 if (strnchr(string_buf, needed, '%'))
517 pr_info("Lustre: format at %s:%d:%s mustn't contain %%\n",
518 file, msgdata->msg_line, msgdata->msg_fn);
521 header.ph_len = known_size + needed;
522 debug_buf = (char *)page_address(tage->page) + tage->used;
524 if (libcfs_debug_binary) {
525 memcpy(debug_buf, &header, sizeof(header));
526 tage->used += sizeof(header);
527 debug_buf += sizeof(header);
530 strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
531 tage->used += strlen(file) + 1;
532 debug_buf += strlen(file) + 1;
534 if (msgdata->msg_fn) {
535 strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
536 tage->used += strlen(msgdata->msg_fn) + 1;
537 debug_buf += strlen(msgdata->msg_fn) + 1;
540 __LASSERT(debug_buf == string_buf);
542 tage->used += needed;
543 __LASSERT(tage->used <= PAGE_SIZE);
546 if ((mask & libcfs_printk) == 0) {
547 /* no console output requested */
549 cfs_trace_put_tcd(tcd);
554 if (libcfs_console_ratelimit &&
555 cdls->cdls_next != 0 && /* not first time ever */
556 time_before(jiffies, cdls->cdls_next)) {
557 /* skipping a console message */
560 cfs_trace_put_tcd(tcd);
564 if (time_after(jiffies, cdls->cdls_next +
565 libcfs_console_max_delay +
566 cfs_time_seconds(10))) {
567 /* last timeout was a long time ago */
568 cdls->cdls_delay /= libcfs_console_backoff * 4;
570 cdls->cdls_delay *= libcfs_console_backoff;
573 if (cdls->cdls_delay < libcfs_console_min_delay)
574 cdls->cdls_delay = libcfs_console_min_delay;
575 else if (cdls->cdls_delay > libcfs_console_max_delay)
576 cdls->cdls_delay = libcfs_console_max_delay;
578 /* ensure cdls_next is never zero after it's been seen */
579 cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
583 cfs_print_to_console(&header, mask, file, msgdata->msg_fn,
585 cfs_trace_put_tcd(tcd);
587 struct va_format vaf;
589 va_start(ap, format);
592 cfs_vprint_to_console(&header, mask,
593 &vaf, file, msgdata->msg_fn);
597 if (cdls != NULL && cdls->cdls_count != 0) {
598 /* Do not allow this message to be printed to the TTY */
599 cfs_print_to_console(&header, mask & ~D_TTY, file,
601 "Skipped %d previous similar message%s\n",
603 (cdls->cdls_count > 1) ? "s" : "");
605 cdls->cdls_count = 0;
610 EXPORT_SYMBOL(libcfs_debug_msg);
613 cfs_trace_assertion_failed(const char *str,
614 struct libcfs_debug_msg_data *msgdata)
616 struct ptldebug_header hdr;
618 libcfs_panic_in_progress = 1;
619 libcfs_catastrophe = 1;
622 cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
624 cfs_print_to_console(&hdr, D_EMERG, msgdata->msg_file, msgdata->msg_fn,
627 panic("Lustre debug assertion failure\n");
633 panic_collect_pages(struct page_collection *pc)
635 /* Do the collect_pages job on a single CPU: assumes that all other
636 * CPUs have been stopped during a panic. If this isn't true for some
637 * arch, this will have to be implemented separately for that arch. */
640 struct cfs_trace_cpu_data *tcd;
642 INIT_LIST_HEAD(&pc->pc_pages);
644 cfs_tcd_for_each(tcd, i, j) {
645 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
646 tcd->tcd_cur_pages = 0;
648 if (pc->pc_want_daemon_pages) {
649 list_splice_init(&tcd->tcd_daemon_pages,
651 tcd->tcd_cur_daemon_pages = 0;
656 static void collect_pages_on_all_cpus(struct page_collection *pc)
658 struct cfs_trace_cpu_data *tcd;
661 for_each_possible_cpu(cpu) {
662 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
663 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
664 tcd->tcd_cur_pages = 0;
665 if (pc->pc_want_daemon_pages) {
666 list_splice_init(&tcd->tcd_daemon_pages,
668 tcd->tcd_cur_daemon_pages = 0;
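/* Gather every trace page onto pc->pc_pages. During a panic only the
 * local CPU is walked (the others are assumed stopped); otherwise all
 * possible CPUs are drained under their tcd locks.
 */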
674 static void collect_pages(struct page_collection *pc)
676 INIT_LIST_HEAD(&pc->pc_pages);
678 if (libcfs_panic_in_progress)
679 panic_collect_pages(pc);
681 collect_pages_on_all_cpus(pc);
684 static void put_pages_back_on_all_cpus(struct page_collection *pc)
686 struct cfs_trace_cpu_data *tcd;
687 struct list_head *cur_head;
688 struct cfs_trace_page *tage;
689 struct cfs_trace_page *tmp;
692 for_each_possible_cpu(cpu) {
693 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
694 cur_head = tcd->tcd_pages.next;
696 list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
699 __LASSERT_TAGE_INVARIANT(tage);
701 if (tage->cpu != cpu || tage->type != i)
704 cfs_tage_to_tail(tage, cur_head);
705 tcd->tcd_cur_pages++;
711 static void put_pages_back(struct page_collection *pc)
713 if (!libcfs_panic_in_progress)
714 put_pages_back_on_all_cpus(pc);
717 /* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
718 * we have a good amount of data at all times for dumping during an LBUG, even
719 * if we have been steadily writing (and otherwise discarding) pages via the debug daemon.
721 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
722 struct cfs_trace_cpu_data *tcd)
724 struct cfs_trace_page *tage;
725 struct cfs_trace_page *tmp;
727 list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
728 __LASSERT_TAGE_INVARIANT(tage);
730 if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
733 cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
734 tcd->tcd_cur_daemon_pages++;
736 if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
737 struct cfs_trace_page *victim;
739 __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
740 victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
742 __LASSERT_TAGE_INVARIANT(victim);
744 list_del(&victim->linkage);
745 cfs_tage_free(victim);
746 tcd->tcd_cur_daemon_pages--;
751 static void put_pages_on_daemon_list(struct page_collection *pc)
753 struct cfs_trace_cpu_data *tcd;
756 for_each_possible_cpu(cpu) {
757 cfs_tcd_for_each_type_lock(tcd, i, cpu)
758 put_pages_on_tcd_daemon_list(pc, tcd);
762 #ifdef LNET_DUMP_ON_PANIC
763 void cfs_trace_debug_print(void)
765 struct page_collection pc;
766 struct cfs_trace_page *tage;
767 struct cfs_trace_page *tmp;
769 pc.pc_want_daemon_pages = 1;
771 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
775 __LASSERT_TAGE_INVARIANT(tage);
778 p = page_address(page);
779 while (p < ((char *)page_address(page) + tage->used)) {
780 struct ptldebug_header *hdr;
785 p += strlen(file) + 1;
788 len = hdr->ph_len - (int)(p - (char *)hdr);
790 cfs_print_to_console(hdr, D_EMERG, file, fn,
796 list_del(&tage->linkage);
800 #endif /* LNET_DUMP_ON_PANIC */
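/* Write every collected trace page (including the daemon pages) to
 * 'filename', created exclusively, and fsync the result.
 */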
802 int cfs_tracefile_dump_all_pages(char *filename)
804 struct page_collection pc;
806 struct cfs_trace_page *tage;
807 struct cfs_trace_page *tmp;
811 down_write(&cfs_tracefile_sem);
813 filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
817 pr_err("LustreError: can't open %s for dump: rc = %d\n",
822 pc.pc_want_daemon_pages = 1;
824 if (list_empty(&pc.pc_pages)) {
829 /* OK, for now, just write the pages. In the future we'll build
830 * iobufs with the pages and call generic_direct_IO() */
831 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
833 __LASSERT_TAGE_INVARIANT(tage);
835 buf = kmap(tage->page);
836 rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
838 if (rc != (int)tage->used) {
839 pr_warn("Lustre: wanted to write %u but wrote %d\n",
842 __LASSERT(list_empty(&pc.pc_pages));
845 list_del(&tage->linkage);
849 rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
851 pr_err("LustreError: sync returns: rc = %d\n", rc);
853 filp_close(filp, NULL);
855 up_write(&cfs_tracefile_sem);
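/* Discard all collected trace pages, daemon pages included. */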
859 void cfs_trace_flush_pages(void)
861 struct page_collection pc;
862 struct cfs_trace_page *tage;
864 pc.pc_want_daemon_pages = 1;
866 while (!list_empty(&pc.pc_pages)) {
867 tage = list_first_entry(&pc.pc_pages,
868 struct cfs_trace_page, linkage);
869 __LASSERT_TAGE_INVARIANT(tage);
871 list_del(&tage->linkage);
876 int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
877 const char *knl_buffer, char *append)
879 /* NB if 'append' != NULL, it's a single character to append to the
880 * copied out string - usually "\n" for /proc entries and "" (i.e. a
881 * terminating zero byte) for sysctl entries */
882 int nob = strlen(knl_buffer);
884 if (nob > usr_buffer_nob)
885 nob = usr_buffer_nob;
887 if (copy_to_user(usr_buffer, knl_buffer, nob))
890 if (append != NULL && nob < usr_buffer_nob) {
891 if (copy_to_user(usr_buffer + nob, append, 1))
899 EXPORT_SYMBOL(cfs_trace_copyout_string);
901 int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
907 str = memdup_user_nul(usr_str, usr_str_nob);
915 rc = cfs_tracefile_dump_all_pages(path);
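/* Handle a debug daemon control string, as parsed below:
 *   "stop"        - stop the tracefiled thread and clear cfs_tracefile
 *   "size=<MB>"   - set the maximum trace file size; values outside
 *                   10..20480 fall back to CFS_TRACEFILE_SIZE
 *   "/abs/path"   - absolute path of the trace file; starts the daemon
 */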
921 int cfs_trace_daemon_command(char *str)
925 down_write(&cfs_tracefile_sem);
927 if (strcmp(str, "stop") == 0) {
928 up_write(&cfs_tracefile_sem);
929 cfs_trace_stop_thread();
930 down_write(&cfs_tracefile_sem);
931 memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
933 } else if (strncmp(str, "size=", 5) == 0) {
936 rc = kstrtoul(str + 5, 10, &tmp);
938 if (tmp < 10 || tmp > 20480)
939 cfs_tracefile_size = CFS_TRACEFILE_SIZE;
941 cfs_tracefile_size = tmp << 20;
943 } else if (strlen(str) >= sizeof(cfs_tracefile)) {
945 } else if (str[0] != '/') {
948 strcpy(cfs_tracefile, str);
950 pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
951 cfs_tracefile, (long)(cfs_tracefile_size >> 10));
953 cfs_trace_start_thread();
956 up_write(&cfs_tracefile_sem);
960 int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
965 str = memdup_user_nul(usr_str, usr_str_nob);
969 rc = cfs_trace_daemon_command(strim(str));
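/* Resize the total debug buffer to 'mb' megabytes: values below
 * num_possible_cpus() or above roughly 80% of RAM (at least 512 MB)
 * are adjusted, then the budget is split per CPU and per buffer type
 * according to tcd_pages_factor.
 */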
975 int cfs_trace_set_debug_mb(int mb)
980 unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
981 unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
982 struct cfs_trace_cpu_data *tcd;
984 if (mb < num_possible_cpus()) {
985 pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
986 mb, num_possible_cpus());
987 mb = num_possible_cpus();
991 pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
996 mb /= num_possible_cpus();
997 pages = mb << (20 - PAGE_SHIFT);
999 down_write(&cfs_tracefile_sem);
1001 cfs_tcd_for_each(tcd, i, j)
1002 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
1004 up_write(&cfs_tracefile_sem);
1009 int cfs_trace_get_debug_mb(void)
1013 struct cfs_trace_cpu_data *tcd;
1014 int total_pages = 0;
1016 down_read(&cfs_tracefile_sem);
1018 cfs_tcd_for_each(tcd, i, j)
1019 total_pages += tcd->tcd_max_pages;
1021 up_read(&cfs_tracefile_sem);
1023 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
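/* Daemon thread: once a second, collect the dirty trace pages and
 * append them to cfs_tracefile (when one is configured), putting the
 * written pages back on the per-CPU daemon lists. Exits when
 * kthread_should_stop() is set.
 */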
1026 static int tracefiled(void *arg)
1028 struct page_collection pc;
1029 struct cfs_trace_page *tage;
1030 struct cfs_trace_page *tmp;
1036 pc.pc_want_daemon_pages = 0;
1038 while (!last_loop) {
1039 schedule_timeout_interruptible(cfs_time_seconds(1));
1040 if (kthread_should_stop())
1043 if (list_empty(&pc.pc_pages))
1047 down_read(&cfs_tracefile_sem);
1048 if (cfs_tracefile[0] != 0) {
1049 filp = filp_open(cfs_tracefile,
1050 O_CREAT | O_RDWR | O_LARGEFILE,
1055 pr_warn("Lustre: couldn't open %s: rc = %d\n",
1059 up_read(&cfs_tracefile_sem);
1061 put_pages_on_daemon_list(&pc);
1062 __LASSERT(list_empty(&pc.pc_pages));
1066 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1067 struct dentry *de = file_dentry(filp);
1068 static loff_t f_pos;
1070 __LASSERT_TAGE_INVARIANT(tage);
1072 if (f_pos >= (off_t)cfs_tracefile_size)
1074 else if (f_pos > i_size_read(de->d_inode))
1075 f_pos = i_size_read(de->d_inode);
1077 buf = kmap(tage->page);
1078 rc = cfs_kernel_write(filp, buf, tage->used, &f_pos);
1080 if (rc != (int)tage->used) {
1081 pr_warn("Lustre: wanted to write %u but wrote %d\n",
1083 put_pages_back(&pc);
1084 __LASSERT(list_empty(&pc.pc_pages));
1089 filp_close(filp, NULL);
1090 put_pages_on_daemon_list(&pc);
1091 if (!list_empty(&pc.pc_pages)) {
1094 pr_alert("Lustre: trace pages aren't empty\n");
1095 pr_err("Lustre: total cpus(%d): ", num_possible_cpus());
1096 for (i = 0; i < num_possible_cpus(); i++)
1098 pr_cont("%d(on) ", i);
1100 pr_cont("%d(off) ", i);
1104 list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
1106 pr_err("Lustre: page %d belongs to cpu %d\n",
1108 pr_err("Lustre: There are %d pages unwritten\n", i);
1110 __LASSERT(list_empty(&pc.pc_pages));
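/* Start the tracefiled daemon if it is not already running; the
 * cmpxchg on tctl_task makes concurrent starters race safely.
 */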
1116 int cfs_trace_start_thread(void)
1118 struct task_struct *tsk;
1124 tsk = kthread_create(tracefiled, NULL, "ktracefiled");
1127 else if (cmpxchg(&tctl_task, NULL, tsk) != NULL)
1128 /* already running */
1131 wake_up_process(tsk);
1136 void cfs_trace_stop_thread(void)
1138 struct task_struct *tsk;
1140 tsk = xchg(&tctl_task, NULL);
1142 pr_info("Lustre: shutting down debug daemon thread...\n");
1147 /* percentage of the total debug memory given to each type */
1148 static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = {
1149 80, /* 80% pages for CFS_TCD_TYPE_PROC */
1150 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
1151 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
1154 int cfs_tracefile_init(int max_pages)
1156 struct cfs_trace_cpu_data *tcd;
1160 /* initialize trace_data */
1161 memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
1162 for (i = 0; i < CFS_TCD_TYPE_CNT; i++) {
1164 kmalloc_array(num_possible_cpus(),
1165 sizeof(union cfs_trace_data_union),
1167 if (!cfs_trace_data[i])
1168 goto out_trace_data;
1171 /* initialize the per-CPU, arch-related trace data */
1172 cfs_tcd_for_each(tcd, i, j) {
1173 int factor = pages_factor[i];
1175 spin_lock_init(&tcd->tcd_lock);
1176 tcd->tcd_pages_factor = factor;
1180 INIT_LIST_HEAD(&tcd->tcd_pages);
1181 INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1182 INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
1183 tcd->tcd_cur_pages = 0;
1184 tcd->tcd_cur_stock_pages = 0;
1185 tcd->tcd_cur_daemon_pages = 0;
1186 tcd->tcd_max_pages = (max_pages * factor) / 100;
1187 LASSERT(tcd->tcd_max_pages > 0);
1188 tcd->tcd_shutting_down = 0;
1194 for (i = 0; cfs_trace_data[i]; i++) {
1195 kfree(cfs_trace_data[i]);
1196 cfs_trace_data[i] = NULL;
1198 pr_err("lnet: Not enough memory\n");
1202 static void trace_cleanup_on_all_cpus(void)
1204 struct cfs_trace_cpu_data *tcd;
1205 struct cfs_trace_page *tage;
1208 for_each_possible_cpu(cpu) {
1209 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
1210 if (!tcd->tcd_pages_factor)
1211 /* Not initialised */
1213 tcd->tcd_shutting_down = 1;
1215 while (!list_empty(&tcd->tcd_pages)) {
1216 tage = list_first_entry(&tcd->tcd_pages,
1217 struct cfs_trace_page,
1219 __LASSERT_TAGE_INVARIANT(tage);
1221 list_del(&tage->linkage);
1222 cfs_tage_free(tage);
1224 tcd->tcd_cur_pages = 0;
1229 static void cfs_trace_cleanup(void)
1231 struct page_collection pc;
1234 INIT_LIST_HEAD(&pc.pc_pages);
1236 trace_cleanup_on_all_cpus();
1238 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) {
1239 kfree(cfs_trace_data[i]);
1240 cfs_trace_data[i] = NULL;
1244 void cfs_tracefile_exit(void)
1246 cfs_trace_stop_thread();
1247 cfs_trace_cleanup();