4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * libcfs/libcfs/tracefile.c
33 * Author: Zach Brown <zab@clusterfs.com>
34 * Author: Phil Schwan <phil@clusterfs.com>
37 #define DEBUG_SUBSYSTEM S_LNET
38 #include "tracefile.h"
40 #include <linux/ctype.h>
42 #include <linux/kthread.h>
43 #include <linux/pagemap.h>
44 #include <linux/poll.h>
45 #include <linux/uaccess.h>
46 #include <libcfs/linux/linux-fs.h>
47 #include <libcfs/libcfs.h>
50 enum cfs_trace_buf_type {
51 CFS_TCD_TYPE_PROC = 0,
57 union cfs_trace_data_union (*cfs_trace_data[CFS_TCD_TYPE_CNT])[NR_CPUS] __cacheline_aligned;
59 /* Pages containing records already processed by daemon.
60 * Link via ->lru, use size in ->private
62 static LIST_HEAD(daemon_pages);
63 static long daemon_pages_count;
64 static long daemon_pages_max;
66 char cfs_tracefile[TRACEFILE_NAME_SIZE];
67 long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
69 struct task_struct *tctl_task;
71 static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
72 static DECLARE_RWSEM(cfs_tracefile_sem);
74 /* trace file lock routines */
75 /* The walking argument indicates that the caller is iterating over all tcd
76 * types, so we must take the lock and disable local irqs to avoid deadlocks
77 * with other interrupt locks that might be taken. See LU-1311
80 int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
81 __acquires(&tcd->tcd_lock)
83 __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
84 if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
85 spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
86 else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
87 spin_lock_bh(&tcd->tcd_lock);
88 else if (unlikely(walking))
89 spin_lock_irq(&tcd->tcd_lock);
91 spin_lock(&tcd->tcd_lock);
95 void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
96 __releases(&tcd->tcd_lock)
98 __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT);
99 if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
100 spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
101 else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
102 spin_unlock_bh(&tcd->tcd_lock);
103 else if (unlikely(walking))
104 spin_unlock_irq(&tcd->tcd_lock);
106 spin_unlock(&tcd->tcd_lock);
109 #define cfs_tcd_for_each(tcd, i, j) \
110 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) \
111 for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
112 j < num_possible_cpus(); \
113 j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
115 #define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
116 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i] && \
117 (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
118 cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
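/*
 * Illustrative use of the iterators above (a sketch; this mirrors how
 * collect_pages_on_all_cpus() later in this file walks every buffer):
 *
 *	for_each_possible_cpu(cpu) {
 *		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
 *			... tcd is locked here; splice tcd->tcd_pages ...
 *		}
 *	}
 */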
120 enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
123 return CFS_TCD_TYPE_IRQ;
125 return CFS_TCD_TYPE_SOFTIRQ;
126 return CFS_TCD_TYPE_PROC;
129 static inline struct cfs_trace_cpu_data *
130 cfs_trace_get_tcd(void)
132 struct cfs_trace_cpu_data *tcd =
133 &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
135 cfs_trace_lock_tcd(tcd, 0);
140 static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
142 cfs_trace_unlock_tcd(tcd, 0);
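/*
 * Typical get/put pairing (a sketch; libcfs_debug_msg() below is the
 * real caller):
 *
 *	struct cfs_trace_cpu_data *tcd = cfs_trace_get_tcd();
 *
 *	if (tcd != NULL) {
 *		... write a record into a page from cfs_trace_get_tage() ...
 *		cfs_trace_put_tcd(tcd);
 *	}
 */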
147 static inline struct cfs_trace_page *
148 cfs_tage_from_list(struct list_head *list)
150 return list_entry(list, struct cfs_trace_page, linkage);
153 static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
156 struct cfs_trace_page *tage;
158 /* My caller is trying to free memory */
159 if (!in_interrupt() && (current->flags & PF_MEMALLOC))
163 * Don't spam the console with allocation failures: they will be reported
164 * by the upper layer anyway.
167 page = alloc_page(gfp);
171 tage = kmalloc(sizeof(*tage), gfp);
178 atomic_inc(&cfs_tage_allocated);
182 static void cfs_tage_free(struct cfs_trace_page *tage)
184 __LASSERT(tage != NULL);
185 __LASSERT(tage->page != NULL);
187 __free_page(tage->page);
189 atomic_dec(&cfs_tage_allocated);
192 static void cfs_tage_to_tail(struct cfs_trace_page *tage,
193 struct list_head *queue)
195 __LASSERT(tage != NULL);
196 __LASSERT(queue != NULL);
198 list_move_tail(&tage->linkage, queue);
201 /* return a page that has 'len' bytes left at the end */
202 static struct cfs_trace_page *
203 cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
205 struct cfs_trace_page *tage;
206 struct task_struct *tsk;
208 if (tcd->tcd_cur_pages > 0) {
209 __LASSERT(!list_empty(&tcd->tcd_pages));
210 tage = cfs_tage_from_list(tcd->tcd_pages.prev);
211 if (tage->used + len <= PAGE_SIZE)
215 if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
216 if (tcd->tcd_cur_stock_pages > 0) {
217 tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
218 --tcd->tcd_cur_stock_pages;
219 list_del_init(&tage->linkage);
221 tage = cfs_tage_alloc(GFP_ATOMIC);
222 if (unlikely(tage == NULL)) {
223 if ((!(current->flags & PF_MEMALLOC) ||
224 in_interrupt()) && printk_ratelimit())
225 pr_warn("Lustre: cannot allocate a tage (%ld)\n",
232 tage->cpu = smp_processor_id();
233 tage->type = tcd->tcd_type;
234 list_add_tail(&tage->linkage, &tcd->tcd_pages);
235 tcd->tcd_cur_pages++;
238 if (tcd->tcd_cur_pages > 8 && tsk)
240 * wake up tracefiled to process some pages.
242 wake_up_process(tsk);
249 static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
251 int pgcount = tcd->tcd_cur_pages / 10;
252 struct page_collection pc;
253 struct cfs_trace_page *tage;
254 struct cfs_trace_page *tmp;
257 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
258 * from here: this will lead to infinite recursion.
261 if (printk_ratelimit())
262 pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
263 pgcount + 1, tcd->tcd_cur_pages);
265 INIT_LIST_HEAD(&pc.pc_pages);
267 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
271 list_del(&tage->linkage);
273 tcd->tcd_cur_pages--;
277 /* return a page that has 'len' bytes left at the end */
278 static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
281 struct cfs_trace_page *tage;
284 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
285 * from here: this will lead to infinite recursion.
288 if (len > PAGE_SIZE) {
289 pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n",
294 tage = cfs_trace_get_tage_try(tcd, len);
299 if (tcd->tcd_cur_pages > 0) {
300 tage = cfs_tage_from_list(tcd->tcd_pages.next);
302 cfs_tage_to_tail(tage, &tcd->tcd_pages);
307 static void cfs_set_ptldebug_header(struct ptldebug_header *header,
308 struct libcfs_debug_msg_data *msgdata,
311 struct timespec64 ts;
313 ktime_get_real_ts64(&ts);
315 header->ph_subsys = msgdata->msg_subsys;
316 header->ph_mask = msgdata->msg_mask;
317 header->ph_cpu_id = smp_processor_id();
318 header->ph_type = cfs_trace_buf_idx_get();
319 /* y2038 safe since all user space treats this as unsigned, but
320 * it will overflow in 2106
322 header->ph_sec = (u32)ts.tv_sec;
323 header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
324 header->ph_stack = stack;
325 header->ph_pid = current->pid;
326 header->ph_line_num = msgdata->msg_line;
327 header->ph_extern_pid = 0;
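/*
 * Example of the timestamp conversion above (illustrative values): for
 * ts = { .tv_sec = 1700000000, .tv_nsec = 123456789 } the header gets
 * ph_sec = 1700000000 and ph_usec = 123456789 / NSEC_PER_USEC = 123456.
 */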
330 static void cfs_vprint_to_console(struct ptldebug_header *hdr, int mask,
331 struct va_format *vaf, const char *file,
334 char *prefix = "Lustre";
336 if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET)
339 if (mask & D_CONSOLE) {
341 pr_emerg("%sError: %pV", prefix, vaf);
342 else if (mask & D_ERROR)
343 pr_err("%sError: %pV", prefix, vaf);
344 else if (mask & D_WARNING)
345 pr_warn("%s: %pV", prefix, vaf);
346 else if (mask & libcfs_printk)
347 pr_info("%s: %pV", prefix, vaf);
350 pr_emerg("%sError: %d:%d:(%s:%d:%s()) %pV", prefix,
351 hdr->ph_pid, hdr->ph_extern_pid, file,
352 hdr->ph_line_num, fn, vaf);
353 else if (mask & D_ERROR)
354 pr_err("%sError: %d:%d:(%s:%d:%s()) %pV", prefix,
355 hdr->ph_pid, hdr->ph_extern_pid, file,
356 hdr->ph_line_num, fn, vaf);
357 else if (mask & D_WARNING)
358 pr_warn("%s: %d:%d:(%s:%d:%s()) %pV", prefix,
359 hdr->ph_pid, hdr->ph_extern_pid, file,
360 hdr->ph_line_num, fn, vaf);
361 else if (mask & (D_CONSOLE | libcfs_printk))
362 pr_info("%s: %pV", prefix, vaf);
366 static void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
367 const char *file, const char *fn,
368 const char *fmt, ...)
370 struct va_format vaf;
376 cfs_vprint_to_console(hdr, mask, &vaf, file, fn);
379 int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
380 const char *format, ...)
382 struct cfs_trace_cpu_data *tcd = NULL;
383 struct ptldebug_header header = {0};
384 struct cfs_trace_page *tage;
385 /* string_buf is used only if tcd != NULL, and is always set then */
386 char *string_buf = NULL;
389 int needed = 85; /* seeded with average message length */
393 int mask = msgdata->msg_mask;
394 char *file = (char *)msgdata->msg_file;
395 struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
397 if (strchr(file, '/'))
398 file = strrchr(file, '/') + 1;
400 tcd = cfs_trace_get_tcd();
402 /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
403 * pins us to a particular CPU. This avoids an smp_processor_id()
404 * warning on Linux when debugging is enabled.
406 cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
408 if (!tcd) /* arch may not log in IRQ context */
411 if (tcd->tcd_cur_pages == 0)
412 header.ph_flags |= PH_FLAG_FIRST_RECORD;
414 if (tcd->tcd_shutting_down) {
415 cfs_trace_put_tcd(tcd);
420 known_size = strlen(file) + 1;
422 known_size += strlen(msgdata->msg_fn) + 1;
424 if (libcfs_debug_binary)
425 known_size += sizeof(header);
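/*
 * Layout of one record as written into the tage page below (the binary
 * header is only emitted when libcfs_debug_binary is set):
 *
 *	[struct ptldebug_header][file\0][fn\0][formatted message]
 *
 * known_size covers everything before the formatted message.
 */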
428 * We may perform an additional pass to update 'needed' and grow the
429 * tage buffer to the size that vsnprintf reports is required.
430 * On the second pass (retry == 1) use vscnprintf (which returns the
431 * number of bytes written, not counting the terminating NUL), so that
432 * for the remainder of this function 'needed' holds the number of
433 * bytes actually written.
435 for (retry = 0; retry < 2; retry++) {
436 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
438 if (needed + known_size > PAGE_SIZE)
441 cfs_trace_put_tcd(tcd);
446 string_buf = (char *)page_address(tage->page) +
447 tage->used + known_size;
449 max_nob = PAGE_SIZE - tage->used - known_size;
451 pr_emerg("LustreError: negative max_nob: %d\n",
454 cfs_trace_put_tcd(tcd);
459 va_start(ap, format);
461 needed = vscnprintf(string_buf, max_nob, format, ap);
463 needed = vsnprintf(string_buf, max_nob, format, ap);
466 if (needed < max_nob) /* well. printing ok.. */
470 /* `needed` is actual bytes written to string_buf */
471 if (*(string_buf + needed - 1) != '\n') {
472 pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
473 file, msgdata->msg_line, msgdata->msg_fn);
476 header.ph_len = known_size + needed;
477 debug_buf = (char *)page_address(tage->page) + tage->used;
479 if (libcfs_debug_binary) {
480 memcpy(debug_buf, &header, sizeof(header));
481 tage->used += sizeof(header);
482 debug_buf += sizeof(header);
485 strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
486 tage->used += strlen(file) + 1;
487 debug_buf += strlen(file) + 1;
489 if (msgdata->msg_fn) {
490 strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
491 tage->used += strlen(msgdata->msg_fn) + 1;
492 debug_buf += strlen(msgdata->msg_fn) + 1;
495 __LASSERT(debug_buf == string_buf);
497 tage->used += needed;
498 __LASSERT(tage->used <= PAGE_SIZE);
501 if ((mask & libcfs_printk) == 0) {
502 /* no console output requested */
504 cfs_trace_put_tcd(tcd);
509 if (libcfs_console_ratelimit &&
510 cdls->cdls_next != 0 && /* not first time ever */
511 time_before(jiffies, cdls->cdls_next)) {
512 /* skipping a console message */
515 cfs_trace_put_tcd(tcd);
519 if (time_after(jiffies, cdls->cdls_next +
520 libcfs_console_max_delay +
521 cfs_time_seconds(10))) {
522 /* last timeout was a long time ago */
523 cdls->cdls_delay /= libcfs_console_backoff * 4;
525 cdls->cdls_delay *= libcfs_console_backoff;
528 if (cdls->cdls_delay < libcfs_console_min_delay)
529 cdls->cdls_delay = libcfs_console_min_delay;
530 else if (cdls->cdls_delay > libcfs_console_max_delay)
531 cdls->cdls_delay = libcfs_console_max_delay;
533 /* ensure cdls_next is never zero after it's been seen */
534 cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
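/*
 * Worked example of the backoff above (illustrative, assuming
 * libcfs_console_backoff == 2): cdls_delay doubles on every rate-limited
 * burst until it reaches libcfs_console_max_delay; once the console has
 * been quiet for max_delay + 10 seconds it shrinks by a factor of
 * backoff * 4 == 8. OR-ing with 1 keeps cdls_next non-zero, so the
 * "not first time ever" test above stays reliable.
 */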
538 cfs_print_to_console(&header, mask, file, msgdata->msg_fn,
540 cfs_trace_put_tcd(tcd);
542 struct va_format vaf;
544 va_start(ap, format);
547 cfs_vprint_to_console(&header, mask,
548 &vaf, file, msgdata->msg_fn);
552 if (cdls != NULL && cdls->cdls_count != 0) {
553 cfs_print_to_console(&header, mask, file,
555 "Skipped %d previous similar message%s\n",
557 (cdls->cdls_count > 1) ? "s" : "");
559 cdls->cdls_count = 0;
564 EXPORT_SYMBOL(libcfs_debug_msg);
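/*
 * Callers normally reach libcfs_debug_msg() through the CDEBUG()/CERROR()
 * macros rather than directly; roughly (a sketch, assuming the usual
 * LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, cdls) helper, not the exact
 * macro bodies):
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
 *	libcfs_debug_msg(&msgdata, "operation failed: rc = %d\n", rc);
 */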
567 cfs_trace_assertion_failed(const char *str,
568 struct libcfs_debug_msg_data *msgdata)
570 struct ptldebug_header hdr;
572 libcfs_panic_in_progress = 1;
573 libcfs_catastrophe = 1;
576 cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
578 cfs_print_to_console(&hdr, D_EMERG, msgdata->msg_file, msgdata->msg_fn,
581 panic("Lustre debug assertion failure\n");
587 panic_collect_pages(struct page_collection *pc)
589 /* Do the collect_pages job on a single CPU: this assumes that all
590 * other CPUs have been stopped during a panic. If that isn't true for
591 * some arch, it will have to be implemented separately there. */
594 struct cfs_trace_cpu_data *tcd;
596 INIT_LIST_HEAD(&pc->pc_pages);
598 cfs_tcd_for_each(tcd, i, j) {
599 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
600 tcd->tcd_cur_pages = 0;
604 static void collect_pages_on_all_cpus(struct page_collection *pc)
606 struct cfs_trace_cpu_data *tcd;
609 for_each_possible_cpu(cpu) {
610 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
611 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
612 tcd->tcd_cur_pages = 0;
617 static void collect_pages(struct page_collection *pc)
619 INIT_LIST_HEAD(&pc->pc_pages);
621 if (libcfs_panic_in_progress)
622 panic_collect_pages(pc);
624 collect_pages_on_all_cpus(pc);
627 static void put_pages_back_on_all_cpus(struct page_collection *pc)
629 struct cfs_trace_cpu_data *tcd;
630 struct list_head *cur_head;
631 struct cfs_trace_page *tage;
632 struct cfs_trace_page *tmp;
635 for_each_possible_cpu(cpu) {
636 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
637 cur_head = tcd->tcd_pages.next;
639 list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
642 __LASSERT_TAGE_INVARIANT(tage);
644 if (tage->cpu != cpu || tage->type != i)
647 cfs_tage_to_tail(tage, cur_head);
648 tcd->tcd_cur_pages++;
654 static void put_pages_back(struct page_collection *pc)
656 if (!libcfs_panic_in_progress)
657 put_pages_back_on_all_cpus(pc);
660 #ifdef LNET_DUMP_ON_PANIC
661 void cfs_trace_debug_print(void)
663 struct page_collection pc;
664 struct cfs_trace_page *tage;
665 struct cfs_trace_page *tmp;
669 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
672 __LASSERT_TAGE_INVARIANT(tage);
675 p = page_address(page);
676 while (p < ((char *)page_address(page) + tage->used)) {
677 struct ptldebug_header *hdr;
682 p += strlen(file) + 1;
685 len = hdr->ph_len - (int)(p - (char *)hdr);
687 cfs_print_to_console(hdr, D_EMERG, file, fn,
693 list_del(&tage->linkage);
696 down_write(&cfs_tracefile_sem);
697 while ((page = list_first_entry_or_null(&daemon_pages,
698 struct page, lru)) != NULL) {
701 p = page_address(page);
702 while (p < ((char *)page_address(page) + page->private)) {
703 struct ptldebug_header *hdr;
709 p += strlen(file) + 1;
712 len = hdr->ph_len - (int)(p - (char *)hdr);
714 cfs_print_to_console(hdr, D_EMERG, file, fn,
719 list_del_init(&page->lru);
720 daemon_pages_count -= 1;
723 up_write(&cfs_tracefile_sem);
725 #endif /* LNET_DUMP_ON_PANIC */
727 int cfs_tracefile_dump_all_pages(char *filename)
729 struct page_collection pc;
731 struct cfs_trace_page *tage;
732 struct cfs_trace_page *tmp;
737 down_write(&cfs_tracefile_sem);
739 filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
743 pr_err("LustreError: can't open %s for dump: rc = %d\n",
749 if (list_empty(&pc.pc_pages)) {
754 /* OK, for now just write the pages. In the future we'll build
755 * iobufs with the pages and call generic_direct_IO */
756 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
758 __LASSERT_TAGE_INVARIANT(tage);
760 buf = kmap(tage->page);
761 rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
763 if (rc != (int)tage->used) {
764 pr_warn("Lustre: wanted to write %u but wrote %d\n",
767 __LASSERT(list_empty(&pc.pc_pages));
770 list_del(&tage->linkage);
773 while ((page = list_first_entry_or_null(&daemon_pages,
774 struct page, lru)) != NULL) {
775 buf = page_address(page);
776 rc = cfs_kernel_write(filp, buf, page->private, &filp->f_pos);
777 if (rc != (int)page->private) {
778 pr_warn("Lustre: wanted to write %u but wrote %d\n",
779 (int)page->private, rc);
782 list_del(&page->lru);
783 daemon_pages_count -= 1;
786 rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
788 pr_err("LustreError: sync returns: rc = %d\n", rc);
790 filp_close(filp, NULL);
792 up_write(&cfs_tracefile_sem);
796 void cfs_trace_flush_pages(void)
798 struct page_collection pc;
799 struct cfs_trace_page *tage;
803 while (!list_empty(&pc.pc_pages)) {
804 tage = list_first_entry(&pc.pc_pages,
805 struct cfs_trace_page, linkage);
806 __LASSERT_TAGE_INVARIANT(tage);
808 list_del(&tage->linkage);
812 down_write(&cfs_tracefile_sem);
813 while ((page = list_first_entry_or_null(&daemon_pages,
814 struct page, lru)) != NULL) {
815 list_del(&page->lru);
816 daemon_pages_count -= 1;
819 up_write(&cfs_tracefile_sem);
822 int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
823 const char *knl_buffer, char *append)
825 /* NB if 'append' != NULL, it's a single character to append to the
826 * copied-out string - usually "\n" for /proc entries and "" (i.e. a
827 * terminating zero byte) for sysctl entries */
828 int nob = strlen(knl_buffer);
830 if (nob > usr_buffer_nob)
831 nob = usr_buffer_nob;
833 if (copy_to_user(usr_buffer, knl_buffer, nob))
836 if (append != NULL && nob < usr_buffer_nob) {
837 if (copy_to_user(usr_buffer + nob, append, 1))
845 EXPORT_SYMBOL(cfs_trace_copyout_string);
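/*
 * Illustrative call (a sketch): copy a NUL-terminated kernel string into
 * a user buffer for a /proc read, appending a newline when there is room:
 *
 *	rc = cfs_trace_copyout_string(usr_buf, usr_buf_nob, knl_str, "\n");
 */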
847 int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
853 str = memdup_user_nul(usr_str, usr_str_nob);
861 rc = cfs_tracefile_dump_all_pages(path);
867 int cfs_trace_daemon_command(char *str)
871 down_write(&cfs_tracefile_sem);
873 if (strcmp(str, "stop") == 0) {
874 up_write(&cfs_tracefile_sem);
875 cfs_trace_stop_thread();
876 down_write(&cfs_tracefile_sem);
877 memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
879 } else if (strncmp(str, "size=", 5) == 0) {
882 rc = kstrtoul(str + 5, 10, &tmp);
884 if (tmp < 10 || tmp > 20480)
885 cfs_tracefile_size = CFS_TRACEFILE_SIZE;
887 cfs_tracefile_size = tmp << 20;
889 } else if (strlen(str) >= sizeof(cfs_tracefile)) {
891 } else if (str[0] != '/') {
894 strcpy(cfs_tracefile, str);
896 pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
897 cfs_tracefile, (long)(cfs_tracefile_size >> 10));
899 cfs_trace_start_thread();
902 up_write(&cfs_tracefile_sem);
906 int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
911 str = memdup_user_nul(usr_str, usr_str_nob);
915 rc = cfs_trace_daemon_command(strim(str));
921 int cfs_trace_set_debug_mb(int mb)
926 unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT));
927 unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5);
928 struct cfs_trace_cpu_data *tcd;
930 if (mb < num_possible_cpus()) {
931 pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
932 mb, num_possible_cpus());
933 mb = num_possible_cpus();
937 pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n",
942 mb /= num_possible_cpus();
943 pages = mb << (20 - PAGE_SHIFT);
945 down_write(&cfs_tracefile_sem);
947 cfs_tcd_for_each(tcd, i, j)
948 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
950 daemon_pages_max = pages;
951 up_write(&cfs_tracefile_sem);
956 int cfs_trace_get_debug_mb(void)
960 struct cfs_trace_cpu_data *tcd;
963 down_read(&cfs_tracefile_sem);
965 cfs_tcd_for_each(tcd, i, j)
966 total_pages += tcd->tcd_max_pages;
968 up_read(&cfs_tracefile_sem);
971 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
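/*
 * Example of the MB <-> page arithmetic used here (illustrative): with
 * 4 KiB pages PAGE_SHIFT == 12, so one MB is 1 << (20 - 12) = 256 pages.
 * cfs_trace_set_debug_mb() first divides mb across the possible CPUs and
 * then splits each CPU's share between buffer types by tcd_pages_factor.
 */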
976 static int tracefiled(void *arg)
978 struct page_collection pc;
979 struct cfs_trace_page *tage;
980 struct cfs_trace_page *tmp;
987 LIST_HEAD(for_daemon_pages);
988 int for_daemon_pages_count = 0;
989 schedule_timeout_interruptible(cfs_time_seconds(1));
990 if (kthread_should_stop())
993 if (list_empty(&pc.pc_pages))
997 down_read(&cfs_tracefile_sem);
998 if (cfs_tracefile[0] != 0) {
999 filp = filp_open(cfs_tracefile,
1000 O_CREAT | O_RDWR | O_LARGEFILE,
1005 pr_warn("Lustre: couldn't open %s: rc = %d\n",
1009 up_read(&cfs_tracefile_sem);
1011 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
1012 __LASSERT_TAGE_INVARIANT(tage);
1015 struct dentry *de = file_dentry(filp);
1016 static loff_t f_pos;
1018 if (f_pos >= (off_t)cfs_tracefile_size)
1020 else if (f_pos > i_size_read(de->d_inode))
1021 f_pos = i_size_read(de->d_inode);
1023 buf = kmap(tage->page);
1024 rc = cfs_kernel_write(filp, buf, tage->used,
1027 if (rc != (int)tage->used) {
1028 pr_warn("Lustre: wanted to write %u but wrote %d\n",
1030 put_pages_back(&pc);
1031 __LASSERT(list_empty(&pc.pc_pages));
1035 list_del_init(&tage->linkage);
1036 list_add_tail(&tage->page->lru, &for_daemon_pages);
1037 for_daemon_pages_count += 1;
1039 tage->page->private = (int)tage->used;
1041 atomic_dec(&cfs_tage_allocated);
1045 filp_close(filp, NULL);
1047 down_write(&cfs_tracefile_sem);
1048 list_splice_tail(&for_daemon_pages, &daemon_pages);
1049 daemon_pages_count += for_daemon_pages_count;
1050 while (daemon_pages_count > daemon_pages_max) {
1051 struct page *p = list_first_entry(&daemon_pages,
1055 daemon_pages_count -= 1;
1057 up_write(&cfs_tracefile_sem);
1059 if (!list_empty(&pc.pc_pages)) {
1062 pr_alert("Lustre: trace pages aren't empty\n");
1063 pr_err("Lustre: total cpus(%d): ", num_possible_cpus());
1064 for (i = 0; i < num_possible_cpus(); i++)
1066 pr_cont("%d(on) ", i);
1068 pr_cont("%d(off) ", i);
1072 list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
1074 pr_err("Lustre: page %d belongs to cpu %d\n",
1076 pr_err("Lustre: There are %d pages unwritten\n", i);
1078 __LASSERT(list_empty(&pc.pc_pages));
1084 int cfs_trace_start_thread(void)
1086 struct task_struct *tsk;
1092 tsk = kthread_create(tracefiled, NULL, "ktracefiled");
1095 else if (cmpxchg(&tctl_task, NULL, tsk) != NULL)
1096 /* already running */
1099 wake_up_process(tsk);
1104 void cfs_trace_stop_thread(void)
1106 struct task_struct *tsk;
1108 tsk = xchg(&tctl_task, NULL);
1110 pr_info("Lustre: shutting down debug daemon thread...\n");
1115 /* percentages of the total debug memory to give each type */
1116 static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = {
1117 80, /* 80% pages for CFS_TCD_TYPE_PROC */
1118 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
1119 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
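/*
 * Example split (illustrative): with max_pages == 1000 passed to
 * cfs_tracefile_init() below, each CPU gets 800 pages for process
 * context, 100 for softirq context and 100 for irq context.
 */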
1122 int cfs_tracefile_init(int max_pages)
1124 struct cfs_trace_cpu_data *tcd;
1128 /* initialize trace_data */
1129 memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
1130 for (i = 0; i < CFS_TCD_TYPE_CNT; i++) {
1132 kmalloc_array(num_possible_cpus(),
1133 sizeof(union cfs_trace_data_union),
1135 if (!cfs_trace_data[i])
1136 goto out_trace_data;
1139 /* initialize arch-related per-CPU info */
1140 cfs_tcd_for_each(tcd, i, j) {
1141 int factor = pages_factor[i];
1143 /* Note that we have three separate spin_lock_init()
1144 * calls so that the locks get three separate classes
1145 * and lockdep never thinks they are related. As they
1146 * are used in different interrupt contexts, lockdep
1147 * would otherwise think that the usage would conflict.
1150 case CFS_TCD_TYPE_PROC:
1151 spin_lock_init(&tcd->tcd_lock);
1153 case CFS_TCD_TYPE_SOFTIRQ:
1154 spin_lock_init(&tcd->tcd_lock);
1156 case CFS_TCD_TYPE_IRQ:
1157 spin_lock_init(&tcd->tcd_lock);
1160 tcd->tcd_pages_factor = factor;
1164 INIT_LIST_HEAD(&tcd->tcd_pages);
1165 INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1166 tcd->tcd_cur_pages = 0;
1167 tcd->tcd_cur_stock_pages = 0;
1168 tcd->tcd_max_pages = (max_pages * factor) / 100;
1169 LASSERT(tcd->tcd_max_pages > 0);
1170 tcd->tcd_shutting_down = 0;
1172 daemon_pages_max = max_pages;
1177 for (i = 0; cfs_trace_data[i]; i++) {
1178 kfree(cfs_trace_data[i]);
1179 cfs_trace_data[i] = NULL;
1181 pr_err("lnet: Not enough memory\n");
1185 static void trace_cleanup_on_all_cpus(void)
1187 struct cfs_trace_cpu_data *tcd;
1188 struct cfs_trace_page *tage;
1191 for_each_possible_cpu(cpu) {
1192 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
1193 if (!tcd->tcd_pages_factor)
1194 /* Not initialised */
1196 tcd->tcd_shutting_down = 1;
1198 while (!list_empty(&tcd->tcd_pages)) {
1199 tage = list_first_entry(&tcd->tcd_pages,
1200 struct cfs_trace_page,
1202 __LASSERT_TAGE_INVARIANT(tage);
1204 list_del(&tage->linkage);
1205 cfs_tage_free(tage);
1207 tcd->tcd_cur_pages = 0;
1212 static void cfs_trace_cleanup(void)
1214 struct page_collection pc;
1217 INIT_LIST_HEAD(&pc.pc_pages);
1219 trace_cleanup_on_all_cpus();
1221 for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) {
1222 kfree(cfs_trace_data[i]);
1223 cfs_trace_data[i] = NULL;
1227 void cfs_tracefile_exit(void)
1229 cfs_trace_stop_thread();
1230 cfs_trace_flush_pages();
1231 cfs_trace_cleanup();