/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <linux/kthread.h>
#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
static DEFINE_MUTEX(cfs_trace_thread_mutex);
static int thread_running = 0;

static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd);
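
/*
 * Trace pages are kept per CPU and per trace context: cfs_trace_data is
 * indexed first by context type (TCD_MAX_TYPES slots, e.g. process, softirq
 * and irq context) and then by CPU, so each CPU owns a private
 * struct cfs_trace_cpu_data for every context and writers never contend on
 * a shared list.
 */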
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
	return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
{
	struct page *page;
	struct cfs_trace_page *tage;

	/* My caller is trying to free memory */
	if (!in_interrupt() && memory_pressure_get())
		return NULL;

	/*
	 * Don't spam console with allocation failures: they will be reported
	 * by upper layer anyway.
	 */
	gfp |= __GFP_NOWARN;
	page = alloc_page(gfp);
	if (page == NULL)
		return NULL;

	tage = kmalloc(sizeof(*tage), gfp);
	if (tage == NULL) {
		__free_page(page);
		return NULL;
	}

	tage->page = page;
	atomic_inc(&cfs_tage_allocated);
	return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
	__LASSERT(tage != NULL);
	__LASSERT(tage->page != NULL);

	__free_page(tage->page);
	kfree(tage);
	atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
			     struct list_head *queue)
{
	__LASSERT(tage != NULL);
	__LASSERT(queue != NULL);

	list_move_tail(&tage->linkage, queue);
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
			   struct list_head *stock)
{
	int i;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */
	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
		struct cfs_trace_page *tage;

		tage = cfs_tage_alloc(gfp);
		if (tage == NULL)
			break;
		list_add_tail(&tage->linkage, stock);
	}
	return i;
}
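
/*
 * Page allocation order in cfs_trace_get_tage_try() below: reuse the tail
 * of the current page when the message still fits, otherwise take a page
 * from the pre-filled per-CPU stock (see cfs_trace_refill_stock() above),
 * and only as a last resort fall back to a GFP_ATOMIC allocation.  Once a
 * CPU has accumulated more than 8 pages the tracefiled daemon is woken to
 * drain them to disk.
 */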
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
	struct cfs_trace_page *tage;

	if (tcd->tcd_cur_pages > 0) {
		__LASSERT(!list_empty(&tcd->tcd_pages));
		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
		if (tage->used + len <= PAGE_SIZE)
			return tage;
	}

	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
		if (tcd->tcd_cur_stock_pages > 0) {
			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
			--tcd->tcd_cur_stock_pages;
			list_del_init(&tage->linkage);
		} else {
			tage = cfs_tage_alloc(GFP_ATOMIC);
			if (unlikely(tage == NULL)) {
				if ((!memory_pressure_get() ||
				     in_interrupt()) && printk_ratelimit())
					printk(KERN_WARNING
					       "cannot allocate a tage (%ld)\n",
					       tcd->tcd_cur_pages);
				return NULL;
			}
		}

		tage->used = 0;
		tage->cpu = smp_processor_id();
		tage->type = tcd->tcd_type;
		list_add_tail(&tage->linkage, &tcd->tcd_pages);
		tcd->tcd_cur_pages++;

		if (tcd->tcd_cur_pages > 8 && thread_running) {
			struct tracefiled_ctl *tctl = &trace_tctl;
			/*
			 * wake up tracefiled to process some pages.
			 */
			wake_up(&tctl->tctl_waitq);
		}
		return tage;
	}
	return NULL;
}
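
/*
 * When a CPU's buffer is full and no new page can be obtained,
 * cfs_tcd_shrink() below drops roughly the oldest 10% of that CPU's pages;
 * they are handed to the daemon's per-CPU list (where the oldest entries
 * are eventually recycled) rather than freed outright.
 */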
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
	int pgcount = tcd->tcd_cur_pages / 10;
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */
	if (printk_ratelimit())
		printk(KERN_WARNING "debug daemon buffer overflowed; "
		       "discarding 10%% of pages (%d of %ld)\n",
		       pgcount + 1, tcd->tcd_cur_pages);

	INIT_LIST_HEAD(&pc.pc_pages);

	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
		if (pgcount-- == 0)
			break;

		list_move_tail(&tage->linkage, &pc.pc_pages);
		tcd->tcd_cur_pages--;
	}
	put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
						 unsigned long len)
{
	struct cfs_trace_page *tage;

	/*
	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */
	if (len > PAGE_SIZE) {
		printk(KERN_ERR
		       "cowardly refusing to write %lu bytes in a page\n", len);
		return NULL;
	}

	tage = cfs_trace_get_tage_try(tcd, len);
	if (tage != NULL)
		return tage;
	if (thread_running)
		cfs_tcd_shrink(tcd);
	if (tcd->tcd_cur_pages > 0) {
		tage = cfs_tage_from_list(tcd->tcd_pages.next);
		tage->used = 0;
		cfs_tage_to_tail(tage, &tcd->tcd_pages);
	}
	return tage;
}
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
		     const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
	va_end(args);

	return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
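
/*
 * libcfs_debug_vmsg2() below renders one debug record into the current
 * trace page.  The on-page layout, in order, is: an optional binary
 * struct ptldebug_header (when libcfs_debug_binary is set), the basename of
 * the source file and the function name as NUL-terminated strings, then the
 * formatted message text; header.ph_len records the total
 * (known_size + needed).  Console output is throttled per call site via
 * struct cfs_debug_limit_state: cdls_delay grows by libcfs_console_backoff
 * while a site stays noisy, shrinks by backoff * 4 after a long quiet spell,
 * is clamped to [libcfs_console_min_delay, libcfs_console_max_delay], and
 * skipped messages are counted and summarized once printing resumes.
 */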
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
		       const char *format1, va_list args,
		       const char *format2, ...)
{
	struct cfs_trace_cpu_data *tcd = NULL;
	struct ptldebug_header header = {0};
	struct cfs_trace_page *tage;
	/* string_buf is used only if tcd != NULL, and is always set then */
	char *string_buf = NULL;
	char *debug_buf;
	int known_size;
	int needed = 85; /* average message length */
	int max_nob;
	va_list ap;
	int i;
	int remain;
	int mask = msgdata->msg_mask;
	char *file = (char *)msgdata->msg_file;
	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;

	if (strchr(file, '/'))
		file = strrchr(file, '/') + 1;

	tcd = cfs_trace_get_tcd();

	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
	 * pins us to a particular CPU. This avoids an smp_processor_id()
	 * warning on Linux when debugging is enabled. */
	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

	if (tcd == NULL)		/* arch may not log in IRQ context */
		goto console;

	if (tcd->tcd_cur_pages == 0)
		header.ph_flags |= PH_FLAG_FIRST_RECORD;

	if (tcd->tcd_shutting_down) {
		cfs_trace_put_tcd(tcd);
		tcd = NULL;
		goto console;
	}

	known_size = strlen(file) + 1;
	if (msgdata->msg_fn)
		known_size += strlen(msgdata->msg_fn) + 1;

	if (libcfs_debug_binary)
		known_size += sizeof(header);

	/*
	 * '2' used because vsnprintf returns the real size required for output
	 * _without_ the terminating NUL, so we retry once with a bigger page
	 * if 'needed' turns out too small for this format.
	 */
	for (i = 0; i < 2; i++) {
		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
		if (tage == NULL) {
			if (needed + known_size > PAGE_SIZE)
				mask |= D_ERROR;

			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		string_buf = (char *)page_address(tage->page) +
			     tage->used + known_size;

		max_nob = PAGE_SIZE - tage->used - known_size;
		if (max_nob <= 0) {
			printk(KERN_EMERG "negative max_nob: %d\n",
			       max_nob);
			mask |= D_ERROR;
			cfs_trace_put_tcd(tcd);
			tcd = NULL;
			goto console;
		}

		needed = 0;
		if (format1 != NULL) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf, max_nob, format1, ap);
			va_end(ap);
		}

		if (format2 != NULL) {
			remain = max_nob - needed;
			if (remain < 0)
				remain = 0;

			va_start(ap, format2);
			needed += vsnprintf(string_buf + needed, remain,
					    format2, ap);
			va_end(ap);
		}

		if (needed < max_nob) /* well. printing ok.. */
			break;
	}

	if (*(string_buf + needed - 1) != '\n')
		printk(KERN_INFO "format at %s:%d:%s doesn't end in "
		       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);

	header.ph_len = known_size + needed;
	debug_buf = (char *)page_address(tage->page) + tage->used;

	if (libcfs_debug_binary) {
		memcpy(debug_buf, &header, sizeof(header));
		tage->used += sizeof(header);
		debug_buf += sizeof(header);
	}

	strcpy(debug_buf, file);
	tage->used += strlen(file) + 1;
	debug_buf += strlen(file) + 1;

	if (msgdata->msg_fn) {
		strcpy(debug_buf, msgdata->msg_fn);
		tage->used += strlen(msgdata->msg_fn) + 1;
		debug_buf += strlen(msgdata->msg_fn) + 1;
	}

	__LASSERT(debug_buf == string_buf);

	tage->used += needed;
	__LASSERT(tage->used <= PAGE_SIZE);

console:
	if ((mask & libcfs_printk) == 0) {
		/* no console output requested */
		if (tcd != NULL)
			cfs_trace_put_tcd(tcd);
		return 1;
	}

	if (cdls != NULL) {
		if (libcfs_console_ratelimit &&
		    cdls->cdls_next != 0 &&	/* not first time ever */
		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
			/* skipping a console message */
			cdls->cdls_count++;
			if (tcd != NULL)
				cfs_trace_put_tcd(tcd);
			return 1;
		}

		if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
						       libcfs_console_max_delay
						       + cfs_time_seconds(10))) {
			/* last timeout was a long time ago */
			cdls->cdls_delay /= libcfs_console_backoff * 4;
		} else {
			cdls->cdls_delay *= libcfs_console_backoff;
		}

		if (cdls->cdls_delay < libcfs_console_min_delay)
			cdls->cdls_delay = libcfs_console_min_delay;
		else if (cdls->cdls_delay > libcfs_console_max_delay)
			cdls->cdls_delay = libcfs_console_max_delay;

		/* ensure cdls_next is never zero after it's been seen */
		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
	}

	if (tcd != NULL) {
		cfs_print_to_console(&header, mask, string_buf, needed, file,
				     msgdata->msg_fn);
		cfs_trace_put_tcd(tcd);
	} else {
		string_buf = cfs_trace_get_console_buffer();

		needed = 0;
		if (format1 != NULL) {
			va_copy(ap, args);
			needed = vsnprintf(string_buf,
					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
					   format1, ap);
			va_end(ap);
		}
		if (format2 != NULL) {
			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
			if (remain > 0) {
				va_start(ap, format2);
				needed += vsnprintf(string_buf + needed, remain,
						    format2, ap);
				va_end(ap);
			}
		}
		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
	}

	if (cdls != NULL && cdls->cdls_count != 0) {
		string_buf = cfs_trace_get_console_buffer();

		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
				  "Skipped %d previous similar message%s\n",
				  cdls->cdls_count,
				  (cdls->cdls_count > 1) ? "s" : "");

		cfs_print_to_console(&header, mask,
				     string_buf, needed, file, msgdata->msg_fn);

		cfs_trace_put_console_buffer(string_buf);
		cdls->cdls_count = 0;
	}

	return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);
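
/*
 * Once an assertion fails we flag libcfs_panic_in_progress, so the page
 * collection helpers below (see panic_collect_pages()) stop taking the
 * per-CPU locks and assume all other CPUs have already been stopped.
 */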
void
cfs_trace_assertion_failed(const char *str,
			   struct libcfs_debug_msg_data *msgdata)
{
	struct ptldebug_header hdr;

	libcfs_panic_in_progress = 1;
	libcfs_catastrophe = 1;
	smp_mb();

	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
			     msgdata->msg_file, msgdata->msg_fn);

	panic("Lustre debug assertion failure\n");

	/* not reached */
}
static void
panic_collect_pages(struct page_collection *pc)
{
	/* Do the collect_pages job on a single CPU: assumes that all other
	 * CPUs have been stopped during a panic. If this isn't true for some
	 * arch, this will have to be implemented separately in each arch. */
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;

	INIT_LIST_HEAD(&pc->pc_pages);

	cfs_tcd_for_each(tcd, i, j) {
		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
		tcd->tcd_cur_pages = 0;

		if (pc->pc_want_daemon_pages) {
			list_splice_init(&tcd->tcd_daemon_pages,
					 &pc->pc_pages);
			tcd->tcd_cur_daemon_pages = 0;
		}
	}
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
			tcd->tcd_cur_pages = 0;
			if (pc->pc_want_daemon_pages) {
				list_splice_init(&tcd->tcd_daemon_pages,
						 &pc->pc_pages);
				tcd->tcd_cur_daemon_pages = 0;
			}
		}
	}
}
static void collect_pages(struct page_collection *pc)
{
	INIT_LIST_HEAD(&pc->pc_pages);

	if (libcfs_panic_in_progress)
		panic_collect_pages(pc);
	else
		collect_pages_on_all_cpus(pc);
}
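
/*
 * put_pages_back_on_all_cpus() undoes a collect_pages(): every page in the
 * collection is matched back to the cfs_trace_cpu_data it came from via
 * tage->cpu and tage->type, and is inserted ahead of whatever that CPU has
 * logged since (cur_head is sampled before the walk), so chronological
 * order within each per-CPU list is preserved.
 */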
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	struct list_head *cur_head;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			cur_head = tcd->tcd_pages.next;

			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
						 linkage) {

				__LASSERT_TAGE_INVARIANT(tage);

				if (tage->cpu != cpu || tage->type != i)
					continue;

				cfs_tage_to_tail(tage, cur_head);
				tcd->tcd_cur_pages++;
			}
		}
	}
}

static void put_pages_back(struct page_collection *pc)
{
	if (!libcfs_panic_in_progress)
		put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
					 struct cfs_trace_cpu_data *tcd)
{
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
		__LASSERT_TAGE_INVARIANT(tage);

		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
			continue;

		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
		tcd->tcd_cur_daemon_pages++;

		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
			struct cfs_trace_page *victim;

			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

			__LASSERT_TAGE_INVARIANT(victim);

			list_del(&victim->linkage);
			cfs_tage_free(victim);
			tcd->tcd_cur_daemon_pages--;
		}
	}
}

static void put_pages_on_daemon_list(struct page_collection *pc)
{
	struct cfs_trace_cpu_data *tcd;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu)
			put_pages_on_tcd_daemon_list(pc, tcd);
	}
}
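
/*
 * cfs_trace_debug_print() walks every collected page (daemon pages
 * included), decodes each record in place - ptldebug_header, file name,
 * function name, then the message text - prints it straight to the console
 * and frees the page.  It is a last-ditch dump: the trace buffers are empty
 * afterwards.
 */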
void cfs_trace_debug_print(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
		char *p, *file, *fn;
		struct page *page;

		__LASSERT_TAGE_INVARIANT(tage);

		page = tage->page;
		p = page_address(page);
		while (p < ((char *)page_address(page) + tage->used)) {
			struct ptldebug_header *hdr;
			int len;

			hdr = (void *)p;
			p += sizeof(*hdr);
			file = p;
			p += strlen(file) + 1;
			fn = p;
			p += strlen(fn) + 1;
			len = hdr->ph_len - (int)(p - (char *)hdr);

			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

			p += len;
		}

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
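
/*
 * cfs_tracefile_dump_all_pages() writes every collected page to 'filename'.
 * The file is opened O_CREAT|O_EXCL, so an existing dump is never silently
 * overwritten; the open fails (-EEXIST) and the caller must pick another
 * name.
 */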
int cfs_tracefile_dump_all_pages(char *filename)
{
	struct page_collection pc;
	struct file *filp;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	mm_segment_t __oldfs;
	char *buf;
	int rc;

	cfs_tracefile_write_lock();

	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
	if (IS_ERR(filp)) {
		rc = PTR_ERR(filp);
		filp = NULL;
		printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
		       filename, rc);
		goto out;
	}

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	if (list_empty(&pc.pc_pages)) {
		rc = 0;
		goto close;
	}
	__oldfs = get_fs();
	set_fs(KERNEL_DS);

	/* ok, for now, just write the pages. in the future we'll be building
	 * iobufs with the pages and calling generic_direct_IO */
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		buf = kmap(tage->page);
		rc = vfs_write(filp, (__force const char __user *)buf,
			       tage->used, &filp->f_pos);
		kunmap(tage->page);
		if (rc != (int)tage->used) {
			printk(KERN_WARNING "wanted to write %u but wrote "
			       "%d\n", tage->used, rc);
			put_pages_back(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			break;
		}
		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
	set_fs(__oldfs);
	rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
	if (rc)
		printk(KERN_ERR "sync returns %d\n", rc);
close:
	filp_close(filp, NULL);
out:
	cfs_tracefile_write_unlock();
	return rc;
}
void cfs_trace_flush_pages(void)
{
	struct page_collection pc;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;

	pc.pc_want_daemon_pages = 1;
	collect_pages(&pc);
	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

		__LASSERT_TAGE_INVARIANT(tage);

		list_del(&tage->linkage);
		cfs_tage_free(tage);
	}
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
			    const char __user *usr_buffer, int usr_buffer_nob)
{
	int nob;

	if (usr_buffer_nob > knl_buffer_nob)
		return -EOVERFLOW;

	if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
		return -EFAULT;

	nob = strnlen(knl_buffer, usr_buffer_nob);
	while (--nob >= 0)		/* strip trailing whitespace */
		if (!isspace(knl_buffer[nob]))
			break;

	if (nob < 0)			/* empty string */
		return -EINVAL;

	if (nob == knl_buffer_nob)	/* no space to terminate */
		return -EOVERFLOW;

	knl_buffer[nob + 1] = 0;	/* terminate */
	return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
			     const char *knl_buffer, char *append)
{
	/* NB if 'append' != NULL, it's a single character to append to the
	 * copied out string - usually "\n", for /proc entries and "" (i.e. a
	 * terminating zero byte) for sysctl entries */
	int nob = strlen(knl_buffer);

	if (nob > usr_buffer_nob)
		nob = usr_buffer_nob;

	if (copy_to_user(usr_buffer, knl_buffer, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_buffer_nob) {
		if (copy_to_user(usr_buffer + nob, append, 1))
			return -EFAULT;

		nob++;
	}

	return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
	if (nob > 2 * PAGE_SIZE)	/* string must be "sensible" */
		return -EINVAL;

	*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
	if (*str == NULL)
		return -ENOMEM;

	return 0;
}
int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc != 0)
		goto out;

	if (str[0] != '/') {
		rc = -EINVAL;
		goto out;
	}
	rc = cfs_tracefile_dump_all_pages(str);
out:
	kfree(str);
	return rc;
}
int cfs_trace_daemon_command(char *str)
{
	int rc = 0;

	cfs_tracefile_write_lock();

	if (strcmp(str, "stop") == 0) {
		cfs_tracefile_write_unlock();
		cfs_trace_stop_thread();
		cfs_tracefile_write_lock();
		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

	} else if (strncmp(str, "size=", 5) == 0) {
		cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
		if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
			cfs_tracefile_size = CFS_TRACEFILE_SIZE;
		else
			cfs_tracefile_size <<= 20;

	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
		rc = -ENAMETOOLONG;
	} else if (str[0] != '/') {
		rc = -EINVAL;
	} else {
		strcpy(cfs_tracefile, str);

		printk(KERN_INFO
		       "Lustre: debug daemon will attempt to start writing "
		       "to %s (%lukB max)\n", cfs_tracefile,
		       (long)(cfs_tracefile_size >> 10));

		cfs_trace_start_thread();
	}

	cfs_tracefile_write_unlock();
	return rc;
}
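
/*
 * Accepted commands, as parsed above: "stop" shuts the daemon down and
 * clears the trace file name; "size=<MB>" caps the output file, with values
 * outside 10..20480 MB falling back to CFS_TRACEFILE_SIZE; anything else
 * must be an absolute path, which becomes the output file and starts the
 * daemon.  For example, "size=64" followed by "/tmp/lustre-log" (an
 * illustrative path) writes up to 64 MB of trace data to that file.
 */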
int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
{
	char *str;
	int rc;

	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
	if (rc != 0)
		return rc;

	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
				     usr_str, usr_str_nob);
	if (rc == 0)
		rc = cfs_trace_daemon_command(str);

	kfree(str);
	return rc;
}
int cfs_trace_set_debug_mb(int mb)
{
	int i;
	int j;
	int pages;
	int limit = cfs_trace_max_debug_mb();
	struct cfs_trace_cpu_data *tcd;

	if (mb < num_possible_cpus()) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too small for debug buffer size, "
		       "setting it to %d MB.\n", mb, num_possible_cpus());
		mb = num_possible_cpus();
	}

	if (mb > limit) {
		printk(KERN_WARNING
		       "Lustre: %d MB is too large for debug buffer size, "
		       "setting it to %d MB.\n", mb, limit);
		mb = limit;
	}

	mb /= num_possible_cpus();
	pages = mb << (20 - PAGE_SHIFT);

	cfs_tracefile_write_lock();

	cfs_tcd_for_each(tcd, i, j)
		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

	cfs_tracefile_write_unlock();

	return 0;
}
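
/*
 * Worked example (illustrative numbers): with 4 possible CPUs, 4 KiB pages
 * and mb = 256, each CPU gets 256 / 4 = 64 MB, i.e. 64 << (20 - 12) =
 * 16384 pages, and a context whose tcd_pages_factor is 80 ends up with
 * 16384 * 80 / 100 = 13107 tcd_max_pages.
 */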
int cfs_trace_get_debug_mb(void)
{
	int i;
	int j;
	struct cfs_trace_cpu_data *tcd;
	int total_pages = 0;

	cfs_tracefile_read_lock();

	cfs_tcd_for_each(tcd, i, j)
		total_pages += tcd->tcd_max_pages;

	cfs_tracefile_read_unlock();

	return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
{
	struct page_collection pc;
	struct tracefiled_ctl *tctl = arg;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	mm_segment_t __oldfs;
	struct file *filp;
	char *buf;
	int last_loop = 0;
	int rc;

	/* we're started late enough that we pick up init's fs context */
	/* this is so broken in uml? what on earth is going on? */

	complete(&tctl->tctl_start);

	while (1) {
		wait_queue_t __wait;

		pc.pc_want_daemon_pages = 0;
		collect_pages(&pc);
		if (list_empty(&pc.pc_pages))
			goto end_loop;

		filp = NULL;
		cfs_tracefile_read_lock();
		if (cfs_tracefile[0] != 0) {
			filp = filp_open(cfs_tracefile,
					 O_CREAT | O_RDWR | O_LARGEFILE,
					 0600);
			if (IS_ERR(filp)) {
				rc = PTR_ERR(filp);
				filp = NULL;
				printk(KERN_WARNING "couldn't open %s: "
				       "%d\n", cfs_tracefile, rc);
			}
		}
		cfs_tracefile_read_unlock();
		if (filp == NULL) {
			put_pages_on_daemon_list(&pc);
			__LASSERT(list_empty(&pc.pc_pages));
			goto end_loop;
		}
		__oldfs = get_fs();
		set_fs(KERNEL_DS);

		list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
			struct dentry *de = file_dentry(filp);
			static loff_t f_pos;

			__LASSERT_TAGE_INVARIANT(tage);

			if (f_pos >= (off_t)cfs_tracefile_size)
				f_pos = 0;
			else if (f_pos > i_size_read(de->d_inode))
				f_pos = i_size_read(de->d_inode);

			buf = kmap(tage->page);
			rc = vfs_write(filp, (__force const char __user *)buf,
				       tage->used, &f_pos);
			kunmap(tage->page);
			if (rc != (int)tage->used) {
				printk(KERN_WARNING "wanted to write %u "
				       "but wrote %d\n", tage->used, rc);
				put_pages_back(&pc);
				__LASSERT(list_empty(&pc.pc_pages));
				break;
			}
		}
		set_fs(__oldfs);
		filp_close(filp, NULL);
		put_pages_on_daemon_list(&pc);
		if (!list_empty(&pc.pc_pages)) {
			int i;

			printk(KERN_ALERT "Lustre: trace pages aren't "
			       "empty\n");
			printk(KERN_ERR "total cpus(%d): ",
			       num_possible_cpus());
			for (i = 0; i < num_possible_cpus(); i++)
				if (cpu_online(i))
					printk(KERN_ERR "%d(on) ", i);
				else
					printk(KERN_ERR "%d(off) ", i);
			printk(KERN_ERR "\n");

			i = 0;
			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
						 linkage)
				printk(KERN_ERR "page %d belongs to cpu "
				       "%d\n", ++i, tage->cpu);
			printk(KERN_ERR "There are %d pages unwritten\n",
			       i);
		}
		__LASSERT(list_empty(&pc.pc_pages));
end_loop:
		if (atomic_read(&tctl->tctl_shutdown)) {
			if (last_loop == 0) {
				last_loop = 1;
				continue;
			} else {
				break;
			}
		}
		init_waitqueue_entry(&__wait, current);
		add_wait_queue(&tctl->tctl_waitq, &__wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
		remove_wait_queue(&tctl->tctl_waitq, &__wait);
	}
	complete(&tctl->tctl_stop);
	return 0;
}
int cfs_trace_start_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;
	int rc = 0;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running)
		goto out;

	init_completion(&tctl->tctl_start);
	init_completion(&tctl->tctl_stop);
	init_waitqueue_head(&tctl->tctl_waitq);
	atomic_set(&tctl->tctl_shutdown, 0);

	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
		rc = -ECHILD;
		goto out;
	}

	wait_for_completion(&tctl->tctl_start);
	thread_running = 1;
out:
	mutex_unlock(&cfs_trace_thread_mutex);
	return rc;
}
void cfs_trace_stop_thread(void)
{
	struct tracefiled_ctl *tctl = &trace_tctl;

	mutex_lock(&cfs_trace_thread_mutex);
	if (thread_running) {
		printk(KERN_INFO
		       "Lustre: shutting down debug daemon thread...\n");
		atomic_set(&tctl->tctl_shutdown, 1);
		wait_for_completion(&tctl->tctl_stop);
		thread_running = 0;
	}
	mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
{
	struct cfs_trace_cpu_data *tcd;
	int i;
	int j;
	int rc;
	int factor;

	rc = cfs_tracefile_init_arch();
	if (rc != 0)
		return rc;

	cfs_tcd_for_each(tcd, i, j) {
		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
		factor = tcd->tcd_pages_factor;
		INIT_LIST_HEAD(&tcd->tcd_pages);
		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
		tcd->tcd_cur_pages = 0;
		tcd->tcd_cur_stock_pages = 0;
		tcd->tcd_cur_daemon_pages = 0;
		tcd->tcd_max_pages = (max_pages * factor) / 100;
		LASSERT(tcd->tcd_max_pages > 0);
		tcd->tcd_shutting_down = 0;
	}
	return 0;
}
static void trace_cleanup_on_all_cpus(void)
{
	struct cfs_trace_cpu_data *tcd;
	struct cfs_trace_page *tage;
	struct cfs_trace_page *tmp;
	int i, cpu;

	for_each_possible_cpu(cpu) {
		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
			tcd->tcd_shutting_down = 1;

			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
				__LASSERT_TAGE_INVARIANT(tage);

				list_del(&tage->linkage);
				cfs_tage_free(tage);
			}
			tcd->tcd_cur_pages = 0;
		}
	}
}
static void cfs_trace_cleanup(void)
{
	struct page_collection pc;

	INIT_LIST_HEAD(&pc.pc_pages);

	trace_cleanup_on_all_cpus();

	cfs_tracefile_fini_arch();
}
void cfs_tracefile_exit(void)
{
	cfs_trace_stop_thread();
	cfs_trace_cleanup();
}