/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/tracefile.c
 *
 * Author: Zach Brown <zab@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;

char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;

atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
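
/*
 * Layout note: for each trace context type, cfs_trace_data[type] points
 * at an NR_CPUS-sized array of per-CPU trace buffers (the union above),
 * and cfs_tage_allocated counts the trace pages currently allocated
 * across all of them (incremented in cfs_tage_alloc(), decremented in
 * cfs_tage_free()).
 */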
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);
static inline struct cfs_trace_page *
cfs_tage_from_list(struct list_head *list)
{
        return list_entry(list, struct cfs_trace_page, linkage);
}
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
        struct page           *page;
        struct cfs_trace_page *tage;

        /* My caller is trying to free memory */
        if (!in_interrupt() && memory_pressure_get())
                return NULL;

        /*
         * Don't spam console with allocation failures: they will be reported
         * by upper layer anyway.
         */
        gfp |= __GFP_NOWARN;
        page = alloc_page(gfp);
        if (page == NULL)
                return NULL;

        tage = kmalloc(sizeof(*tage), gfp);
        if (tage == NULL) {
                __free_page(page);
                return NULL;
        }

        tage->page = page;
        atomic_inc(&cfs_tage_allocated);
        return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
        __LASSERT(tage != NULL);
        __LASSERT(tage->page != NULL);

        __free_page(tage->page);
        kfree(tage);
        atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
                             struct list_head *queue)
{
        __LASSERT(tage != NULL);
        __LASSERT(queue != NULL);

        list_move_tail(&tage->linkage, queue);
}
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
                           struct list_head *stock)
{
        int i;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
                struct cfs_trace_page *tage;

                tage = cfs_tage_alloc(gfp);
                if (tage == NULL)
                        break;
                list_add_tail(&tage->linkage, stock);
        }
        return i;
}
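
/*
 * The stock refilled above is consumed by cfs_trace_get_tage_try()
 * below, which prefers a preallocated stock page and only falls back
 * to a GFP_ATOMIC allocation when the stock is empty, making the
 * logging fast path less likely to fail under memory pressure.
 */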
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *
cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
{
        struct cfs_trace_page *tage;

        if (tcd->tcd_cur_pages > 0) {
                __LASSERT(!list_empty(&tcd->tcd_pages));
                tage = cfs_tage_from_list(tcd->tcd_pages.prev);
                if (tage->used + len <= PAGE_CACHE_SIZE)
                        return tage;
        }

        if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
                if (tcd->tcd_cur_stock_pages > 0) {
                        tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
                        --tcd->tcd_cur_stock_pages;
                        list_del_init(&tage->linkage);
                } else {
                        tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
                                if ((!memory_pressure_get() ||
                                     in_interrupt()) && printk_ratelimit())
                                        printk(KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
                                               tcd->tcd_cur_pages);
                                return NULL;
                        }
                }

                tage->used = 0;
                tage->cpu = smp_processor_id();
                tage->type = tcd->tcd_type;
                list_add_tail(&tage->linkage, &tcd->tcd_pages);
                tcd->tcd_cur_pages++;

                if (tcd->tcd_cur_pages > 8 && thread_running) {
                        struct tracefiled_ctl *tctl = &trace_tctl;
                        /*
                         * wake up tracefiled to process some pages.
                         */
                        wake_up(&tctl->tctl_waitq);
                }
                return tage;
        }
        return NULL;
}
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(KERN_WARNING "debug daemon buffer overflowed; "
                       "discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        INIT_LIST_HEAD(&pc.pc_pages);

        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
                if (pgcount-- == 0)
                        break;

                list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}
/* return a page that has 'len' bytes left at the end */
static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
                                                 unsigned long len)
{
        struct cfs_trace_page *tage;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (len > PAGE_CACHE_SIZE) {
                printk(KERN_ERR
                       "cowardly refusing to write %lu bytes in a page\n", len);
                return NULL;
        }

        tage = cfs_trace_get_tage_try(tcd, len);
        if (tage != NULL)
                return tage;
        if (thread_running)
                cfs_tcd_shrink(tcd);
        if (tcd->tcd_cur_pages > 0) {
                tage = cfs_tage_from_list(tcd->tcd_pages.next);
                tage->used = 0;
                cfs_tage_to_tail(tage, &tcd->tcd_pages);
        }
        return tage;
}
int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
                     const char *format, ...)
{
        va_list args;
        int     rc;

        va_start(args, format);
        rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
        va_end(args);

        return rc;
}
EXPORT_SYMBOL(libcfs_debug_msg);
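
/*
 * Callers normally reach libcfs_debug_msg() through the CDEBUG()/CERROR()
 * macro family, which builds the msgdata for them.  A minimal sketch of a
 * direct call, assuming the LIBCFS_DEBUG_MSG_DATA_DECL() helper from
 * libcfs_debug.h (illustrative only):
 *
 *      LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL);
 *      libcfs_debug_msg(&msgdata, "hello from %s\n", "tracefile");
 */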
int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                       const char *format1, va_list args,
                       const char *format2, ...)
{
        struct cfs_trace_cpu_data *tcd = NULL;
        struct ptldebug_header     header = {0};
        struct cfs_trace_page     *tage;
        /* string_buf is used only if tcd != NULL, and is always set then */
        char                      *string_buf = NULL;
        char                      *debug_buf;
        int                        known_size;
        int                        needed = 85; /* average message length */
        int                        max_nob;
        va_list                    ap;
        int                        depth;
        int                        i;
        int                        remain;
        int                        mask = msgdata->msg_mask;
        char                      *file = (char *)msgdata->msg_file;
        cfs_debug_limit_state_t   *cdls = msgdata->msg_cdls;

        if (strchr(file, '/'))
                file = strrchr(file, '/') + 1;

        tcd = cfs_trace_get_tcd();

        /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
         * pins us to a particular CPU. This avoids an smp_processor_id()
         * warning on Linux when debugging is enabled. */
        cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());

        if (tcd == NULL)                /* arch may not log in IRQ context */
                goto console;

        if (tcd->tcd_cur_pages == 0)
                header.ph_flags |= PH_FLAG_FIRST_RECORD;

        if (tcd->tcd_shutting_down) {
                cfs_trace_put_tcd(tcd);
                tcd = NULL;
                goto console;
        }

        depth = __current_nesting_level();
        known_size = strlen(file) + 1 + depth;
        if (msgdata->msg_fn)
                known_size += strlen(msgdata->msg_fn) + 1;

        if (libcfs_debug_binary)
                known_size += sizeof(header);

        /*
         * Two passes are used because vsnprintf returns the real size
         * required for the output _without_ the terminating NUL, so the
         * write must be retried if 'needed' was too small for this format.
         */
        for (i = 0; i < 2; i++) {
                tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                if (tage == NULL) {
                        if (needed + known_size > PAGE_CACHE_SIZE)
                                mask |= D_ERROR;

                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                string_buf = (char *)page_address(tage->page) +
                             tage->used + known_size;

                max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
                if (max_nob <= 0) {
                        printk(KERN_EMERG "negative max_nob: %d\n",
                               max_nob);
                        mask |= D_ERROR;
                        cfs_trace_put_tcd(tcd);
                        tcd = NULL;
                        goto console;
                }

                needed = 0;
                if (format1) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf, max_nob, format1, ap);
                        va_end(ap);
                }

                if (format2) {
                        remain = max_nob - needed;
                        if (remain < 0)
                                remain = 0;

                        va_start(ap, format2);
                        needed += vsnprintf(string_buf + needed, remain,
                                            format2, ap);
                        va_end(ap);
                }

                if (needed < max_nob) /* well. printing ok.. */
                        break;
        }

        if (*(string_buf + needed - 1) != '\n')
                printk(KERN_INFO "format at %s:%d:%s doesn't end in "
                       "newline\n", file, msgdata->msg_line, msgdata->msg_fn);

        header.ph_len = known_size + needed;
        debug_buf = (char *)page_address(tage->page) + tage->used;

        if (libcfs_debug_binary) {
                memcpy(debug_buf, &header, sizeof(header));
                tage->used += sizeof(header);
                debug_buf += sizeof(header);
        }

        /* indent message according to the nesting level */
        while (depth-- > 0) {
                *(debug_buf++) = '.';
                ++tage->used;
        }

        strcpy(debug_buf, file);
        tage->used += strlen(file) + 1;
        debug_buf += strlen(file) + 1;

        if (msgdata->msg_fn) {
                strcpy(debug_buf, msgdata->msg_fn);
                tage->used += strlen(msgdata->msg_fn) + 1;
                debug_buf += strlen(msgdata->msg_fn) + 1;
        }

        __LASSERT(debug_buf == string_buf);

        tage->used += needed;
        __LASSERT(tage->used <= PAGE_CACHE_SIZE);

console:
        if ((mask & libcfs_printk) == 0) {
                /* no console output requested */
                if (tcd != NULL)
                        cfs_trace_put_tcd(tcd);
                return 1;
        }

        if (cdls != NULL) {
                if (libcfs_console_ratelimit &&
                    cdls->cdls_next != 0 &&     /* not first time ever */
                    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
                        /* skipping a console message */
                        cdls->cdls_count++;
                        if (tcd != NULL)
                                cfs_trace_put_tcd(tcd);
                        return 1;
                }

                if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
                                   libcfs_console_max_delay
                                   + cfs_time_seconds(10))) {
                        /* last timeout was a long time ago */
                        cdls->cdls_delay /= libcfs_console_backoff * 4;
                } else {
                        cdls->cdls_delay *= libcfs_console_backoff;
                }

                if (cdls->cdls_delay < libcfs_console_min_delay)
                        cdls->cdls_delay = libcfs_console_min_delay;
                else if (cdls->cdls_delay > libcfs_console_max_delay)
                        cdls->cdls_delay = libcfs_console_max_delay;

                /* ensure cdls_next is never zero after it's been seen */
                cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
        }

        if (tcd != NULL) {
                cfs_print_to_console(&header, mask, string_buf, needed, file,
                                     msgdata->msg_fn);
                cfs_trace_put_tcd(tcd);
        } else {
                string_buf = cfs_trace_get_console_buffer();

                needed = 0;
                if (format1 != NULL) {
                        va_copy(ap, args);
                        needed = vsnprintf(string_buf,
                                           CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                           format1, ap);
                        va_end(ap);
                }
                if (format2 != NULL) {
                        remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
                        if (remain > 0) {
                                va_start(ap, format2);
                                needed += vsnprintf(string_buf + needed,
                                                    remain, format2, ap);
                                va_end(ap);
                        }
                }
                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                cfs_trace_put_console_buffer(string_buf);
        }

        if (cdls != NULL && cdls->cdls_count != 0) {
                string_buf = cfs_trace_get_console_buffer();

                needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
                                  "Skipped %d previous similar message%s\n",
                                  cdls->cdls_count,
                                  (cdls->cdls_count > 1) ? "s" : "");

                cfs_print_to_console(&header, mask,
                                     string_buf, needed, file, msgdata->msg_fn);

                cfs_trace_put_console_buffer(string_buf);
                cdls->cdls_count = 0;
        }

        return 0;
}
EXPORT_SYMBOL(libcfs_debug_vmsg2);
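
/*
 * Record layout written by libcfs_debug_vmsg2() above: an optional
 * binary ptldebug_header (when libcfs_debug_binary is set), one '.'
 * per nesting level, the NUL-terminated file and function names, then
 * the formatted message.  cfs_trace_debug_print() below parses records
 * back out of the pages in this layout.
 */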
void
cfs_trace_assertion_failed(const char *str,
                           struct libcfs_debug_msg_data *msgdata)
{
        struct ptldebug_header hdr;

        libcfs_panic_in_progress = 1;
        libcfs_catastrophe = 1;
        mb();

        cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());

        cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
                             msgdata->msg_file, msgdata->msg_fn);

        panic("Lustre debug assertion failure\n");

        /* not reached */
}
static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic. If this isn't true for some
         * arch, this will have to be implemented separately in each arch. */
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;

        INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        list_splice_init(&tcd->tcd_daemon_pages,
                                         &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                        tcd->tcd_cur_pages = 0;
                        if (pc->pc_want_daemon_pages) {
                                list_splice_init(&tcd->tcd_daemon_pages,
                                                 &pc->pc_pages);
                                tcd->tcd_cur_daemon_pages = 0;
                        }
                }
        }
}
static void collect_pages(struct page_collection *pc)
{
        INIT_LIST_HEAD(&pc->pc_pages);

        if (libcfs_panic_in_progress)
                panic_collect_pages(pc);
        else
                collect_pages_on_all_cpus(pc);
}
static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        struct list_head *cur_head;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        cur_head = tcd->tcd_pages.next;

                        list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
                                                 linkage) {

                                __LASSERT_TAGE_INVARIANT(tage);

                                if (tage->cpu != cpu || tage->type != i)
                                        continue;

                                cfs_tage_to_tail(tage, cur_head);
                                tcd->tcd_cur_pages++;
                        }
                }
        }
}
static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
 * we have a good amount of data at all times for dumping during an LBUG, even
 * if we have been steadily writing (and otherwise discarding) pages via the
 * debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd)
{
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
                __LASSERT_TAGE_INVARIANT(tage);

                if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
                        continue;

                cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
                tcd->tcd_cur_daemon_pages++;

                if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
                        struct cfs_trace_page *victim;

                        __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
                        victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);

                        __LASSERT_TAGE_INVARIANT(victim);

                        list_del(&victim->linkage);
                        cfs_tage_free(victim);
                        tcd->tcd_cur_daemon_pages--;
                }
        }
}
static void put_pages_on_daemon_list(struct page_collection *pc)
{
        struct cfs_trace_cpu_data *tcd;
        int i, cpu;

        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu)
                        put_pages_on_tcd_daemon_list(pc, tcd);
        }
}
void cfs_trace_debug_print(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                char *p, *file, *fn;
                struct page *page;

                __LASSERT_TAGE_INVARIANT(tage);

                page = tage->page;
                p = page_address(page);
                while (p < ((char *)page_address(page) + tage->used)) {
                        struct ptldebug_header *hdr;
                        int len;

                        hdr = (void *)p;
                        p += sizeof(*hdr);
                        file = p;
                        p += strlen(file) + 1;
                        fn = p;
                        p += strlen(fn) + 1;
                        len = hdr->ph_len - (int)(p - (char *)hdr);

                        cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);

                        p += len;
                }

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
int cfs_tracefile_dump_all_pages(char *filename)
{
        struct page_collection  pc;
        struct file            *filp;
        struct cfs_trace_page  *tage;
        struct cfs_trace_page  *tmp;
        int rc;

        DECL_MMSPACE;

        cfs_tracefile_write_lock();

        filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
        if (IS_ERR(filp)) {
                rc = PTR_ERR(filp);
                filp = NULL;
                printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
                       filename, rc);
                goto out;
        }

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        if (list_empty(&pc.pc_pages)) {
                rc = 0;
                goto close;
        }

        /* ok, for now, just write the pages. in the future we'll be building
         * iobufs with the pages and calling generic_direct_IO */
        MMSPACE_OPEN;
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                rc = filp_write(filp, page_address(tage->page),
                                tage->used, filp_poff(filp));
                if (rc != (int)tage->used) {
                        printk(KERN_WARNING "wanted to write %u but wrote "
                               "%d\n", tage->used, rc);
                        put_pages_back(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        break;
                }
                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
        MMSPACE_CLOSE;
        rc = filp_fsync(filp, 0, LLONG_MAX);
        if (rc)
                printk(KERN_ERR "sync returns %d\n", rc);
close:
        filp_close(filp, NULL);
out:
        cfs_tracefile_write_unlock();
        return rc;
}
void cfs_trace_flush_pages(void)
{
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        pc.pc_want_daemon_pages = 1;
        collect_pages(&pc);
        list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {

                __LASSERT_TAGE_INVARIANT(tage);

                list_del(&tage->linkage);
                cfs_tage_free(tage);
        }
}
int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
                            const char *usr_buffer, int usr_buffer_nob)
{
        int nob;

        if (usr_buffer_nob > knl_buffer_nob)
                return -EOVERFLOW;

        if (copy_from_user((void *)knl_buffer,
                           (void *)usr_buffer, usr_buffer_nob))
                return -EFAULT;

        nob = strnlen(knl_buffer, usr_buffer_nob);
        while (nob-- >= 0)                      /* strip trailing whitespace */
                if (!isspace(knl_buffer[nob]))
                        break;

        if (nob < 0)                            /* empty string */
                return -EINVAL;

        if (nob == knl_buffer_nob)              /* no space to terminate */
                return -EOVERFLOW;

        knl_buffer[nob + 1] = 0;                /* terminate */
        return 0;
}
EXPORT_SYMBOL(cfs_trace_copyin_string);
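
/*
 * Typical use (see cfs_trace_daemon_command_usrstr() below): the kernel
 * buffer is sized one byte larger than the user string so there is
 * always room for the terminating NUL:
 *
 *      rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
 *                                   usr_str, usr_str_nob);
 */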
int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
                             const char *knl_buffer, char *append)
{
        /* NB if 'append' != NULL, it's a single character to append to the
         * copied out string - usually "\n" for /proc entries and "" (i.e. a
         * terminating zero byte) for sysctl entries */
        int nob = strlen(knl_buffer);

        if (nob > usr_buffer_nob)
                nob = usr_buffer_nob;

        if (copy_to_user(usr_buffer, knl_buffer, nob))
                return -EFAULT;

        if (append != NULL && nob < usr_buffer_nob) {
                if (copy_to_user(usr_buffer + nob, append, 1))
                        return -EFAULT;

                nob++;
        }

        return nob;
}
EXPORT_SYMBOL(cfs_trace_copyout_string);
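
/*
 * Note that on success cfs_trace_copyout_string() returns the number of
 * bytes copied out, including the appended character when there was room
 * for it, rather than 0.
 */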
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
        if (nob > 2 * PAGE_CACHE_SIZE)  /* string must be "sensible" */
                return -EINVAL;

        *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
        if (*str == NULL)
                return -ENOMEM;

        return 0;
}

void cfs_trace_free_string_buffer(char *str, int nob)
{
        kfree(str);
}
int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc != 0)
                goto out;

#if !defined(__WINNT__)
        if (str[0] != '/') {
                rc = -EINVAL;
                goto out;
        }
#endif
        rc = cfs_tracefile_dump_all_pages(str);
out:
        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
        return rc;
}
int cfs_trace_daemon_command(char *str)
{
        int rc = 0;

        cfs_tracefile_write_lock();

        if (strcmp(str, "stop") == 0) {
                cfs_tracefile_write_unlock();
                cfs_trace_stop_thread();
                cfs_tracefile_write_lock();
                memset(cfs_tracefile, 0, sizeof(cfs_tracefile));

        } else if (strncmp(str, "size=", 5) == 0) {
                cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
                if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
                        cfs_tracefile_size = CFS_TRACEFILE_SIZE;
                else
                        cfs_tracefile_size <<= 20;

        } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                rc = -ENAMETOOLONG;
#ifndef __WINNT__
        } else if (str[0] != '/') {
                rc = -EINVAL;
#endif
        } else {
                strcpy(cfs_tracefile, str);

                printk(KERN_INFO
                       "Lustre: debug daemon will attempt to start writing "
                       "to %s (%lukB max)\n", cfs_tracefile,
                       (long)(cfs_tracefile_size >> 10));

                cfs_trace_start_thread();
        }

        cfs_tracefile_write_unlock();
        return rc;
}
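
/*
 * Commands accepted above: "stop" shuts the daemon down and clears the
 * output filename; "size=N" sets the tracefile size to N MB (values
 * outside 10..20480 silently fall back to CFS_TRACEFILE_SIZE); any other
 * string is taken as an absolute pathname to start logging to.
 */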
int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
{
        char *str;
        int   rc;

        rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
        if (rc != 0)
                return rc;

        rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
                                     usr_str, usr_str_nob);
        if (rc == 0)
                rc = cfs_trace_daemon_command(str);

        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
        return rc;
}
int cfs_trace_set_debug_mb(int mb)
{
        int i;
        int j;
        int pages;
        int limit = cfs_trace_max_debug_mb();
        struct cfs_trace_cpu_data *tcd;

        if (mb < num_possible_cpus()) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too small for debug buffer size, "
                       "setting it to %d MB.\n", mb, num_possible_cpus());
                mb = num_possible_cpus();
        }

        if (mb > limit) {
                printk(KERN_WARNING
                       "Lustre: %d MB is too large for debug buffer size, "
                       "setting it to %d MB.\n", mb, limit);
                mb = limit;
        }

        mb /= num_possible_cpus();
        pages = mb << (20 - PAGE_CACHE_SHIFT);

        cfs_tracefile_write_lock();

        cfs_tcd_for_each(tcd, i, j)
                tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;

        cfs_tracefile_write_unlock();

        return 0;
}
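
/*
 * Worked example for the sizing above: mb = 512 on an 8-CPU machine
 * with 4 KB pages gives 64 MB per CPU, i.e. pages = 64 << (20 - 12) =
 * 16384, which is then divided among that CPU's buffers according to
 * their tcd_pages_factor percentages.
 */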
int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
{
        char str[32];
        int  rc;

        rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
        if (rc < 0)
                return rc;

        return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
}
int cfs_trace_get_debug_mb(void)
{
        int i;
        int j;
        struct cfs_trace_cpu_data *tcd;
        int total_pages = 0;

        cfs_tracefile_read_lock();

        cfs_tcd_for_each(tcd, i, j)
                total_pages += tcd->tcd_max_pages;

        cfs_tracefile_read_unlock();

        return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
}
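
/*
 * The "+ 1" above compensates for the truncating right shift, so the
 * reported size never understates the configured total (though it may
 * overstate an exact multiple by 1 MB).
 */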
static int tracefiled(void *arg)
{
        struct page_collection pc;
        struct tracefiled_ctl *tctl = arg;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        struct file *filp;
        int last_loop = 0;
        int rc;

        DECL_MMSPACE;

        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml? what on earth is going on? */

        complete(&tctl->tctl_start);

        while (1) {
                wait_queue_t __wait;

                pc.pc_want_daemon_pages = 0;
                collect_pages(&pc);
                if (list_empty(&pc.pc_pages))
                        goto end_loop;

                filp = NULL;
                cfs_tracefile_read_lock();
                if (cfs_tracefile[0] != 0) {
                        filp = filp_open(cfs_tracefile,
                                         O_CREAT | O_RDWR | O_LARGEFILE,
                                         0600);
                        if (IS_ERR(filp)) {
                                rc = PTR_ERR(filp);
                                filp = NULL;
                                printk(KERN_WARNING "couldn't open %s: "
                                       "%d\n", cfs_tracefile, rc);
                        }
                }
                cfs_tracefile_read_unlock();
                if (filp == NULL) {
                        put_pages_on_daemon_list(&pc);
                        __LASSERT(list_empty(&pc.pc_pages));
                        goto end_loop;
                }

                MMSPACE_OPEN;

                list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
                        static loff_t f_pos;

                        __LASSERT_TAGE_INVARIANT(tage);

                        if (f_pos >= (off_t)cfs_tracefile_size)
                                f_pos = 0;
                        else if (f_pos > (off_t)filp_size(filp))
                                f_pos = filp_size(filp);

                        rc = filp_write(filp, page_address(tage->page),
                                        tage->used, &f_pos);
                        if (rc != (int)tage->used) {
                                printk(KERN_WARNING "wanted to write %u "
                                       "but wrote %d\n", tage->used, rc);
                                put_pages_back(&pc);
                                __LASSERT(list_empty(&pc.pc_pages));
                                break;
                        }
                }
                MMSPACE_CLOSE;

                filp_close(filp, NULL);
                put_pages_on_daemon_list(&pc);
                if (!list_empty(&pc.pc_pages)) {
                        int i;

                        printk(KERN_ALERT "Lustre: trace pages aren't "
                               "empty\n");
                        printk(KERN_ERR "total cpus(%d): ",
                               num_possible_cpus());
                        for (i = 0; i < num_possible_cpus(); i++)
                                if (cpu_online(i))
                                        printk(KERN_ERR "%d(on) ", i);
                                else
                                        printk(KERN_ERR "%d(off) ", i);
                        printk(KERN_ERR "\n");

                        i = 0;
                        list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
                                                 linkage)
                                printk(KERN_ERR "page %d belongs to cpu "
                                       "%d\n", ++i, tage->cpu);
                        printk(KERN_ERR "There are %d pages unwritten\n",
                               i);
                }
                __LASSERT(list_empty(&pc.pc_pages));
end_loop:
                if (atomic_read(&tctl->tctl_shutdown)) {
                        if (last_loop == 0) {
                                last_loop = 1;
                                continue;
                        } else {
                                break;
                        }
                }
                init_waitqueue_entry_current(&__wait);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
                waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
                                cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
        }
        complete(&tctl->tctl_stop);
        return 0;
}
int cfs_trace_start_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;
        int rc = 0;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running)
                goto out;

        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
        atomic_set(&tctl->tctl_shutdown, 0);

        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
                goto out;
        }

        wait_for_completion(&tctl->tctl_start);
        thread_running = 1;
out:
        mutex_unlock(&cfs_trace_thread_mutex);
        return rc;
}
void cfs_trace_stop_thread(void)
{
        struct tracefiled_ctl *tctl = &trace_tctl;

        mutex_lock(&cfs_trace_thread_mutex);
        if (thread_running) {
                printk(KERN_INFO
                       "Lustre: shutting down debug daemon thread...\n");
                atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
        mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
{
        struct cfs_trace_cpu_data *tcd;
        int i;
        int j;
        int rc;
        int factor;

        rc = cfs_tracefile_init_arch();
        if (rc != 0)
                return rc;

        cfs_tcd_for_each(tcd, i, j) {
                /* tcd_pages_factor is initialized in
                 * cfs_tracefile_init_arch(). */
                factor = tcd->tcd_pages_factor;
                INIT_LIST_HEAD(&tcd->tcd_pages);
                INIT_LIST_HEAD(&tcd->tcd_stock_pages);
                INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
                tcd->tcd_cur_pages = 0;
                tcd->tcd_cur_stock_pages = 0;
                tcd->tcd_cur_daemon_pages = 0;
                tcd->tcd_max_pages = (max_pages * factor) / 100;
                LASSERT(tcd->tcd_max_pages > 0);
                tcd->tcd_shutting_down = 0;
        }
        return 0;
}
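
/*
 * cfs_tracefile_init() is the boot-time counterpart of
 * cfs_tracefile_exit() below.  Note that the daemon thread is not
 * started here; it is only created once cfs_trace_daemon_command() is
 * given an output path (see cfs_trace_start_thread() above).
 */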
static void trace_cleanup_on_all_cpus(void)
{
        struct cfs_trace_cpu_data *tcd;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;
        int i, cpu;

        cfs_for_each_possible_cpu(cpu) {
                cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                        tcd->tcd_shutting_down = 1;

                        list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
                                                 linkage) {
                                __LASSERT_TAGE_INVARIANT(tage);

                                list_del(&tage->linkage);
                                cfs_tage_free(tage);
                        }

                        tcd->tcd_cur_pages = 0;
                }
        }
}
static void cfs_trace_cleanup(void)
{
        struct page_collection pc;

        INIT_LIST_HEAD(&pc.pc_pages);

        trace_cleanup_on_all_cpus();

        cfs_tracefile_fini_arch();
}
void cfs_tracefile_exit(void)
{
        cfs_trace_stop_thread();
        cfs_trace_cleanup();
}