diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c
index ff61359..ac473c5 100644
--- a/libcfs/libcfs/tracefile.c
+++ b/libcfs/libcfs/tracefile.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -17,21 +15,18 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
  *
  * libcfs/libcfs/tracefile.c
  *
@@ -39,525 +34,621 @@
  * Author: Phil Schwan <phil@clusterfs.com>
  */
 
-
 #define DEBUG_SUBSYSTEM S_LNET
-#define LUSTRE_TRACEFILE_PRIVATE
 #include "tracefile.h"
 
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/tty.h>
+#include <linux/uaccess.h>
 #include <libcfs/libcfs.h>
 
-/* XXX move things up to the top, comment */
-union trace_data_union (*trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
-char tracefile[TRACEFILE_NAME_SIZE];
-long long tracefile_size = TRACEFILE_SIZE;
-static struct tracefiled_ctl trace_tctl;
-struct semaphore trace_thread_sem;
-static int thread_running = 0;
+enum cfs_trace_buf_type {
+	CFS_TCD_TYPE_PROC = 0,
+	CFS_TCD_TYPE_SOFTIRQ,
+	CFS_TCD_TYPE_IRQ,
+	CFS_TCD_TYPE_CNT
+};
 
-atomic_t tage_allocated = ATOMIC_INIT(0);
+union cfs_trace_data_union (*cfs_trace_data[CFS_TCD_TYPE_CNT])[NR_CPUS] __cacheline_aligned;
 
-static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
-					 struct trace_cpu_data *tcd);
+/* Pages containing records already processed by daemon.
+ * Link via ->lru, use size in ->private
+ */
+static LIST_HEAD(daemon_pages);
+static long daemon_pages_count;
+static long daemon_pages_max;
+
+char cfs_tracefile[TRACEFILE_NAME_SIZE];
+long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
+
+struct task_struct *tctl_task;
 
-static inline struct trace_page *tage_from_list(struct list_head *list)
+static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
+static DECLARE_RWSEM(cfs_tracefile_sem);
+
+/* trace file lock routines */
+/* The walking argument indicates that the locking comes from the iterator
+ * over all tcd types, so we must take the lock and disable local IRQs to
+ * avoid deadlocks with other interrupt locks that might be taken. See
+ * LU-1311 for details.
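+ *
+ * For example, cfs_tcd_for_each_type_lock() below passes walking=1 while
+ * collect_pages_on_all_cpus() and trace_cleanup_on_all_cpus() walk every
+ * tcd type on a CPU, whereas cfs_trace_get_tcd() locks only its own
+ * buffer with walking=0.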
+ */ +int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking) + __acquires(&tcd->tcd_lock) { - return list_entry(list, struct trace_page, linkage); + __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT); + if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) + spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags); + else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) + spin_lock_bh(&tcd->tcd_lock); + else if (unlikely(walking)) + spin_lock_irq(&tcd->tcd_lock); + else + spin_lock(&tcd->tcd_lock); + return 1; } -static struct trace_page *tage_alloc(int gfp) +void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) + __releases(&tcd->tcd_lock) { - cfs_page_t *page; - struct trace_page *tage; - - /* - * Don't spam console with allocation failures: they will be reported - * by upper layer anyway. - */ - gfp |= CFS_ALLOC_NOWARN; - page = cfs_alloc_page(gfp); - if (page == NULL) - return NULL; - - tage = cfs_alloc(sizeof(*tage), gfp); - if (tage == NULL) { - cfs_free_page(page); - return NULL; - } - - tage->page = page; - atomic_inc(&tage_allocated); - return tage; + __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT); + if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) + spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags); + else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) + spin_unlock_bh(&tcd->tcd_lock); + else if (unlikely(walking)) + spin_unlock_irq(&tcd->tcd_lock); + else + spin_unlock(&tcd->tcd_lock); } -static void tage_free(struct trace_page *tage) -{ - __LASSERT(tage != NULL); - __LASSERT(tage->page != NULL); +#define cfs_tcd_for_each(tcd, i, j) \ + for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) \ + for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ + j < num_possible_cpus(); \ + j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) - cfs_free_page(tage->page); - cfs_free(tage); - atomic_dec(&tage_allocated); -} +#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ + for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i] && \ + (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ + cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) -static void tage_to_tail(struct trace_page *tage, struct list_head *queue) +enum cfs_trace_buf_type cfs_trace_buf_idx_get(void) { - __LASSERT(tage != NULL); - __LASSERT(queue != NULL); - - list_move_tail(&tage->linkage, queue); + if (in_irq()) + return CFS_TCD_TYPE_IRQ; + if (in_softirq()) + return CFS_TCD_TYPE_SOFTIRQ; + return CFS_TCD_TYPE_PROC; } -int trace_refill_stock(struct trace_cpu_data *tcd, int gfp, - struct list_head *stock) +static inline struct cfs_trace_cpu_data * +cfs_trace_get_tcd(void) { - int i; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
- */ + struct cfs_trace_cpu_data *tcd = + &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd; - for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) { - struct trace_page *tage; + cfs_trace_lock_tcd(tcd, 0); - tage = tage_alloc(gfp); - if (tage == NULL) - break; - list_add_tail(&tage->linkage, stock); - } - return i; + return tcd; } -/* return a page that has 'len' bytes left at the end */ -static struct trace_page *trace_get_tage_try(struct trace_cpu_data *tcd, - unsigned long len) +static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd) { - struct trace_page *tage; + cfs_trace_unlock_tcd(tcd, 0); - if (tcd->tcd_cur_pages > 0) { - __LASSERT(!list_empty(&tcd->tcd_pages)); - tage = tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= CFS_PAGE_SIZE) - return tage; - } - - if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { - if (tcd->tcd_cur_stock_pages > 0) { - tage = tage_from_list(tcd->tcd_stock_pages.prev); - -- tcd->tcd_cur_stock_pages; - list_del_init(&tage->linkage); - } else { - tage = tage_alloc(CFS_ALLOC_ATOMIC); - if (tage == NULL) { - if (printk_ratelimit()) - printk(KERN_WARNING - "cannot allocate a tage (%ld)\n", - tcd->tcd_cur_pages); - return NULL; - } - } - - tage->used = 0; - tage->cpu = smp_processor_id(); - tage->type = tcd->tcd_type; - list_add_tail(&tage->linkage, &tcd->tcd_pages); - tcd->tcd_cur_pages++; - - if (tcd->tcd_cur_pages > 8 && thread_running) { - struct tracefiled_ctl *tctl = &trace_tctl; - /* - * wake up tracefiled to process some pages. - */ - cfs_waitq_signal(&tctl->tctl_waitq); - } - return tage; - } - return NULL; + put_cpu(); } -static void tcd_shrink(struct trace_cpu_data *tcd) +static inline struct cfs_trace_page * +cfs_tage_from_list(struct list_head *list) { - int pgcount = tcd->tcd_cur_pages / 10; - struct page_collection pc; - struct trace_page *tage; - struct trace_page *tmp; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - - if (printk_ratelimit()) - printk(KERN_WARNING "debug daemon buffer overflowed; " - "discarding 10%% of pages (%d of %ld)\n", - pgcount + 1, tcd->tcd_cur_pages); - - CFS_INIT_LIST_HEAD(&pc.pc_pages); - spin_lock_init(&pc.pc_lock); - - cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages, - struct trace_page, linkage) { - if (pgcount-- == 0) - break; - - list_move_tail(&tage->linkage, &pc.pc_pages); - tcd->tcd_cur_pages--; - } - put_pages_on_tcd_daemon_list(&pc, tcd); + return list_entry(list, struct cfs_trace_page, linkage); } -/* return a page that has 'len' bytes left at the end */ -static struct trace_page *trace_get_tage(struct trace_cpu_data *tcd, - unsigned long len) +static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) { - struct trace_page *tage; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
- */ - - if (len > CFS_PAGE_SIZE) { - printk(KERN_ERR - "cowardly refusing to write %lu bytes in a page\n", len); - return NULL; - } - - tage = trace_get_tage_try(tcd, len); - if (tage != NULL) - return tage; - if (thread_running) - tcd_shrink(tcd); - if (tcd->tcd_cur_pages > 0) { - tage = tage_from_list(tcd->tcd_pages.next); - tage->used = 0; - tage_to_tail(tage, &tcd->tcd_pages); - } - return tage; + struct page *page; + struct cfs_trace_page *tage; + + /* My caller is trying to free memory */ + if (!in_interrupt() && (current->flags & PF_MEMALLOC)) + return NULL; + + /* + * Don't spam console with allocation failures: they will be reported + * by upper layer anyway. + */ + gfp |= __GFP_NOWARN; + page = alloc_page(gfp); + if (page == NULL) + return NULL; + + tage = kmalloc(sizeof(*tage), gfp); + if (tage == NULL) { + __free_page(page); + return NULL; + } + + tage->page = page; + atomic_inc(&cfs_tage_allocated); + return tage; } -int libcfs_debug_vmsg2(cfs_debug_limit_state_t *cdls, int subsys, int mask, - const char *file, const char *fn, const int line, - const char *format1, va_list args, - const char *format2, ...) +static void cfs_tage_free(struct cfs_trace_page *tage) { - struct trace_cpu_data *tcd = NULL; - struct ptldebug_header header; - struct trace_page *tage; - /* string_buf is used only if tcd != NULL, and is always set then */ - char *string_buf = NULL; - char *debug_buf; - int known_size; - int needed = 85; /* average message length */ - int max_nob; - va_list ap; - int depth; - int i; - int remain; - - if (strchr(file, '/')) - file = strrchr(file, '/') + 1; - - - set_ptldebug_header(&header, subsys, mask, line, CDEBUG_STACK()); - - tcd = trace_get_tcd(); - if (tcd == NULL) /* arch may not log in IRQ context */ - goto console; - - if (tcd->tcd_shutting_down) { - trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - depth = __current_nesting_level(); - known_size = strlen(file) + 1 + depth; - if (fn) - known_size += strlen(fn) + 1; - - if (libcfs_debug_binary) - known_size += sizeof(header); - - /*/ - * '2' used because vsnprintf return real size required for output - * _without_ terminating NULL. - * if needed is to small for this format. - */ - for (i = 0; i < 2; i++) { - tage = trace_get_tage(tcd, needed + known_size + 1); - if (tage == NULL) { - if (needed + known_size > CFS_PAGE_SIZE) - mask |= D_ERROR; - - trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - string_buf = (char *)cfs_page_address(tage->page) + - tage->used + known_size; - - max_nob = CFS_PAGE_SIZE - tage->used - known_size; - if (max_nob <= 0) { - printk(KERN_EMERG "negative max_nob: %i\n", max_nob); - mask |= D_ERROR; - trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - needed = 0; - if (format1) { - va_copy(ap, args); - needed = vsnprintf(string_buf, max_nob, format1, ap); - va_end(ap); - } - - if (format2) { - remain = max_nob - needed; - if (remain < 0) - remain = 0; - - va_start(ap, format2); - needed += vsnprintf(string_buf + needed, remain, - format2, ap); - va_end(ap); - } - - if (needed < max_nob) /* well. printing ok.. 
*/ - break; - } - - if (*(string_buf+needed-1) != '\n') - printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n", - file, line, fn); - - header.ph_len = known_size + needed; - debug_buf = (char *)cfs_page_address(tage->page) + tage->used; - - if (libcfs_debug_binary) { - memcpy(debug_buf, &header, sizeof(header)); - tage->used += sizeof(header); - debug_buf += sizeof(header); - } - - /* indent message according to the nesting level */ - while (depth-- > 0) { - *(debug_buf++) = '.'; - ++ tage->used; - } - - strcpy(debug_buf, file); - tage->used += strlen(file) + 1; - debug_buf += strlen(file) + 1; + __LASSERT(tage != NULL); + __LASSERT(tage->page != NULL); - if (fn) { - strcpy(debug_buf, fn); - tage->used += strlen(fn) + 1; - debug_buf += strlen(fn) + 1; - } - - __LASSERT(debug_buf == string_buf); + __free_page(tage->page); + kfree(tage); + atomic_dec(&cfs_tage_allocated); +} - tage->used += needed; - __LASSERT (tage->used <= CFS_PAGE_SIZE); +static void cfs_tage_to_tail(struct cfs_trace_page *tage, + struct list_head *queue) +{ + __LASSERT(tage != NULL); + __LASSERT(queue != NULL); -console: - if ((mask & libcfs_printk) == 0) { - /* no console output requested */ - if (tcd != NULL) - trace_put_tcd(tcd); - return 1; - } + list_move_tail(&tage->linkage, queue); +} - if (cdls != NULL) { - if (libcfs_console_ratelimit && - cdls->cdls_next != 0 && /* not first time ever */ - !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { - /* skipping a console message */ - cdls->cdls_count++; - if (tcd != NULL) - trace_put_tcd(tcd); - return 1; - } - - if (cfs_time_after(cfs_time_current(), cdls->cdls_next + - libcfs_console_max_delay - + cfs_time_seconds(10))) { - /* last timeout was a long time ago */ - cdls->cdls_delay /= libcfs_console_backoff * 4; - } else { - cdls->cdls_delay *= libcfs_console_backoff; - - if (cdls->cdls_delay < libcfs_console_min_delay) - cdls->cdls_delay = libcfs_console_min_delay; - else if (cdls->cdls_delay > libcfs_console_max_delay) - cdls->cdls_delay = libcfs_console_max_delay; - } - - /* ensure cdls_next is never zero after it's been seen */ - cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; - } +/* return a page that has 'len' bytes left at the end */ +static struct cfs_trace_page * +cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) +{ + struct cfs_trace_page *tage; + struct task_struct *tsk; + + if (tcd->tcd_cur_pages > 0) { + __LASSERT(!list_empty(&tcd->tcd_pages)); + tage = cfs_tage_from_list(tcd->tcd_pages.prev); + if (tage->used + len <= PAGE_SIZE) + return tage; + } + + if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { + if (tcd->tcd_cur_stock_pages > 0) { + tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); + --tcd->tcd_cur_stock_pages; + list_del_init(&tage->linkage); + } else { + tage = cfs_tage_alloc(GFP_ATOMIC); + if (unlikely(tage == NULL)) { + if ((!(current->flags & PF_MEMALLOC) || + in_interrupt()) && printk_ratelimit()) + pr_warn("Lustre: cannot allocate a tage (%ld)\n", + tcd->tcd_cur_pages); + return NULL; + } + } + + tage->used = 0; + tage->cpu = smp_processor_id(); + tage->type = tcd->tcd_type; + list_add_tail(&tage->linkage, &tcd->tcd_pages); + tcd->tcd_cur_pages++; + + tsk = tctl_task; + if (tcd->tcd_cur_pages > 8 && tsk) + /* + * wake up tracefiled to process some pages. 
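			 * The daemon writes them out to cfs_tracefile (when
			 * one is configured) and then parks them on the
			 * daemon_pages list.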
+ */ + wake_up_process(tsk); + + return tage; + } + return NULL; +} - if (tcd != NULL) { - print_to_console(&header, mask, string_buf, needed, file, fn); - trace_put_tcd(tcd); - } else { - string_buf = trace_get_console_buffer(); - - needed = 0; - if (format1 != NULL) { - va_copy(ap, args); - needed = vsnprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, format1, ap); - va_end(ap); - } - if (format2 != NULL) { - remain = TRACE_CONSOLE_BUFFER_SIZE - needed; - if (remain > 0) { - va_start(ap, format2); - needed += vsnprintf(string_buf+needed, remain, format2, ap); - va_end(ap); - } - } - print_to_console(&header, mask, - string_buf, needed, file, fn); - - trace_put_console_buffer(string_buf); - } +static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) +{ + int pgcount = tcd->tcd_cur_pages / 10; + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ + + if (printk_ratelimit()) + pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n", + pgcount + 1, tcd->tcd_cur_pages); + + INIT_LIST_HEAD(&pc.pc_pages); + + list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { + if (pgcount-- == 0) + break; + + list_del(&tage->linkage); + cfs_tage_free(tage); + tcd->tcd_cur_pages--; + } +} - if (cdls != NULL && cdls->cdls_count != 0) { - string_buf = trace_get_console_buffer(); +/* return a page that has 'len' bytes left at the end */ +static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, + unsigned long len) +{ + struct cfs_trace_page *tage; + + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ + + if (len > PAGE_SIZE) { + pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n", + len); + return NULL; + } + + tage = cfs_trace_get_tage_try(tcd, len); + if (tage != NULL) + return tage; + if (tctl_task) + cfs_tcd_shrink(tcd); + if (tcd->tcd_cur_pages > 0) { + tage = cfs_tage_from_list(tcd->tcd_pages.next); + tage->used = 0; + cfs_tage_to_tail(tage, &tcd->tcd_pages); + } + return tage; +} - needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE, - "Skipped %d previous similar message%s\n", - cdls->cdls_count, (cdls->cdls_count > 1) ? 
"s" : ""); +static void cfs_set_ptldebug_header(struct ptldebug_header *header, + struct libcfs_debug_msg_data *msgdata, + unsigned long stack) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + + header->ph_subsys = msgdata->msg_subsys; + header->ph_mask = msgdata->msg_mask; + header->ph_cpu_id = smp_processor_id(); + header->ph_type = cfs_trace_buf_idx_get(); + /* y2038 safe since all user space treats this as unsigned, but + * will overflow in 2106 + */ + header->ph_sec = (u32)ts.tv_sec; + header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; + header->ph_stack = stack; + header->ph_pid = current->pid; + header->ph_line_num = msgdata->msg_line; + header->ph_extern_pid = 0; +} - print_to_console(&header, mask, - string_buf, needed, file, fn); +static void cfs_vprint_to_console(struct ptldebug_header *hdr, int mask, + struct va_format *vaf, const char *file, + const char *fn) +{ + char *prefix = "Lustre"; + + if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET) + prefix = "LNet"; + + if (mask & D_CONSOLE) { + if (mask & D_EMERG) + pr_emerg("%sError: %pV", prefix, vaf); + else if (mask & D_ERROR) + pr_err("%sError: %pV", prefix, vaf); + else if (mask & D_WARNING) + pr_warn("%s: %pV", prefix, vaf); + else if (mask & libcfs_printk) + pr_info("%s: %pV", prefix, vaf); + } else { + if (mask & D_EMERG) + pr_emerg("%sError: %d:%d:(%s:%d:%s()) %pV", prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, vaf); + else if (mask & D_ERROR) + pr_err("%sError: %d:%d:(%s:%d:%s()) %pV", prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, vaf); + else if (mask & D_WARNING) + pr_warn("%s: %d:%d:(%s:%d:%s()) %pV", prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, vaf); + else if (mask & (D_CONSOLE | libcfs_printk)) + pr_info("%s: %pV", prefix, vaf); + } +} - trace_put_console_buffer(string_buf); - cdls->cdls_count = 0; - } +static void cfs_print_to_console(struct ptldebug_header *hdr, int mask, + const char *file, const char *fn, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; - return 0; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + cfs_vprint_to_console(hdr, mask, &vaf, file, fn); } -EXPORT_SYMBOL(libcfs_debug_vmsg2); -void -libcfs_assertion_failed(const char *expr, const char *file, - const char *func, const int line) +int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, + const char *format, ...) { - libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line, - "ASSERTION(%s) failed\n", expr); - /* cfs_enter_debugger(); */ - lbug_with_loc(file, func, line); + struct cfs_trace_cpu_data *tcd = NULL; + struct ptldebug_header header = {0}; + struct cfs_trace_page *tage; + /* string_buf is used only if tcd != NULL, and is always set then */ + char *string_buf = NULL; + char *debug_buf; + int known_size; + int needed = 85; /* seeded with average message length */ + int max_nob; + va_list ap; + int retry; + int mask = msgdata->msg_mask; + char *file = (char *)msgdata->msg_file; + struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; + + if (strchr(file, '/')) + file = strrchr(file, '/') + 1; + + tcd = cfs_trace_get_tcd(); + + /* cfs_trace_get_tcd() grabs a lock, which disables preemption and + * pins us to a particular CPU. This avoids an smp_processor_id() + * warning on Linux when debugging is enabled. 
+	 */
+	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
+
+	if (!tcd) /* arch may not log in IRQ context */
+		goto console;
+
+	if (tcd->tcd_cur_pages == 0)
+		header.ph_flags |= PH_FLAG_FIRST_RECORD;
+
+	if (tcd->tcd_shutting_down) {
+		cfs_trace_put_tcd(tcd);
+		tcd = NULL;
+		goto console;
+	}
+
+	known_size = strlen(file) + 1;
+	if (msgdata->msg_fn)
+		known_size += strlen(msgdata->msg_fn) + 1;
+
+	if (libcfs_debug_binary)
+		known_size += sizeof(header);
+
+	/*
+	 * We may need an additional pass: on the first pass 'needed' is only
+	 * an estimate, and vsnprintf() reports the full size required for the
+	 * output.  On the second pass (retry == 1) use vscnprintf(), which
+	 * returns the number of bytes actually written, not including the
+	 * terminating NUL, so that for the remainder of this function
+	 * 'needed' is the number of bytes actually written.
+	 */
+	for (retry = 0; retry < 2; retry++) {
+		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
+		if (!tage) {
+			if (needed + known_size > PAGE_SIZE)
+				mask |= D_ERROR;
+
+			cfs_trace_put_tcd(tcd);
+			tcd = NULL;
+			goto console;
+		}
+
+		string_buf = (char *)page_address(tage->page) +
+			     tage->used + known_size;
+
+		max_nob = PAGE_SIZE - tage->used - known_size;
+		if (max_nob <= 0) {
+			pr_emerg("LustreError: negative max_nob: %d\n",
+				 max_nob);
+			mask |= D_ERROR;
+			cfs_trace_put_tcd(tcd);
+			tcd = NULL;
+			goto console;
+		}
+
+		va_start(ap, format);
+		if (retry)
+			needed = vscnprintf(string_buf, max_nob, format, ap);
+		else
+			needed = vsnprintf(string_buf, max_nob, format, ap);
+		va_end(ap);
+
+		if (needed < max_nob) /* well. printing ok.. */
+			break;
+	}
+
+	/* `needed` is actual bytes written to string_buf */
+	if (*(string_buf + needed - 1) != '\n') {
+		pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n",
+			file, msgdata->msg_line, msgdata->msg_fn);
+	}
+
+	header.ph_len = known_size + needed;
+	debug_buf = (char *)page_address(tage->page) + tage->used;
+
+	if (libcfs_debug_binary) {
+		memcpy(debug_buf, &header, sizeof(header));
+		tage->used += sizeof(header);
+		debug_buf += sizeof(header);
+	}
+
+	strlcpy(debug_buf, file, PAGE_SIZE - tage->used);
+	tage->used += strlen(file) + 1;
+	debug_buf += strlen(file) + 1;
+
+	if (msgdata->msg_fn) {
+		strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used);
+		tage->used += strlen(msgdata->msg_fn) + 1;
+		debug_buf += strlen(msgdata->msg_fn) + 1;
+	}
+
+	__LASSERT(debug_buf == string_buf);
+
+	tage->used += needed;
+	__LASSERT(tage->used <= PAGE_SIZE);
+
+console:
+	if ((mask & libcfs_printk) == 0) {
+		/* no console output requested */
+		if (tcd != NULL)
+			cfs_trace_put_tcd(tcd);
+		return 1;
+	}
+
+	if (cdls != NULL) {
+		if (libcfs_console_ratelimit &&
+		    cdls->cdls_next != 0 &&	/* not first time ever */
+		    time_before(jiffies, cdls->cdls_next)) {
+			/* skipping a console message */
+			cdls->cdls_count++;
+			if (tcd != NULL)
+				cfs_trace_put_tcd(tcd);
+			return 1;
+		}
+
+		if (time_after(jiffies, cdls->cdls_next +
+			       libcfs_console_max_delay +
+			       cfs_time_seconds(10))) {
+			/* last timeout was a long time ago */
+			cdls->cdls_delay /= libcfs_console_backoff * 4;
+		} else {
+			cdls->cdls_delay *= libcfs_console_backoff;
+		}
+
+		if (cdls->cdls_delay < libcfs_console_min_delay)
+			cdls->cdls_delay = libcfs_console_min_delay;
+		else if (cdls->cdls_delay > libcfs_console_max_delay)
+			cdls->cdls_delay = libcfs_console_max_delay;
+
+		/* ensure cdls_next is never zero after it's been seen */
+		cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
+	}
+
+	if (tcd) {
+		cfs_print_to_console(&header, mask, file, msgdata->msg_fn,
+				     "%s", 
string_buf);
+		cfs_trace_put_tcd(tcd);
+	} else {
+		struct va_format vaf;
+
+		va_start(ap, format);
+		vaf.fmt = format;
+		vaf.va = &ap;
+		cfs_vprint_to_console(&header, mask,
+				      &vaf, file, msgdata->msg_fn);
+		va_end(ap);
+	}
+
+	if (cdls != NULL && cdls->cdls_count != 0) {
+		cfs_print_to_console(&header, mask, file,
+				     msgdata->msg_fn,
+				     "Skipped %d previous similar message%s\n",
+				     cdls->cdls_count,
+				     (cdls->cdls_count > 1) ? "s" : "");
+
+		cdls->cdls_count = 0;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL(libcfs_assertion_failed);
+EXPORT_SYMBOL(libcfs_debug_msg);
 
 void
-trace_assertion_failed(const char *str,
-		       const char *fn, const char *file, int line)
+cfs_trace_assertion_failed(const char *str,
+			   struct libcfs_debug_msg_data *msgdata)
 {
-	struct ptldebug_header hdr;
+	struct ptldebug_header hdr;
 
-	libcfs_panic_in_progress = 1;
-	libcfs_catastrophe = 1;
-	mb();
+	libcfs_panic_in_progress = 1;
+	libcfs_catastrophe = 1;
+	smp_mb();
 
-	set_ptldebug_header(&hdr, DEBUG_SUBSYSTEM, D_EMERG, line,
-			    CDEBUG_STACK());
+	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
 
-	print_to_console(&hdr, D_EMERG, str, strlen(str), file, fn);
+	cfs_print_to_console(&hdr, D_EMERG, msgdata->msg_file, msgdata->msg_fn,
+			     "%s", str);
 
-	LIBCFS_PANIC("Lustre debug assertion failure\n");
+	panic("Lustre debug assertion failure\n");
 
-	/* not reached */
+	/* not reached */
 }
 
 static void panic_collect_pages(struct page_collection *pc)
 {
-	/* Do the collect_pages job on a single CPU: assumes that all other
-	 * CPUs have been stopped during a panic. If this isn't true for some
-	 * arch, this will have to be implemented separately in each arch. */
-	int i;
-	int j;
-	struct trace_cpu_data *tcd;
-
-	CFS_INIT_LIST_HEAD(&pc->pc_pages);
-
-	tcd_for_each(tcd, i, j) {
-		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
-		tcd->tcd_cur_pages = 0;
-
-		if (pc->pc_want_daemon_pages) {
-			list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
-			tcd->tcd_cur_daemon_pages = 0;
-		}
-	}
+	/* Do the collect_pages job on a single CPU: assumes that all other
+	 * CPUs have been stopped during a panic. If this isn't true for some
+	 * arch, this will have to be implemented separately in each arch.
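+	 * For the same reason no tcd locks are taken here: with every
+	 * other CPU stopped, nothing can race with the list splicing.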
*/ + int i; + int j; + struct cfs_trace_cpu_data *tcd; + + INIT_LIST_HEAD(&pc->pc_pages); + + cfs_tcd_for_each(tcd, i, j) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; + } } static void collect_pages_on_all_cpus(struct page_collection *pc) { - struct trace_cpu_data *tcd; - int i, cpu; - - spin_lock(&pc->pc_lock); - for_each_possible_cpu(cpu) { - tcd_for_each_type_lock(tcd, i, cpu) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } - } - spin_unlock(&pc->pc_lock); + struct cfs_trace_cpu_data *tcd; + int i, cpu; + + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; + } + } } static void collect_pages(struct page_collection *pc) { - CFS_INIT_LIST_HEAD(&pc->pc_pages); + INIT_LIST_HEAD(&pc->pc_pages); - if (libcfs_panic_in_progress) - panic_collect_pages(pc); - else - collect_pages_on_all_cpus(pc); + if (libcfs_panic_in_progress) + panic_collect_pages(pc); + else + collect_pages_on_all_cpus(pc); } static void put_pages_back_on_all_cpus(struct page_collection *pc) { - struct trace_cpu_data *tcd; - struct list_head *cur_head; - struct trace_page *tage; - struct trace_page *tmp; + struct cfs_trace_cpu_data *tcd; + struct list_head *cur_head; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; int i, cpu; - spin_lock(&pc->pc_lock); - for_each_possible_cpu(cpu) { - tcd_for_each_type_lock(tcd, i, cpu) { + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { cur_head = tcd->tcd_pages.next; - cfs_list_for_each_entry_safe_typed(tage, tmp, - &pc->pc_pages, - struct trace_page, - linkage) { + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, + linkage) { - __LASSERT_TAGE_INVARIANT(tage); + __LASSERT_TAGE_INVARIANT(tage); - if (tage->cpu != cpu || tage->type != i) - continue; + if (tage->cpu != cpu || tage->type != i) + continue; - tage_to_tail(tage, cur_head); - tcd->tcd_cur_pages++; - } - } - } - spin_unlock(&pc->pc_lock); + cfs_tage_to_tail(tage, cur_head); + tcd->tcd_cur_pages++; + } + } + } } static void put_pages_back(struct page_collection *pc) @@ -566,204 +657,170 @@ static void put_pages_back(struct page_collection *pc) put_pages_back_on_all_cpus(pc); } -/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that - * we have a good amount of data at all times for dumping during an LBUG, even - * if we have been steadily writing (and otherwise discarding) pages via the - * debug daemon. 
*/ -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct trace_cpu_data *tcd) +#ifdef LNET_DUMP_ON_PANIC +void cfs_trace_debug_print(void) { - struct trace_page *tage; - struct trace_page *tmp; - - spin_lock(&pc->pc_lock); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages, - struct trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) - continue; - - tage_to_tail(tage, &tcd->tcd_daemon_pages); - tcd->tcd_cur_daemon_pages++; - - if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { - struct trace_page *victim; - - __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); - victim = tage_from_list(tcd->tcd_daemon_pages.next); - - __LASSERT_TAGE_INVARIANT(victim); - - list_del(&victim->linkage); - tage_free(victim); - tcd->tcd_cur_daemon_pages--; - } - } - spin_unlock(&pc->pc_lock); -} - -static void put_pages_on_daemon_list(struct page_collection *pc) -{ - struct trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - tcd_for_each_type_lock(tcd, i, cpu) - put_pages_on_tcd_daemon_list(pc, tcd); - } -} - -void trace_debug_print(void) -{ - struct page_collection pc; - struct trace_page *tage; - struct trace_page *tmp; - - spin_lock_init(&pc.pc_lock); - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct trace_page, linkage) { - char *p, *file, *fn; - cfs_page_t *page; - - __LASSERT_TAGE_INVARIANT(tage); - - page = tage->page; - p = cfs_page_address(page); - while (p < ((char *)cfs_page_address(page) + tage->used)) { - struct ptldebug_header *hdr; - int len; - hdr = (void *)p; - p += sizeof(*hdr); - file = p; - p += strlen(file) + 1; - fn = p; - p += strlen(fn) + 1; - len = hdr->ph_len - (int)(p - (char *)hdr); - - print_to_console(hdr, D_EMERG, p, len, file, fn); - - p += len; - } - - list_del(&tage->linkage); - tage_free(tage); - } + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + struct page *page; + + collect_pages(&pc); + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + char *p, *file, *fn; + + __LASSERT_TAGE_INVARIANT(tage); + + page = tage->page; + p = page_address(page); + while (p < ((char *)page_address(page) + tage->used)) { + struct ptldebug_header *hdr; + int len; + hdr = (void *)p; + p += sizeof(*hdr); + file = p; + p += strlen(file) + 1; + fn = p; + p += strlen(fn) + 1; + len = hdr->ph_len - (int)(p - (char *)hdr); + + cfs_print_to_console(hdr, D_EMERG, file, fn, + "%.*s", len, p); + + p += len; + } + + list_del(&tage->linkage); + cfs_tage_free(tage); + } + down_write(&cfs_tracefile_sem); + while ((page = list_first_entry_or_null(&daemon_pages, + struct page, lru)) != NULL) { + char *p, *file, *fn; + + p = page_address(page); + while (p < ((char *)page_address(page) + page->private)) { + struct ptldebug_header *hdr; + int len; + + hdr = (void *)p; + p += sizeof(*hdr); + file = p; + p += strlen(file) + 1; + fn = p; + p += strlen(fn) + 1; + len = hdr->ph_len - (int)(p - (char *)hdr); + + cfs_print_to_console(hdr, D_EMERG, file, fn, + "%.*s", len, p); + + p += len; + } + list_del_init(&page->lru); + daemon_pages_count -= 1; + put_page(page); + } + up_write(&cfs_tracefile_sem); } +#endif /* LNET_DUMP_ON_PANIC */ -int tracefile_dump_all_pages(char *filename) +int cfs_tracefile_dump_all_pages(char *filename) { - struct page_collection pc; - cfs_file_t *filp; - struct trace_page *tage; - struct trace_page *tmp; - int rc; - - CFS_DECL_MMSPACE; - - 
tracefile_write_lock(); - - filp = cfs_filp_open(filename, - O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc); - if (!filp) { - if (rc != -EEXIST) - printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n", - filename, rc); - goto out; - } - - spin_lock_init(&pc.pc_lock); - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) { - rc = 0; - goto close; - } - - /* ok, for now, just write the pages. in the future we'll be building - * iobufs with the pages and calling generic_direct_IO */ - CFS_MMSPACE_OPEN; - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - rc = cfs_filp_write(filp, cfs_page_address(tage->page), - tage->used, cfs_filp_poff(filp)); - if (rc != (int)tage->used) { - printk(KERN_WARNING "wanted to write %u but wrote " - "%d\n", tage->used, rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - break; - } - list_del(&tage->linkage); - tage_free(tage); - } - CFS_MMSPACE_CLOSE; - rc = cfs_filp_fsync(filp); - if (rc) - printk(KERN_ERR "sync returns %d\n", rc); - close: - cfs_filp_close(filp); - out: - tracefile_write_unlock(); - return rc; -} - -void trace_flush_pages(void) -{ - struct page_collection pc; - struct trace_page *tage; - struct trace_page *tmp; - - spin_lock_init(&pc.pc_lock); - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - tage_free(tage); - } + struct page_collection pc; + struct file *filp; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + char *buf; + struct page *page; + int rc; + + down_write(&cfs_tracefile_sem); + + filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + filp = NULL; + pr_err("LustreError: can't open %s for dump: rc = %d\n", + filename, rc); + goto out; + } + + collect_pages(&pc); + if (list_empty(&pc.pc_pages)) { + rc = 0; + goto close; + } + + /* ok, for now, just write the pages. 
in the future we'll be building + * iobufs with the pages and calling generic_direct_IO */ + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + + __LASSERT_TAGE_INVARIANT(tage); + + buf = kmap(tage->page); + rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos); + kunmap(tage->page); + if (rc != (int)tage->used) { + pr_warn("Lustre: wanted to write %u but wrote %d\n", + tage->used, rc); + put_pages_back(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + break; + } + list_del(&tage->linkage); + cfs_tage_free(tage); + } + while ((page = list_first_entry_or_null(&daemon_pages, + struct page, lru)) != NULL) { + buf = page_address(page); + rc = cfs_kernel_write(filp, buf, page->private, &filp->f_pos); + if (rc != (int)page->private) { + pr_warn("Lustre: wanted to write %u but wrote %d\n", + (int)page->private, rc); + break; + } + list_del(&page->lru); + daemon_pages_count -= 1; + put_page(page); + } + rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1); + if (rc) + pr_err("LustreError: sync returns: rc = %d\n", rc); +close: + filp_close(filp, NULL); +out: + up_write(&cfs_tracefile_sem); + return rc; } -int trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char *usr_buffer, int usr_buffer_nob) +void cfs_trace_flush_pages(void) { - int nob; - - if (usr_buffer_nob > knl_buffer_nob) - return -EOVERFLOW; - - if (copy_from_user((void *)knl_buffer, - (void *)usr_buffer, usr_buffer_nob)) - return -EFAULT; - - nob = strnlen(knl_buffer, usr_buffer_nob); - while (nob-- >= 0) /* strip trailing whitespace */ - if (!isspace(knl_buffer[nob])) - break; - - if (nob < 0) /* empty string */ - return -EINVAL; - - if (nob == knl_buffer_nob) /* no space to terminate */ - return -EOVERFLOW; - - knl_buffer[nob + 1] = 0; /* terminate */ - return 0; + struct page_collection pc; + struct cfs_trace_page *tage; + struct page *page; + + collect_pages(&pc); + while (!list_empty(&pc.pc_pages)) { + tage = list_first_entry(&pc.pc_pages, + struct cfs_trace_page, linkage); + __LASSERT_TAGE_INVARIANT(tage); + + list_del(&tage->linkage); + cfs_tage_free(tage); + } + + down_write(&cfs_tracefile_sem); + while ((page = list_first_entry_or_null(&daemon_pages, + struct page, lru)) != NULL) { + list_del(&page->lru); + daemon_pages_count -= 1; + put_page(page); + } + up_write(&cfs_tracefile_sem); } -int trace_copyout_string(char *usr_buffer, int usr_buffer_nob, - const char *knl_buffer, char *append) +int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, + const char *knl_buffer, char *append) { /* NB if 'append' != NULL, it's a single character to append to the * copied out string - usually "\n", for /proc entries and "" (i.e. 
a @@ -773,11 +830,11 @@ int trace_copyout_string(char *usr_buffer, int usr_buffer_nob, if (nob > usr_buffer_nob) nob = usr_buffer_nob; - if (copy_to_user(usr_buffer, knl_buffer, nob)) + if (copy_to_user(usr_buffer, knl_buffer, nob)) return -EFAULT; if (append != NULL && nob < usr_buffer_nob) { - if (copy_to_user(usr_buffer + nob, append, 1)) + if (copy_to_user(usr_buffer + nob, append, 1)) return -EFAULT; nob++; @@ -785,387 +842,372 @@ int trace_copyout_string(char *usr_buffer, int usr_buffer_nob, return nob; } +EXPORT_SYMBOL(cfs_trace_copyout_string); -int trace_allocate_string_buffer(char **str, int nob) -{ - if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */ - return -EINVAL; - - *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO); - if (*str == NULL) - return -ENOMEM; - - return 0; -} - -void trace_free_string_buffer(char *str, int nob) +int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) { - cfs_free(str); + char *str; + char *path; + int rc; + + str = memdup_user_nul(usr_str, usr_str_nob); + if (IS_ERR(str)) + return PTR_ERR(str); + + path = strim(str); + if (path[0] != '/') + rc = -EINVAL; + else + rc = cfs_tracefile_dump_all_pages(path); + kfree(str); + + return rc; } -int trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob) -{ - char *str; - int rc; - - rc = trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; - - rc = trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc != 0) - goto out; - -#if !defined(__WINNT__) - if (str[0] != '/') { - rc = -EINVAL; - goto out; - } -#endif - rc = tracefile_dump_all_pages(str); -out: - trace_free_string_buffer(str, usr_str_nob + 1); - return rc; -} - -int trace_daemon_command(char *str) +int cfs_trace_daemon_command(char *str) { int rc = 0; - tracefile_write_lock(); + down_write(&cfs_tracefile_sem); if (strcmp(str, "stop") == 0) { - tracefile_write_unlock(); - trace_stop_thread(); - tracefile_write_lock(); - memset(tracefile, 0, sizeof(tracefile)); - - } else if (strncmp(str, "size=", 5) == 0) { - tracefile_size = simple_strtoul(str + 5, NULL, 0); - if (tracefile_size < 10 || tracefile_size > 20480) - tracefile_size = TRACEFILE_SIZE; - else - tracefile_size <<= 20; - - } else if (strlen(str) >= sizeof(tracefile)) { + up_write(&cfs_tracefile_sem); + cfs_trace_stop_thread(); + down_write(&cfs_tracefile_sem); + memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); + + } else if (strncmp(str, "size=", 5) == 0) { + unsigned long tmp; + + rc = kstrtoul(str + 5, 10, &tmp); + if (!rc) { + if (tmp < 10 || tmp > 20480) + cfs_tracefile_size = CFS_TRACEFILE_SIZE; + else + cfs_tracefile_size = tmp << 20; + } + } else if (strlen(str) >= sizeof(cfs_tracefile)) { rc = -ENAMETOOLONG; -#ifndef __WINNT__ } else if (str[0] != '/') { rc = -EINVAL; -#endif } else { - strcpy(tracefile, str); + strcpy(cfs_tracefile, str); - printk(KERN_INFO "Lustre: debug daemon will attempt to start writing " - "to %s (%lukB max)\n", tracefile, - (long)(tracefile_size >> 10)); + pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n", + cfs_tracefile, (long)(cfs_tracefile_size >> 10)); - trace_start_thread(); + cfs_trace_start_thread(); } - tracefile_write_unlock(); + up_write(&cfs_tracefile_sem); return rc; } -int trace_daemon_command_usrstr(void *usr_str, int usr_str_nob) +int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) { - char *str; - int rc; + char *str; + int rc; - rc = trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 
0) - return rc; + str = memdup_user_nul(usr_str, usr_str_nob); + if (IS_ERR(str)) + return PTR_ERR(str); - rc = trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc == 0) - rc = trace_daemon_command(str); + rc = cfs_trace_daemon_command(strim(str)); + kfree(str); - trace_free_string_buffer(str, usr_str_nob + 1); - return rc; + return rc; } -int trace_set_debug_mb(int mb) +int cfs_trace_set_debug_mb(int mb) { - int i; - int j; - int pages; - int limit = trace_max_debug_mb(); - struct trace_cpu_data *tcd; + int i; + int j; + unsigned long pages; + unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT)); + unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5); + struct cfs_trace_cpu_data *tcd; - if (mb < num_possible_cpus()) - return -EINVAL; - - if (mb > limit) { - printk(KERN_ERR "Lustre: Refusing to set debug buffer size to " - "%dMB - limit is %d\n", mb, limit); - return -EINVAL; - } + if (mb < num_possible_cpus()) { + pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n", + mb, num_possible_cpus()); + mb = num_possible_cpus(); + } - mb /= num_possible_cpus(); - pages = mb << (20 - CFS_PAGE_SHIFT); + if (mb > limit) { + pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n", + mb, limit); + mb = limit; + } - tracefile_write_lock(); + mb /= num_possible_cpus(); + pages = mb << (20 - PAGE_SHIFT); - tcd_for_each(tcd, i, j) - tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; + down_write(&cfs_tracefile_sem); - tracefile_write_unlock(); - - return 0; -} - -int trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob) -{ - char str[32]; - int rc; + cfs_tcd_for_each(tcd, i, j) + tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; - rc = trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob); - if (rc < 0) - return rc; + daemon_pages_max = pages; + up_write(&cfs_tracefile_sem); - return trace_set_debug_mb(simple_strtoul(str, NULL, 0)); + return mb; } -int trace_get_debug_mb(void) +int cfs_trace_get_debug_mb(void) { int i; int j; - struct trace_cpu_data *tcd; + struct cfs_trace_cpu_data *tcd; int total_pages = 0; - tracefile_read_lock(); + down_read(&cfs_tracefile_sem); - tcd_for_each(tcd, i, j) + cfs_tcd_for_each(tcd, i, j) total_pages += tcd->tcd_max_pages; - tracefile_read_unlock(); + up_read(&cfs_tracefile_sem); - return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1; + return (total_pages >> (20 - PAGE_SHIFT)) + 1; } static int tracefiled(void *arg) { - struct page_collection pc; - struct tracefiled_ctl *tctl = arg; - struct trace_page *tage; - struct trace_page *tmp; - struct ptldebug_header *hdr; - cfs_file_t *filp; - int rc; - - CFS_DECL_MMSPACE; - - /* we're started late enough that we pick up init's fs context */ - /* this is so broken in uml? what on earth is going on? 
*/ - cfs_daemonize("ktracefiled"); - - spin_lock_init(&pc.pc_lock); - complete(&tctl->tctl_start); - - while (1) { - cfs_waitlink_t __wait; - - pc.pc_want_daemon_pages = 0; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) - continue; - - filp = NULL; - tracefile_read_lock(); - if (tracefile[0] != 0) { - filp = cfs_filp_open(tracefile, - O_CREAT | O_RDWR | O_LARGEFILE, - 0600, &rc); - if (!(filp)) - printk(KERN_WARNING "couldn't open %s: %d\n", - tracefile, rc); - } - tracefile_read_unlock(); - if (filp == NULL) { - put_pages_on_daemon_list(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - continue; - } - - CFS_MMSPACE_OPEN; - - /* mark the first header, so we can sort in chunks */ - tage = tage_from_list(pc.pc_pages.next); - __LASSERT_TAGE_INVARIANT(tage); - - hdr = cfs_page_address(tage->page); - hdr->ph_flags |= PH_FLAG_FIRST_RECORD; - - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct trace_page, linkage) { - static loff_t f_pos; - - __LASSERT_TAGE_INVARIANT(tage); - - if (f_pos >= (off_t)tracefile_size) - f_pos = 0; - else if (f_pos > (off_t)cfs_filp_size(filp)) - f_pos = cfs_filp_size(filp); - - rc = cfs_filp_write(filp, cfs_page_address(tage->page), - tage->used, &f_pos); - if (rc != (int)tage->used) { - printk(KERN_WARNING "wanted to write %u but " - "wrote %d\n", tage->used, rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - } - } - CFS_MMSPACE_CLOSE; - - cfs_filp_close(filp); - put_pages_on_daemon_list(&pc); - if (!list_empty(&pc.pc_pages)) { - int i; - - printk(KERN_ALERT "Lustre: trace pages aren't empty\n"); - printk(KERN_ERR "total cpus(%d): ", num_possible_cpus()); - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_online(i)) - printk(KERN_ERR "%d(on) ", i); - else - printk(KERN_ERR "%d(off) ", i); - printk(KERN_ERR "\n"); - - i = 0; - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) - printk(KERN_ERR "page %d belongs to cpu %d\n", - ++i, tage->cpu); - printk(KERN_ERR "There are %d pages unwritten\n", i); - } - __LASSERT(list_empty(&pc.pc_pages)); - - cfs_waitlink_init(&__wait); - cfs_waitq_add(&tctl->tctl_waitq, &__wait); - set_current_state(TASK_INTERRUPTIBLE); - cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE, - cfs_time_seconds(1)); - cfs_waitq_del(&tctl->tctl_waitq, &__wait); - - if (atomic_read(&tctl->tctl_shutdown)) - break; - } - complete(&tctl->tctl_stop); - return 0; + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + struct file *filp; + char *buf; + int last_loop = 0; + int rc; + + while (!last_loop) { + LIST_HEAD(for_daemon_pages); + int for_daemon_pages_count = 0; + schedule_timeout_interruptible(cfs_time_seconds(1)); + if (kthread_should_stop()) + last_loop = 1; + collect_pages(&pc); + if (list_empty(&pc.pc_pages)) + continue; + + filp = NULL; + down_read(&cfs_tracefile_sem); + if (cfs_tracefile[0] != 0) { + filp = filp_open(cfs_tracefile, + O_CREAT | O_RDWR | O_LARGEFILE, + 0600); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + filp = NULL; + pr_warn("Lustre: couldn't open %s: rc = %d\n", + cfs_tracefile, rc); + } + } + up_read(&cfs_tracefile_sem); + + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); + + if (filp) { + struct dentry *de = file_dentry(filp); + static loff_t f_pos; + + if (f_pos >= (off_t)cfs_tracefile_size) + f_pos = 0; + else if (f_pos > i_size_read(de->d_inode)) + f_pos = i_size_read(de->d_inode); + + buf = kmap(tage->page); + rc = cfs_kernel_write(filp, buf, tage->used, + &f_pos); + kunmap(tage->page); 
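+				/* A short write most likely means the
+				 * filesystem holding cfs_tracefile is full
+				 * or returning errors; put the pages back
+				 * on the collection so the records are not
+				 * lost.
+				 */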
+ if (rc != (int)tage->used) { + pr_warn("Lustre: wanted to write %u but wrote %d\n", + tage->used, rc); + put_pages_back(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + break; + } + } + list_del_init(&tage->linkage); + list_add_tail(&tage->page->lru, &for_daemon_pages); + for_daemon_pages_count += 1; + + tage->page->private = (int)tage->used; + kfree(tage); + atomic_dec(&cfs_tage_allocated); + } + + if (filp) + filp_close(filp, NULL); + + down_write(&cfs_tracefile_sem); + list_splice_tail(&for_daemon_pages, &daemon_pages); + daemon_pages_count += for_daemon_pages_count; + while (daemon_pages_count > daemon_pages_max) { + struct page *p = list_first_entry(&daemon_pages, + struct page, lru); + list_del(&p->lru); + put_page(p); + daemon_pages_count -= 1; + } + up_write(&cfs_tracefile_sem); + + if (!list_empty(&pc.pc_pages)) { + int i; + + pr_alert("Lustre: trace pages aren't empty\n"); + pr_err("Lustre: total cpus(%d): ", num_possible_cpus()); + for (i = 0; i < num_possible_cpus(); i++) + if (cpu_online(i)) + pr_cont("%d(on) ", i); + else + pr_cont("%d(off) ", i); + pr_cont("\n"); + + i = 0; + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, + linkage) + pr_err("Lustre: page %d belongs to cpu %d\n", + ++i, tage->cpu); + pr_err("Lustre: There are %d pages unwritten\n", i); + } + __LASSERT(list_empty(&pc.pc_pages)); + } + + return 0; } -int trace_start_thread(void) +int cfs_trace_start_thread(void) { - struct tracefiled_ctl *tctl = &trace_tctl; - int rc = 0; - - mutex_down(&trace_thread_sem); - if (thread_running) - goto out; - - init_completion(&tctl->tctl_start); - init_completion(&tctl->tctl_stop); - cfs_waitq_init(&tctl->tctl_waitq); - atomic_set(&tctl->tctl_shutdown, 0); - - if (cfs_kernel_thread(tracefiled, tctl, 0) < 0) { - rc = -ECHILD; - goto out; - } - - wait_for_completion(&tctl->tctl_start); - thread_running = 1; -out: - mutex_up(&trace_thread_sem); - return rc; + struct task_struct *tsk; + int rc = 0; + + if (tctl_task) + return 0; + + tsk = kthread_create(tracefiled, NULL, "ktracefiled"); + if (IS_ERR(tsk)) + rc = -ECHILD; + else if (cmpxchg(&tctl_task, NULL, tsk) != NULL) + /* already running */ + kthread_stop(tsk); + else + wake_up_process(tsk); + + return rc; } -void trace_stop_thread(void) +void cfs_trace_stop_thread(void) { - struct tracefiled_ctl *tctl = &trace_tctl; - - mutex_down(&trace_thread_sem); - if (thread_running) { - printk(KERN_INFO "Lustre: shutting down debug daemon thread...\n"); - atomic_set(&tctl->tctl_shutdown, 1); - wait_for_completion(&tctl->tctl_stop); - thread_running = 0; - } - mutex_up(&trace_thread_sem); + struct task_struct *tsk; + + tsk = xchg(&tctl_task, NULL); + if (tsk) { + pr_info("Lustre: shutting down debug daemon thread...\n"); + kthread_stop(tsk); + } } -int tracefile_init(int max_pages) -{ - struct trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; - - rc = tracefile_init_arch(); - if (rc != 0) - return rc; - - tcd_for_each(tcd, i, j) { - /* tcd_pages_factor is initialized int tracefile_init_arch. 
*/ - factor = tcd->tcd_pages_factor; - CFS_INIT_LIST_HEAD(&tcd->tcd_pages); - CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages); - CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages); - tcd->tcd_cur_pages = 0; - tcd->tcd_cur_stock_pages = 0; - tcd->tcd_cur_daemon_pages = 0; - tcd->tcd_max_pages = (max_pages * factor) / 100; - LASSERT(tcd->tcd_max_pages > 0); - tcd->tcd_shutting_down = 0; - } +/* percents to share the total debug memory for each type */ +static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = { + 80, /* 80% pages for CFS_TCD_TYPE_PROC */ + 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */ + 10 /* 10% pages for CFS_TCD_TYPE_IRQ */ +}; - return 0; +int cfs_tracefile_init(int max_pages) +{ + struct cfs_trace_cpu_data *tcd; + int i; + int j; + + /* initialize trace_data */ + memset(cfs_trace_data, 0, sizeof(cfs_trace_data)); + for (i = 0; i < CFS_TCD_TYPE_CNT; i++) { + cfs_trace_data[i] = + kmalloc_array(num_possible_cpus(), + sizeof(union cfs_trace_data_union), + GFP_KERNEL); + if (!cfs_trace_data[i]) + goto out_trace_data; + } + + /* arch related info initialized */ + cfs_tcd_for_each(tcd, i, j) { + int factor = pages_factor[i]; + + spin_lock_init(&tcd->tcd_lock); + tcd->tcd_pages_factor = factor; + tcd->tcd_type = i; + tcd->tcd_cpu = j; + + INIT_LIST_HEAD(&tcd->tcd_pages); + INIT_LIST_HEAD(&tcd->tcd_stock_pages); + tcd->tcd_cur_pages = 0; + tcd->tcd_cur_stock_pages = 0; + tcd->tcd_max_pages = (max_pages * factor) / 100; + LASSERT(tcd->tcd_max_pages > 0); + tcd->tcd_shutting_down = 0; + } + daemon_pages_max = max_pages; + + return 0; + +out_trace_data: + for (i = 0; cfs_trace_data[i]; i++) { + kfree(cfs_trace_data[i]); + cfs_trace_data[i] = NULL; + } + pr_err("lnet: Not enough memory\n"); + return -ENOMEM; } static void trace_cleanup_on_all_cpus(void) { - struct trace_cpu_data *tcd; - struct trace_page *tage; - struct trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - tcd_for_each_type_lock(tcd, i, cpu) { - tcd->tcd_shutting_down = 1; - - cfs_list_for_each_entry_safe_typed(tage, tmp, - &tcd->tcd_pages, - struct trace_page, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - tage_free(tage); - } - - tcd->tcd_cur_pages = 0; - } - } + struct cfs_trace_cpu_data *tcd; + struct cfs_trace_page *tage; + int i, cpu; + + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + if (!tcd->tcd_pages_factor) + /* Not initialised */ + continue; + tcd->tcd_shutting_down = 1; + + while (!list_empty(&tcd->tcd_pages)) { + tage = list_first_entry(&tcd->tcd_pages, + struct cfs_trace_page, + linkage); + __LASSERT_TAGE_INVARIANT(tage); + + list_del(&tage->linkage); + cfs_tage_free(tage); + } + tcd->tcd_cur_pages = 0; + } + } } -static void trace_cleanup(void) +static void cfs_trace_cleanup(void) { - struct page_collection pc; + struct page_collection pc; + int i; - CFS_INIT_LIST_HEAD(&pc.pc_pages); - spin_lock_init(&pc.pc_lock); + INIT_LIST_HEAD(&pc.pc_pages); - trace_cleanup_on_all_cpus(); + trace_cleanup_on_all_cpus(); - tracefile_fini_arch(); + for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) { + kfree(cfs_trace_data[i]); + cfs_trace_data[i] = NULL; + } } -void tracefile_exit(void) +void cfs_tracefile_exit(void) { - trace_stop_thread(); - trace_cleanup(); + cfs_trace_stop_thread(); + cfs_trace_flush_pages(); + cfs_trace_cleanup(); }