X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=libcfs%2Flibcfs%2Ftracefile.c;h=36a16faa066bcef46b62b18ac16916c65193eb37;hp=cdc218e2eef9e3e17c7589110549a5d8283ab029;hb=67af976c806994cec27414d24b43f6519d72c240;hpb=9fb46705ae86aa2c0ac29427f0ff24f923560eb7 diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c index cdc218e..36a16fa 100644 --- a/libcfs/libcfs/tracefile.c +++ b/libcfs/libcfs/tracefile.c @@ -15,17 +15,15 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -37,100 +35,166 @@ * Author: Phil Schwan */ - #define DEBUG_SUBSYSTEM S_LNET -#define LUSTRE_TRACEFILE_PRIVATE #include "tracefile.h" +#include +#include +#include +#include +#include +#include +#include +#include #include -/* XXX move things up to the top, comment */ -union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned; + +enum cfs_trace_buf_type { + CFS_TCD_TYPE_PROC = 0, + CFS_TCD_TYPE_SOFTIRQ, + CFS_TCD_TYPE_IRQ, + CFS_TCD_TYPE_CNT +}; + +union cfs_trace_data_union (*cfs_trace_data[CFS_TCD_TYPE_CNT])[NR_CPUS] __cacheline_aligned; char cfs_tracefile[TRACEFILE_NAME_SIZE]; long long cfs_tracefile_size = CFS_TRACEFILE_SIZE; static struct tracefiled_ctl trace_tctl; -struct mutex cfs_trace_thread_mutex; +static DEFINE_MUTEX(cfs_trace_thread_mutex); static int thread_running = 0; -cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0); +static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); +static DECLARE_RWSEM(cfs_tracefile_sem); static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd); + struct cfs_trace_cpu_data *tcd); -static inline struct cfs_trace_page * -cfs_tage_from_list(cfs_list_t *list) +/* trace file lock routines */ +/* The walking argument indicates the locking comes from all tcd types + * iterator and we must lock it and dissable local irqs to avoid deadlocks + * with other interrupt locks that might be happening. See LU-1311 + * for details. 
+ */ +int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking) + __acquires(&tcd->tcd_lock) { - return cfs_list_entry(list, struct cfs_trace_page, linkage); + __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT); + if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) + spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags); + else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) + spin_lock_bh(&tcd->tcd_lock); + else if (unlikely(walking)) + spin_lock_irq(&tcd->tcd_lock); + else + spin_lock(&tcd->tcd_lock); + return 1; } -static struct cfs_trace_page *cfs_tage_alloc(int gfp) +void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) + __releases(&tcd->tcd_lock) { - cfs_page_t *page; - struct cfs_trace_page *tage; + __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_CNT); + if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) + spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags); + else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) + spin_unlock_bh(&tcd->tcd_lock); + else if (unlikely(walking)) + spin_unlock_irq(&tcd->tcd_lock); + else + spin_unlock(&tcd->tcd_lock); +} - /* My caller is trying to free memory */ - if (!cfs_in_interrupt() && cfs_memory_pressure_get()) - return NULL; +#define cfs_tcd_for_each(tcd, i, j) \ + for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) \ + for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ + j < num_possible_cpus(); \ + j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) - /* - * Don't spam console with allocation failures: they will be reported - * by upper layer anyway. - */ - gfp |= CFS_ALLOC_NOWARN; - page = cfs_alloc_page(gfp); - if (page == NULL) - return NULL; - - tage = cfs_alloc(sizeof(*tage), gfp); - if (tage == NULL) { - cfs_free_page(page); - return NULL; - } +#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ + for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i] && \ + (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ + cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) - tage->page = page; - cfs_atomic_inc(&cfs_tage_allocated); - return tage; +enum cfs_trace_buf_type cfs_trace_buf_idx_get(void) +{ + if (in_irq()) + return CFS_TCD_TYPE_IRQ; + if (in_softirq()) + return CFS_TCD_TYPE_SOFTIRQ; + return CFS_TCD_TYPE_PROC; } -static void cfs_tage_free(struct cfs_trace_page *tage) +static inline struct cfs_trace_cpu_data * +cfs_trace_get_tcd(void) { - __LASSERT(tage != NULL); - __LASSERT(tage->page != NULL); + struct cfs_trace_cpu_data *tcd = + &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd; + + cfs_trace_lock_tcd(tcd, 0); - cfs_free_page(tage->page); - cfs_free(tage); - cfs_atomic_dec(&cfs_tage_allocated); + return tcd; } -static void cfs_tage_to_tail(struct cfs_trace_page *tage, - cfs_list_t *queue) +static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd) { - __LASSERT(tage != NULL); - __LASSERT(queue != NULL); + cfs_trace_unlock_tcd(tcd, 0); - cfs_list_move_tail(&tage->linkage, queue); + put_cpu(); } -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp, - cfs_list_t *stock) +static inline struct cfs_trace_page * +cfs_tage_from_list(struct list_head *list) { - int i; + return list_entry(list, struct cfs_trace_page, linkage); +} - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
- */ +static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) +{ + struct page *page; + struct cfs_trace_page *tage; - for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++ i) { - struct cfs_trace_page *tage; + /* My caller is trying to free memory */ + if (!in_interrupt() && (current->flags & PF_MEMALLOC)) + return NULL; + + /* + * Don't spam console with allocation failures: they will be reported + * by upper layer anyway. + */ + gfp |= __GFP_NOWARN; + page = alloc_page(gfp); + if (page == NULL) + return NULL; + + tage = kmalloc(sizeof(*tage), gfp); + if (tage == NULL) { + __free_page(page); + return NULL; + } + + tage->page = page; + atomic_inc(&cfs_tage_allocated); + return tage; +} - tage = cfs_tage_alloc(gfp); - if (tage == NULL) - break; - cfs_list_add_tail(&tage->linkage, stock); - } - return i; +static void cfs_tage_free(struct cfs_trace_page *tage) +{ + __LASSERT(tage != NULL); + __LASSERT(tage->page != NULL); + + __free_page(tage->page); + kfree(tage); + atomic_dec(&cfs_tage_allocated); +} + +static void cfs_tage_to_tail(struct cfs_trace_page *tage, + struct list_head *queue) +{ + __LASSERT(tage != NULL); + __LASSERT(queue != NULL); + + list_move_tail(&tage->linkage, queue); } /* return a page that has 'len' bytes left at the end */ @@ -140,75 +204,72 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) struct cfs_trace_page *tage; if (tcd->tcd_cur_pages > 0) { - __LASSERT(!cfs_list_empty(&tcd->tcd_pages)); + __LASSERT(!list_empty(&tcd->tcd_pages)); tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= CFS_PAGE_SIZE) + if (tage->used + len <= PAGE_SIZE) return tage; } - if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { - if (tcd->tcd_cur_stock_pages > 0) { - tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); - -- tcd->tcd_cur_stock_pages; - cfs_list_del_init(&tage->linkage); - } else { - tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC); - if (tage == NULL) { - if (printk_ratelimit()) - printk(CFS_KERN_WARNING - "cannot allocate a tage (%ld)\n", - tcd->tcd_cur_pages); - return NULL; - } - } - - tage->used = 0; - tage->cpu = cfs_smp_processor_id(); - tage->type = tcd->tcd_type; - cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages); - tcd->tcd_cur_pages++; - - if (tcd->tcd_cur_pages > 8 && thread_running) { - struct tracefiled_ctl *tctl = &trace_tctl; - /* - * wake up tracefiled to process some pages. - */ - cfs_waitq_signal(&tctl->tctl_waitq); - } - return tage; + if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { + if (tcd->tcd_cur_stock_pages > 0) { + tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); + --tcd->tcd_cur_stock_pages; + list_del_init(&tage->linkage); + } else { + tage = cfs_tage_alloc(GFP_ATOMIC); + if (unlikely(tage == NULL)) { + if ((!(current->flags & PF_MEMALLOC) || + in_interrupt()) && printk_ratelimit()) + pr_warn("Lustre: cannot allocate a tage (%ld)\n", + tcd->tcd_cur_pages); + return NULL; + } + } + + tage->used = 0; + tage->cpu = smp_processor_id(); + tage->type = tcd->tcd_type; + list_add_tail(&tage->linkage, &tcd->tcd_pages); + tcd->tcd_cur_pages++; + + if (tcd->tcd_cur_pages > 8 && thread_running) { + struct tracefiled_ctl *tctl = &trace_tctl; + /* + * wake up tracefiled to process some pages. 
+ */ + wake_up(&tctl->tctl_waitq); + } + return tage; } return NULL; } static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) { - int pgcount = tcd->tcd_cur_pages / 10; - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; + int pgcount = tcd->tcd_cur_pages / 10; + struct page_collection pc; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ + /* + * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ - if (printk_ratelimit()) - printk(CFS_KERN_WARNING "debug daemon buffer overflowed; " - "discarding 10%% of pages (%d of %ld)\n", - pgcount + 1, tcd->tcd_cur_pages); + if (printk_ratelimit()) + pr_warn("Lustre: debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n", + pgcount + 1, tcd->tcd_cur_pages); - CFS_INIT_LIST_HEAD(&pc.pc_pages); - spin_lock_init(&pc.pc_lock); + INIT_LIST_HEAD(&pc.pc_pages); - cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages, - struct cfs_trace_page, linkage) { - if (pgcount-- == 0) - break; + list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { + if (pgcount-- == 0) + break; - cfs_list_move_tail(&tage->linkage, &pc.pc_pages); - tcd->tcd_cur_pages--; - } - put_pages_on_tcd_daemon_list(&pc, tcd); + list_move_tail(&tage->linkage, &pc.pc_pages); + tcd->tcd_cur_pages--; + } + put_pages_on_tcd_daemon_list(&pc, tcd); } /* return a page that has 'len' bytes left at the end */ @@ -222,11 +283,11 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, * from here: this will lead to infinite recursion. */ - if (len > CFS_PAGE_SIZE) { - printk(CFS_KERN_ERR - "cowardly refusing to write %lu bytes in a page\n", len); - return NULL; - } + if (len > PAGE_SIZE) { + pr_err("LustreError: cowardly refusing to write %lu bytes in a page\n", + len); + return NULL; + } tage = cfs_trace_get_tage_try(tcd, len); if (tage != NULL) @@ -241,293 +302,355 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, return tage; } -int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, - const char *format, ...) +static void cfs_set_ptldebug_header(struct ptldebug_header *header, + struct libcfs_debug_msg_data *msgdata, + unsigned long stack) { - va_list args; - int rc; - - va_start(args, format); - rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); - va_end(args); - - return rc; + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + + header->ph_subsys = msgdata->msg_subsys; + header->ph_mask = msgdata->msg_mask; + header->ph_cpu_id = smp_processor_id(); + header->ph_type = cfs_trace_buf_idx_get(); + /* y2038 safe since all user space treats this as unsigned, but + * will overflow in 2106 + */ + header->ph_sec = (u32)ts.tv_sec; + header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; + header->ph_stack = stack; + header->ph_pid = current->pid; + header->ph_line_num = msgdata->msg_line; + header->ph_extern_pid = 0; } -EXPORT_SYMBOL(libcfs_debug_msg); -int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, - const char *format1, va_list args, - const char *format2, ...) +/** + * tty_write_msg - write a message to a certain tty, not just the console. 
+ * @tty: the destination tty_struct + * @msg: the message to write + * + * tty_write_message is not exported, so write a same function for it + * + */ +static void tty_write_msg(struct tty_struct *tty, const char *msg) { - struct cfs_trace_cpu_data *tcd = NULL; - struct ptldebug_header header = {0}; - struct cfs_trace_page *tage; - /* string_buf is used only if tcd != NULL, and is always set then */ - char *string_buf = NULL; - char *debug_buf; - int known_size; - int needed = 85; /* average message length */ - int max_nob; - va_list ap; - int depth; - int i; - int remain; - int mask = msgdata->msg_mask; - char *file = (char *)msgdata->msg_file; - cfs_debug_limit_state_t *cdls = msgdata->msg_cdls; - - if (strchr(file, '/')) - file = strrchr(file, '/') + 1; - - tcd = cfs_trace_get_tcd(); - - /* cfs_trace_get_tcd() grabs a lock, which disables preemption and - * pins us to a particular CPU. This avoids an smp_processor_id() - * warning on Linux when debugging is enabled. */ - cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); - - if (tcd == NULL) /* arch may not log in IRQ context */ - goto console; - - if (tcd->tcd_cur_pages == 0) - header.ph_flags |= PH_FLAG_FIRST_RECORD; - - if (tcd->tcd_shutting_down) { - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - depth = __current_nesting_level(); - known_size = strlen(file) + 1 + depth; - if (msgdata->msg_fn) - known_size += strlen(msgdata->msg_fn) + 1; - - if (libcfs_debug_binary) - known_size += sizeof(header); - - /*/ - * '2' used because vsnprintf return real size required for output - * _without_ terminating NULL. - * if needed is to small for this format. - */ - for (i = 0; i < 2; i++) { - tage = cfs_trace_get_tage(tcd, needed + known_size + 1); - if (tage == NULL) { - if (needed + known_size > CFS_PAGE_SIZE) - mask |= D_ERROR; - - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - string_buf = (char *)cfs_page_address(tage->page) + - tage->used + known_size; - - max_nob = CFS_PAGE_SIZE - tage->used - known_size; - if (max_nob <= 0) { - printk(CFS_KERN_EMERG "negative max_nob: %d\n", - max_nob); - mask |= D_ERROR; - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - needed = 0; - if (format1) { - va_copy(ap, args); - needed = vsnprintf(string_buf, max_nob, format1, ap); - va_end(ap); - } - - if (format2) { - remain = max_nob - needed; - if (remain < 0) - remain = 0; - - va_start(ap, format2); - needed += vsnprintf(string_buf + needed, remain, - format2, ap); - va_end(ap); - } - - if (needed < max_nob) /* well. printing ok.. 
*/ - break; - } - - if (*(string_buf+needed-1) != '\n') - printk(CFS_KERN_INFO "format at %s:%d:%s doesn't end in " - "newline\n", file, msgdata->msg_line, msgdata->msg_fn); - - header.ph_len = known_size + needed; - debug_buf = (char *)cfs_page_address(tage->page) + tage->used; - - if (libcfs_debug_binary) { - memcpy(debug_buf, &header, sizeof(header)); - tage->used += sizeof(header); - debug_buf += sizeof(header); - } - - /* indent message according to the nesting level */ - while (depth-- > 0) { - *(debug_buf++) = '.'; - ++ tage->used; - } - - strcpy(debug_buf, file); - tage->used += strlen(file) + 1; - debug_buf += strlen(file) + 1; - - if (msgdata->msg_fn) { - strcpy(debug_buf, msgdata->msg_fn); - tage->used += strlen(msgdata->msg_fn) + 1; - debug_buf += strlen(msgdata->msg_fn) + 1; - } - - __LASSERT(debug_buf == string_buf); - - tage->used += needed; - __LASSERT (tage->used <= CFS_PAGE_SIZE); - -console: - if ((mask & libcfs_printk) == 0) { - /* no console output requested */ - if (tcd != NULL) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cdls != NULL) { - if (libcfs_console_ratelimit && - cdls->cdls_next != 0 && /* not first time ever */ - !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { - /* skipping a console message */ - cdls->cdls_count++; - if (tcd != NULL) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cfs_time_after(cfs_time_current(), cdls->cdls_next + - libcfs_console_max_delay - + cfs_time_seconds(10))) { - /* last timeout was a long time ago */ - cdls->cdls_delay /= libcfs_console_backoff * 4; - } else { - cdls->cdls_delay *= libcfs_console_backoff; - - if (cdls->cdls_delay < libcfs_console_min_delay) - cdls->cdls_delay = libcfs_console_min_delay; - else if (cdls->cdls_delay > libcfs_console_max_delay) - cdls->cdls_delay = libcfs_console_max_delay; - } - - /* ensure cdls_next is never zero after it's been seen */ - cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; - } - - if (tcd != NULL) { - cfs_print_to_console(&header, mask, string_buf, needed, file, - msgdata->msg_fn); - cfs_trace_put_tcd(tcd); - } else { - string_buf = cfs_trace_get_console_buffer(); - - needed = 0; - if (format1 != NULL) { - va_copy(ap, args); - needed = vsnprintf(string_buf, - CFS_TRACE_CONSOLE_BUFFER_SIZE, - format1, ap); - va_end(ap); - } - if (format2 != NULL) { - remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; - if (remain > 0) { - va_start(ap, format2); - needed += vsnprintf(string_buf+needed, remain, - format2, ap); - va_end(ap); - } - } - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - cfs_trace_put_console_buffer(string_buf); - } - - if (cdls != NULL && cdls->cdls_count != 0) { - string_buf = cfs_trace_get_console_buffer(); + mutex_lock(&tty->atomic_write_lock); + tty_lock(tty); + if (tty->ops->write && tty->count > 0) + tty->ops->write(tty, msg, strlen(msg)); + tty_unlock(tty); + mutex_unlock(&tty->atomic_write_lock); + wake_up_interruptible_poll(&tty->write_wait, POLLOUT); +} - needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, - "Skipped %d previous similar message%s\n", - cdls->cdls_count, - (cdls->cdls_count > 1) ? 
"s" : ""); +static void cfs_tty_write_message(const char *prefix, int mask, const char *msg) +{ + struct tty_struct *tty; + + tty = get_current_tty(); + if (!tty) + return; + + tty_write_msg(tty, prefix); + if ((mask & D_EMERG) || (mask & D_ERROR)) + tty_write_msg(tty, "Error"); + tty_write_msg(tty, ": "); + tty_write_msg(tty, msg); + tty_kref_put(tty); +} - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); +static void cfs_vprint_to_console(struct ptldebug_header *hdr, int mask, + struct va_format *vaf, const char *file, + const char *fn) +{ + char *prefix = "Lustre"; + + if (hdr->ph_subsys == S_LND || hdr->ph_subsys == S_LNET) + prefix = "LNet"; + + if (mask & D_CONSOLE) { + if (mask & D_EMERG) + pr_emerg("%sError: %pV", prefix, vaf); + else if (mask & D_ERROR) + pr_err("%sError: %pV", prefix, vaf); + else if (mask & D_WARNING) + pr_warn("%s: %pV", prefix, vaf); + else if (mask & libcfs_printk) + pr_info("%s: %pV", prefix, vaf); + } else { + if (mask & D_EMERG) + pr_emerg("%sError: %d:%d:(%s:%d:%s()) %pV", prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, vaf); + else if (mask & D_ERROR) + pr_err("%sError: %d:%d:(%s:%d:%s()) %pV", prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, vaf); + else if (mask & D_WARNING) + pr_warn("%s: %d:%d:(%s:%d:%s()) %pV", prefix, + hdr->ph_pid, hdr->ph_extern_pid, file, + hdr->ph_line_num, fn, vaf); + else if (mask & (D_CONSOLE | libcfs_printk)) + pr_info("%s: %pV", prefix, vaf); + } + + if (mask & D_TTY) + /* tty_write_msg doesn't handle formatting */ + cfs_tty_write_message(prefix, mask, vaf->fmt); +} - cfs_trace_put_console_buffer(string_buf); - cdls->cdls_count = 0; - } +static void cfs_print_to_console(struct ptldebug_header *hdr, int mask, + const char *file, const char *fn, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; - return 0; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + cfs_vprint_to_console(hdr, mask, &vaf, file, fn); } -EXPORT_SYMBOL(libcfs_debug_vmsg2); -void -libcfs_assertion_failed(const char *expr, struct libcfs_debug_msg_data *msgdata) +int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, + const char *format, ...) { - libcfs_debug_msg(msgdata, "ASSERTION(%s) failed\n", expr); - /* cfs_enter_debugger(); */ - lbug_with_loc(msgdata); + struct cfs_trace_cpu_data *tcd = NULL; + struct ptldebug_header header = {0}; + struct cfs_trace_page *tage; + /* string_buf is used only if tcd != NULL, and is always set then */ + char *string_buf = NULL; + char *debug_buf; + int known_size; + int needed = 85; /* seeded with average message length */ + int max_nob; + va_list ap; + int retry; + int mask = msgdata->msg_mask; + char *file = (char *)msgdata->msg_file; + struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; + + if (strchr(file, '/')) + file = strrchr(file, '/') + 1; + + tcd = cfs_trace_get_tcd(); + + /* cfs_trace_get_tcd() grabs a lock, which disables preemption and + * pins us to a particular CPU. This avoids an smp_processor_id() + * warning on Linux when debugging is enabled. 
+ */ + cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); + + if (!tcd) /* arch may not log in IRQ context */ + goto console; + + if (tcd->tcd_cur_pages == 0) + header.ph_flags |= PH_FLAG_FIRST_RECORD; + + if (tcd->tcd_shutting_down) { + cfs_trace_put_tcd(tcd); + tcd = NULL; + goto console; + } + + known_size = strlen(file) + 1; + if (msgdata->msg_fn) + known_size += strlen(msgdata->msg_fn) + 1; + + if (libcfs_debug_binary) + known_size += sizeof(header); + + /* + * May perform an additional pass to update 'needed' and increase + * tage buffer size to match vsnprintf reported size required + * On the second pass (retry=1) use vscnprintf [which returns + * number of bytes written not including the terminating nul] + * to clarify `needed` is used as number of bytes written + * for the remainder of this function + */ + for (retry = 0; retry < 2; retry++) { + tage = cfs_trace_get_tage(tcd, needed + known_size + 1); + if (!tage) { + if (needed + known_size > PAGE_SIZE) + mask |= D_ERROR; + + cfs_trace_put_tcd(tcd); + tcd = NULL; + goto console; + } + + string_buf = (char *)page_address(tage->page) + + tage->used + known_size; + + max_nob = PAGE_SIZE - tage->used - known_size; + if (max_nob <= 0) { + pr_emerg("LustreError: negative max_nob: %d\n", + max_nob); + mask |= D_ERROR; + cfs_trace_put_tcd(tcd); + tcd = NULL; + goto console; + } + + va_start(ap, format); + if (retry) + needed = vscnprintf(string_buf, max_nob, format, ap); + else + needed = vsnprintf(string_buf, max_nob, format, ap); + va_end(ap); + + if (needed < max_nob) /* well. printing ok.. */ + break; + } + + /* `needed` is actual bytes written to string_buf */ + if (*(string_buf + needed - 1) != '\n') { + pr_info("Lustre: format at %s:%d:%s doesn't end in newline\n", + file, msgdata->msg_line, msgdata->msg_fn); + } else if (mask & D_TTY) { + /* TTY needs '\r\n' to move carriage to leftmost position */ + if (needed < 2 || *(string_buf + needed - 2) != '\r') + pr_info("Lustre: format at %s:%d:%s doesn't end in '\\r\\n'\n", + file, msgdata->msg_line, msgdata->msg_fn); + if (strnchr(string_buf, needed, '%')) + pr_info("Lustre: format at %s:%d:%s mustn't contain %%\n", + file, msgdata->msg_line, msgdata->msg_fn); + } + + header.ph_len = known_size + needed; + debug_buf = (char *)page_address(tage->page) + tage->used; + + if (libcfs_debug_binary) { + memcpy(debug_buf, &header, sizeof(header)); + tage->used += sizeof(header); + debug_buf += sizeof(header); + } + + strlcpy(debug_buf, file, PAGE_SIZE - tage->used); + tage->used += strlen(file) + 1; + debug_buf += strlen(file) + 1; + + if (msgdata->msg_fn) { + strlcpy(debug_buf, msgdata->msg_fn, PAGE_SIZE - tage->used); + tage->used += strlen(msgdata->msg_fn) + 1; + debug_buf += strlen(msgdata->msg_fn) + 1; + } + + __LASSERT(debug_buf == string_buf); + + tage->used += needed; + __LASSERT(tage->used <= PAGE_SIZE); + +console: + if ((mask & libcfs_printk) == 0) { + /* no console output requested */ + if (tcd != NULL) + cfs_trace_put_tcd(tcd); + return 1; + } + + if (cdls != NULL) { + if (libcfs_console_ratelimit && + cdls->cdls_next != 0 && /* not first time ever */ + time_before(jiffies, cdls->cdls_next)) { + /* skipping a console message */ + cdls->cdls_count++; + if (tcd != NULL) + cfs_trace_put_tcd(tcd); + return 1; + } + + if (time_after(jiffies, cdls->cdls_next + + libcfs_console_max_delay + + cfs_time_seconds(10))) { + /* last timeout was a long time ago */ + cdls->cdls_delay /= libcfs_console_backoff * 4; + } else { + cdls->cdls_delay *= libcfs_console_backoff; + } + + if 
(cdls->cdls_delay < libcfs_console_min_delay) + cdls->cdls_delay = libcfs_console_min_delay; + else if (cdls->cdls_delay > libcfs_console_max_delay) + cdls->cdls_delay = libcfs_console_max_delay; + + /* ensure cdls_next is never zero after it's been seen */ + cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1; + } + + if (tcd) { + cfs_print_to_console(&header, mask, file, msgdata->msg_fn, + "%s", string_buf); + cfs_trace_put_tcd(tcd); + } else { + struct va_format vaf; + + va_start(ap, format); + vaf.fmt = format; + vaf.va = ≈ + cfs_vprint_to_console(&header, mask, + &vaf, file, msgdata->msg_fn); + va_end(ap); + } + + if (cdls != NULL && cdls->cdls_count != 0) { + /* Do not allow print this to TTY */ + cfs_print_to_console(&header, mask & ~D_TTY, file, + msgdata->msg_fn, + "Skipped %d previous similar message%s\n", + cdls->cdls_count, + (cdls->cdls_count > 1) ? "s" : ""); + + cdls->cdls_count = 0; + } + + return 0; } -EXPORT_SYMBOL(libcfs_assertion_failed); +EXPORT_SYMBOL(libcfs_debug_msg); void cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *msgdata) + struct libcfs_debug_msg_data *msgdata) { - struct ptldebug_header hdr; + struct ptldebug_header hdr; - libcfs_panic_in_progress = 1; - libcfs_catastrophe = 1; - cfs_mb(); + libcfs_panic_in_progress = 1; + libcfs_catastrophe = 1; + smp_mb(); - cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); + cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); - cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), - msgdata->msg_file, msgdata->msg_fn); + cfs_print_to_console(&hdr, D_EMERG, msgdata->msg_file, msgdata->msg_fn, + "%s", str); - LIBCFS_PANIC("Lustre debug assertion failure\n"); + panic("Lustre debug assertion failure\n"); - /* not reached */ + /* not reached */ } static void panic_collect_pages(struct page_collection *pc) { - /* Do the collect_pages job on a single CPU: assumes that all other - * CPUs have been stopped during a panic. If this isn't true for some - * arch, this will have to be implemented separately in each arch. */ - int i; - int j; - struct cfs_trace_cpu_data *tcd; + /* Do the collect_pages job on a single CPU: assumes that all other + * CPUs have been stopped during a panic. If this isn't true for some + * arch, this will have to be implemented separately in each arch. 
*/ + int i; + int j; + struct cfs_trace_cpu_data *tcd; - CFS_INIT_LIST_HEAD(&pc->pc_pages); + INIT_LIST_HEAD(&pc->pc_pages); - cfs_tcd_for_each(tcd, i, j) { - cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; + cfs_tcd_for_each(tcd, i, j) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - cfs_list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } + if (pc->pc_want_daemon_pages) { + list_splice_init(&tcd->tcd_daemon_pages, + &pc->pc_pages); + tcd->tcd_cur_daemon_pages = 0; + } + } } static void collect_pages_on_all_cpus(struct page_collection *pc) @@ -535,60 +658,54 @@ static void collect_pages_on_all_cpus(struct page_collection *pc) struct cfs_trace_cpu_data *tcd; int i, cpu; - spin_lock(&pc->pc_lock); - cfs_for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - cfs_list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } - } - spin_unlock(&pc->pc_lock); + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + list_splice_init(&tcd->tcd_pages, &pc->pc_pages); + tcd->tcd_cur_pages = 0; + if (pc->pc_want_daemon_pages) { + list_splice_init(&tcd->tcd_daemon_pages, + &pc->pc_pages); + tcd->tcd_cur_daemon_pages = 0; + } + } + } } static void collect_pages(struct page_collection *pc) { - CFS_INIT_LIST_HEAD(&pc->pc_pages); + INIT_LIST_HEAD(&pc->pc_pages); - if (libcfs_panic_in_progress) - panic_collect_pages(pc); - else - collect_pages_on_all_cpus(pc); + if (libcfs_panic_in_progress) + panic_collect_pages(pc); + else + collect_pages_on_all_cpus(pc); } static void put_pages_back_on_all_cpus(struct page_collection *pc) { struct cfs_trace_cpu_data *tcd; - cfs_list_t *cur_head; + struct list_head *cur_head; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; int i, cpu; - spin_lock(&pc->pc_lock); - cfs_for_each_possible_cpu(cpu) { + for_each_possible_cpu(cpu) { cfs_tcd_for_each_type_lock(tcd, i, cpu) { cur_head = tcd->tcd_pages.next; - cfs_list_for_each_entry_safe_typed(tage, tmp, - &pc->pc_pages, - struct cfs_trace_page, - linkage) { + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, + linkage) { - __LASSERT_TAGE_INVARIANT(tage); + __LASSERT_TAGE_INVARIANT(tage); - if (tage->cpu != cpu || tage->type != i) - continue; + if (tage->cpu != cpu || tage->type != i) + continue; - cfs_tage_to_tail(tage, cur_head); - tcd->tcd_cur_pages++; - } - } - } - spin_unlock(&pc->pc_lock); + cfs_tage_to_tail(tage, cur_head); + tcd->tcd_cur_pages++; + } + } + } } static void put_pages_back(struct page_collection *pc) @@ -607,32 +724,28 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc, struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - spin_lock(&pc->pc_lock); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages, - struct cfs_trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); + list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { + __LASSERT_TAGE_INVARIANT(tage); - if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) - continue; + if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) + continue; - cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); - tcd->tcd_cur_daemon_pages++; + cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); + tcd->tcd_cur_daemon_pages++; - if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { - 
struct cfs_trace_page *victim; + if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { + struct cfs_trace_page *victim; - __LASSERT(!cfs_list_empty(&tcd->tcd_daemon_pages)); - victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); + __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); + victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); __LASSERT_TAGE_INVARIANT(victim); - cfs_list_del(&victim->linkage); - cfs_tage_free(victim); - tcd->tcd_cur_daemon_pages--; - } - } - spin_unlock(&pc->pc_lock); + list_del(&victim->linkage); + cfs_tage_free(victim); + tcd->tcd_cur_daemon_pages--; + } + } } static void put_pages_on_daemon_list(struct page_collection *pc) @@ -640,162 +753,127 @@ static void put_pages_on_daemon_list(struct page_collection *pc) struct cfs_trace_cpu_data *tcd; int i, cpu; - cfs_for_each_possible_cpu(cpu) { + for_each_possible_cpu(cpu) { cfs_tcd_for_each_type_lock(tcd, i, cpu) put_pages_on_tcd_daemon_list(pc, tcd); } } +#ifdef LNET_DUMP_ON_PANIC void cfs_trace_debug_print(void) { struct page_collection pc; struct cfs_trace_page *tage; struct cfs_trace_page *tmp; - spin_lock_init(&pc.pc_lock); - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, linkage) { - char *p, *file, *fn; - cfs_page_t *page; - - __LASSERT_TAGE_INVARIANT(tage); - - page = tage->page; - p = cfs_page_address(page); - while (p < ((char *)cfs_page_address(page) + tage->used)) { - struct ptldebug_header *hdr; - int len; - hdr = (void *)p; - p += sizeof(*hdr); - file = p; - p += strlen(file) + 1; - fn = p; - p += strlen(fn) + 1; - len = hdr->ph_len - (int)(p - (char *)hdr); - - cfs_print_to_console(hdr, D_EMERG, p, len, file, fn); - - p += len; - } - - cfs_list_del(&tage->linkage); - cfs_tage_free(tage); - } + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + char *p, *file, *fn; + struct page *page; + + __LASSERT_TAGE_INVARIANT(tage); + + page = tage->page; + p = page_address(page); + while (p < ((char *)page_address(page) + tage->used)) { + struct ptldebug_header *hdr; + int len; + hdr = (void *)p; + p += sizeof(*hdr); + file = p; + p += strlen(file) + 1; + fn = p; + p += strlen(fn) + 1; + len = hdr->ph_len - (int)(p - (char *)hdr); + + cfs_print_to_console(hdr, D_EMERG, file, fn, + "%.*s", len, p); + + p += len; + } + + list_del(&tage->linkage); + cfs_tage_free(tage); + } } +#endif /* LNET_DUMP_ON_PANIC */ int cfs_tracefile_dump_all_pages(char *filename) { - struct page_collection pc; - cfs_file_t *filp; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int rc; - - CFS_DECL_MMSPACE; - - cfs_tracefile_write_lock(); - - filp = cfs_filp_open(filename, - O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600, &rc); - if (!filp) { - if (rc != -EEXIST) - printk(CFS_KERN_ERR - "LustreError: can't open %s for dump: rc %d\n", - filename, rc); - goto out; - } + struct page_collection pc; + struct file *filp; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + char *buf; + int rc; + + down_write(&cfs_tracefile_sem); + + filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + filp = NULL; + pr_err("LustreError: can't open %s for dump: rc = %d\n", + filename, rc); + goto out; + } - spin_lock_init(&pc.pc_lock); pc.pc_want_daemon_pages = 1; collect_pages(&pc); - if (cfs_list_empty(&pc.pc_pages)) { + if (list_empty(&pc.pc_pages)) { rc = 0; goto close; } - /* ok, for now, just write the pages. 
in the future we'll be building - * iobufs with the pages and calling generic_direct_IO */ - CFS_MMSPACE_OPEN; - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - rc = cfs_filp_write(filp, cfs_page_address(tage->page), - tage->used, cfs_filp_poff(filp)); - if (rc != (int)tage->used) { - printk(CFS_KERN_WARNING "wanted to write %u but wrote " - "%d\n", tage->used, rc); - put_pages_back(&pc); - __LASSERT(cfs_list_empty(&pc.pc_pages)); - break; - } - cfs_list_del(&tage->linkage); + /* ok, for now, just write the pages. in the future we'll be building + * iobufs with the pages and calling generic_direct_IO */ + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + + __LASSERT_TAGE_INVARIANT(tage); + + buf = kmap(tage->page); + rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos); + kunmap(tage->page); + if (rc != (int)tage->used) { + pr_warn("Lustre: wanted to write %u but wrote %d\n", + tage->used, rc); + put_pages_back(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + break; + } + list_del(&tage->linkage); cfs_tage_free(tage); } - CFS_MMSPACE_CLOSE; - rc = cfs_filp_fsync(filp); - if (rc) - printk(CFS_KERN_ERR "sync returns %d\n", rc); - close: - cfs_filp_close(filp); - out: - cfs_tracefile_write_unlock(); - return rc; + + rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1); + if (rc) + pr_err("LustreError: sync returns: rc = %d\n", rc); +close: + filp_close(filp, NULL); +out: + up_write(&cfs_tracefile_sem); + return rc; } void cfs_trace_flush_pages(void) { struct page_collection pc; struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - spin_lock_init(&pc.pc_lock); + pc.pc_want_daemon_pages = 1; + collect_pages(&pc); + while (!list_empty(&pc.pc_pages)) { + tage = list_first_entry(&pc.pc_pages, + struct cfs_trace_page, linkage); + __LASSERT_TAGE_INVARIANT(tage); - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - cfs_list_del(&tage->linkage); - cfs_tage_free(tage); - } -} - -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char *usr_buffer, int usr_buffer_nob) -{ - int nob; - - if (usr_buffer_nob > knl_buffer_nob) - return -EOVERFLOW; - - if (cfs_copy_from_user((void *)knl_buffer, - (void *)usr_buffer, usr_buffer_nob)) - return -EFAULT; - - nob = strnlen(knl_buffer, usr_buffer_nob); - while (nob-- >= 0) /* strip trailing whitespace */ - if (!isspace(knl_buffer[nob])) - break; - - if (nob < 0) /* empty string */ - return -EINVAL; - - if (nob == knl_buffer_nob) /* no space to terminate */ - return -EOVERFLOW; - - knl_buffer[nob + 1] = 0; /* terminate */ - return 0; + list_del(&tage->linkage); + cfs_tage_free(tage); + } } -EXPORT_SYMBOL(cfs_trace_copyin_string); -int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob, +int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, const char *knl_buffer, char *append) { /* NB if 'append' != NULL, it's a single character to append to the @@ -806,11 +884,11 @@ int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob, if (nob > usr_buffer_nob) nob = usr_buffer_nob; - if (cfs_copy_to_user(usr_buffer, knl_buffer, nob)) + if (copy_to_user(usr_buffer, knl_buffer, nob)) return -EFAULT; if (append != NULL && nob < usr_buffer_nob) { - if (cfs_copy_to_user(usr_buffer + nob, append, 1)) + if (copy_to_user(usr_buffer + nob, append, 1)) return -EFAULT; nob++; @@ -822,150 
+900,122 @@ EXPORT_SYMBOL(cfs_trace_copyout_string); int cfs_trace_allocate_string_buffer(char **str, int nob) { - if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */ + if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */ return -EINVAL; - *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO); + *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); if (*str == NULL) return -ENOMEM; return 0; } -void cfs_trace_free_string_buffer(char *str, int nob) -{ - cfs_free(str); -} - -int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob) +int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) { - char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc != 0) - goto out; - -#if !defined(__WINNT__) - if (str[0] != '/') { - rc = -EINVAL; - goto out; - } -#endif - rc = cfs_tracefile_dump_all_pages(str); -out: - cfs_trace_free_string_buffer(str, usr_str_nob + 1); - return rc; + char *str; + char *path; + int rc; + + str = memdup_user_nul(usr_str, usr_str_nob); + if (!str) + return -ENOMEM; + + path = strim(str); + if (path[0] != '/') + rc = -EINVAL; + else + rc = cfs_tracefile_dump_all_pages(path); + kfree(str); + + return rc; } int cfs_trace_daemon_command(char *str) { int rc = 0; - cfs_tracefile_write_lock(); + down_write(&cfs_tracefile_sem); if (strcmp(str, "stop") == 0) { - cfs_tracefile_write_unlock(); + up_write(&cfs_tracefile_sem); cfs_trace_stop_thread(); - cfs_tracefile_write_lock(); + down_write(&cfs_tracefile_sem); memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); - } else if (strncmp(str, "size=", 5) == 0) { - cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0); - if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480) - cfs_tracefile_size = CFS_TRACEFILE_SIZE; - else - cfs_tracefile_size <<= 20; + } else if (strncmp(str, "size=", 5) == 0) { + unsigned long tmp; + rc = kstrtoul(str + 5, 10, &tmp); + if (!rc) { + if (tmp < 10 || tmp > 20480) + cfs_tracefile_size = CFS_TRACEFILE_SIZE; + else + cfs_tracefile_size = tmp << 20; + } } else if (strlen(str) >= sizeof(cfs_tracefile)) { rc = -ENAMETOOLONG; -#ifndef __WINNT__ } else if (str[0] != '/') { rc = -EINVAL; -#endif } else { - strcpy(cfs_tracefile, str); + strcpy(cfs_tracefile, str); - printk(CFS_KERN_INFO - "Lustre: debug daemon will attempt to start writing " - "to %s (%lukB max)\n", cfs_tracefile, - (long)(cfs_tracefile_size >> 10)); + pr_info("Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n", + cfs_tracefile, (long)(cfs_tracefile_size >> 10)); - cfs_trace_start_thread(); + cfs_trace_start_thread(); } - cfs_tracefile_write_unlock(); + up_write(&cfs_tracefile_sem); return rc; } -int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob) +int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) { - char *str; - int rc; + char *str; + int rc; - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; + str = memdup_user_nul(usr_str, usr_str_nob); + if (!str) + return -ENOMEM; - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc == 0) - rc = cfs_trace_daemon_command(str); + rc = cfs_trace_daemon_command(strim(str)); + kfree(str); - cfs_trace_free_string_buffer(str, usr_str_nob + 1); - return rc; + return rc; } int cfs_trace_set_debug_mb(int mb) { - int i; - int j; - int pages; - int limit = cfs_trace_max_debug_mb(); - struct 
cfs_trace_cpu_data *tcd; - - if (mb < cfs_num_possible_cpus()) { - printk(CFS_KERN_WARNING - "Lustre: %d MB is too small for debug buffer size, " - "setting it to %d MB.\n", mb, cfs_num_possible_cpus()); - mb = cfs_num_possible_cpus(); - } - - if (mb > limit) { - printk(CFS_KERN_WARNING - "Lustre: %d MB is too large for debug buffer size, " - "setting it to %d MB.\n", mb, limit); - mb = limit; - } - - mb /= cfs_num_possible_cpus(); - pages = mb << (20 - CFS_PAGE_SHIFT); + int i; + int j; + unsigned long pages; + unsigned long total_mb = (cfs_totalram_pages() >> (20 - PAGE_SHIFT)); + unsigned long limit = max_t(unsigned long, 512, (total_mb * 4) / 5); + struct cfs_trace_cpu_data *tcd; - cfs_tracefile_write_lock(); + if (mb < num_possible_cpus()) { + pr_warn("Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n", + mb, num_possible_cpus()); + mb = num_possible_cpus(); + } - cfs_tcd_for_each(tcd, i, j) - tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; + if (mb > limit) { + pr_warn("Lustre: %d MB is too large for debug buffer size, setting it to %lu MB.\n", + mb, limit); + mb = limit; + } - cfs_tracefile_write_unlock(); + mb /= num_possible_cpus(); + pages = mb << (20 - PAGE_SHIFT); - return 0; -} + down_write(&cfs_tracefile_sem); -int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob) -{ - char str[32]; - int rc; + cfs_tcd_for_each(tcd, i, j) + tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; - rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob); - if (rc < 0) - return rc; + up_write(&cfs_tracefile_sem); - return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0)); + return mb; } int cfs_trace_get_debug_mb(void) @@ -975,128 +1025,113 @@ int cfs_trace_get_debug_mb(void) struct cfs_trace_cpu_data *tcd; int total_pages = 0; - cfs_tracefile_read_lock(); + down_read(&cfs_tracefile_sem); cfs_tcd_for_each(tcd, i, j) total_pages += tcd->tcd_max_pages; - cfs_tracefile_read_unlock(); + up_read(&cfs_tracefile_sem); - return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1; + return (total_pages >> (20 - PAGE_SHIFT)) + 1; } static int tracefiled(void *arg) { - struct page_collection pc; - struct tracefiled_ctl *tctl = arg; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - cfs_file_t *filp; - int last_loop = 0; - int rc; - - CFS_DECL_MMSPACE; + struct page_collection pc; + struct tracefiled_ctl *tctl = arg; + struct cfs_trace_page *tage; + struct cfs_trace_page *tmp; + struct file *filp; + char *buf; + int last_loop = 0; + int rc; - /* we're started late enough that we pick up init's fs context */ - /* this is so broken in uml? what on earth is going on? */ - cfs_daemonize("ktracefiled"); + /* we're started late enough that we pick up init's fs context */ + /* this is so broken in uml? what on earth is going on? 
*/ - spin_lock_init(&pc.pc_lock); complete(&tctl->tctl_start); - while (1) { - cfs_waitlink_t __wait; - - pc.pc_want_daemon_pages = 0; - collect_pages(&pc); - if (cfs_list_empty(&pc.pc_pages)) - goto end_loop; - - filp = NULL; - cfs_tracefile_read_lock(); - if (cfs_tracefile[0] != 0) { - filp = cfs_filp_open(cfs_tracefile, - O_CREAT | O_RDWR | O_LARGEFILE, - 0600, &rc); - if (!(filp)) - printk(CFS_KERN_WARNING "couldn't open %s: " - "%d\n", cfs_tracefile, rc); - } - cfs_tracefile_read_unlock(); - if (filp == NULL) { - put_pages_on_daemon_list(&pc); - __LASSERT(cfs_list_empty(&pc.pc_pages)); - goto end_loop; - } - - CFS_MMSPACE_OPEN; - - cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, - struct cfs_trace_page, - linkage) { - static loff_t f_pos; - - __LASSERT_TAGE_INVARIANT(tage); - - if (f_pos >= (off_t)cfs_tracefile_size) - f_pos = 0; - else if (f_pos > (off_t)cfs_filp_size(filp)) - f_pos = cfs_filp_size(filp); - - rc = cfs_filp_write(filp, cfs_page_address(tage->page), - tage->used, &f_pos); - if (rc != (int)tage->used) { - printk(CFS_KERN_WARNING "wanted to write %u " - "but wrote %d\n", tage->used, rc); - put_pages_back(&pc); - __LASSERT(cfs_list_empty(&pc.pc_pages)); - } - } - CFS_MMSPACE_CLOSE; - - cfs_filp_close(filp); - put_pages_on_daemon_list(&pc); - if (!cfs_list_empty(&pc.pc_pages)) { - int i; - - printk(CFS_KERN_ALERT "Lustre: trace pages aren't " - " empty\n"); - printk(CFS_KERN_ERR "total cpus(%d): ", - cfs_num_possible_cpus()); - for (i = 0; i < cfs_num_possible_cpus(); i++) - if (cpu_online(i)) - printk(CFS_KERN_ERR "%d(on) ", i); - else - printk(CFS_KERN_ERR "%d(off) ", i); - printk(CFS_KERN_ERR "\n"); - - i = 0; - cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) - printk(CFS_KERN_ERR "page %d belongs to cpu " - "%d\n", ++i, tage->cpu); - printk(CFS_KERN_ERR "There are %d pages unwritten\n", - i); - } - __LASSERT(cfs_list_empty(&pc.pc_pages)); -end_loop: - if (cfs_atomic_read(&tctl->tctl_shutdown)) { - if (last_loop == 0) { - last_loop = 1; - continue; - } else { - break; - } - } - cfs_waitlink_init(&__wait); - cfs_waitq_add(&tctl->tctl_waitq, &__wait); - cfs_set_current_state(CFS_TASK_INTERRUPTIBLE); - cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE, - cfs_time_seconds(1)); - cfs_waitq_del(&tctl->tctl_waitq, &__wait); - } + pc.pc_want_daemon_pages = 0; + + while (!last_loop) { + wait_event_timeout(tctl->tctl_waitq, + ({ collect_pages(&pc); + !list_empty(&pc.pc_pages); }) || + atomic_read(&tctl->tctl_shutdown), + cfs_time_seconds(1)); + if (atomic_read(&tctl->tctl_shutdown)) + last_loop = 1; + if (list_empty(&pc.pc_pages)) + continue; + + filp = NULL; + down_read(&cfs_tracefile_sem); + if (cfs_tracefile[0] != 0) { + filp = filp_open(cfs_tracefile, + O_CREAT | O_RDWR | O_LARGEFILE, + 0600); + if (IS_ERR(filp)) { + rc = PTR_ERR(filp); + filp = NULL; + pr_warn("Lustre: couldn't open %s: rc = %d\n", + cfs_tracefile, rc); + } + } + up_read(&cfs_tracefile_sem); + if (filp == NULL) { + put_pages_on_daemon_list(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + continue; + } + + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { + struct dentry *de = file_dentry(filp); + static loff_t f_pos; + + __LASSERT_TAGE_INVARIANT(tage); + + if (f_pos >= (off_t)cfs_tracefile_size) + f_pos = 0; + else if (f_pos > i_size_read(de->d_inode)) + f_pos = i_size_read(de->d_inode); + + buf = kmap(tage->page); + rc = cfs_kernel_write(filp, buf, tage->used, &f_pos); + kunmap(tage->page); + if (rc != (int)tage->used) { + pr_warn("Lustre: wanted to write %u but wrote 
%d\n", + tage->used, rc); + put_pages_back(&pc); + __LASSERT(list_empty(&pc.pc_pages)); + break; + } + } + + filp_close(filp, NULL); + put_pages_on_daemon_list(&pc); + if (!list_empty(&pc.pc_pages)) { + int i; + + pr_alert("Lustre: trace pages aren't empty\n"); + pr_err("Lustre: total cpus(%d): ", num_possible_cpus()); + for (i = 0; i < num_possible_cpus(); i++) + if (cpu_online(i)) + pr_cont("%d(on) ", i); + else + pr_cont("%d(off) ", i); + pr_cont("\n"); + + i = 0; + list_for_each_entry_safe(tage, tmp, &pc.pc_pages, + linkage) + pr_err("Lustre: page %d belongs to cpu %d\n", + ++i, tage->cpu); + pr_err("Lustre: There are %d pages unwritten\n", i); + } + __LASSERT(list_empty(&pc.pc_pages)); + } complete(&tctl->tctl_stop); - return 0; + return 0; } int cfs_trace_start_thread(void) @@ -1110,16 +1145,16 @@ int cfs_trace_start_thread(void) init_completion(&tctl->tctl_start); init_completion(&tctl->tctl_stop); - cfs_waitq_init(&tctl->tctl_waitq); - cfs_atomic_set(&tctl->tctl_shutdown, 0); + init_waitqueue_head(&tctl->tctl_waitq); + atomic_set(&tctl->tctl_shutdown, 0); - if (cfs_create_thread(tracefiled, tctl, 0) < 0) { - rc = -ECHILD; - goto out; - } + if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) { + rc = -ECHILD; + goto out; + } wait_for_completion(&tctl->tctl_start); - thread_running = 1; + thread_running = 1; out: mutex_unlock(&cfs_trace_thread_mutex); return rc; @@ -1127,84 +1162,113 @@ out: void cfs_trace_stop_thread(void) { - struct tracefiled_ctl *tctl = &trace_tctl; + struct tracefiled_ctl *tctl = &trace_tctl; mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) { - printk(CFS_KERN_INFO - "Lustre: shutting down debug daemon thread...\n"); - cfs_atomic_set(&tctl->tctl_shutdown, 1); + if (thread_running) { + pr_info("Lustre: shutting down debug daemon thread...\n"); + atomic_set(&tctl->tctl_shutdown, 1); wait_for_completion(&tctl->tctl_stop); - thread_running = 0; - } + thread_running = 0; + } mutex_unlock(&cfs_trace_thread_mutex); } +/* percents to share the total debug memory for each type */ +static unsigned int pages_factor[CFS_TCD_TYPE_CNT] = { + 80, /* 80% pages for CFS_TCD_TYPE_PROC */ + 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */ + 10 /* 10% pages for CFS_TCD_TYPE_IRQ */ +}; + int cfs_tracefile_init(int max_pages) { - struct cfs_trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; - - rc = cfs_tracefile_init_arch(); - if (rc != 0) - return rc; - - cfs_tcd_for_each(tcd, i, j) { - /* tcd_pages_factor is initialized int tracefile_init_arch. 
*/ - factor = tcd->tcd_pages_factor; - CFS_INIT_LIST_HEAD(&tcd->tcd_pages); - CFS_INIT_LIST_HEAD(&tcd->tcd_stock_pages); - CFS_INIT_LIST_HEAD(&tcd->tcd_daemon_pages); - tcd->tcd_cur_pages = 0; - tcd->tcd_cur_stock_pages = 0; - tcd->tcd_cur_daemon_pages = 0; - tcd->tcd_max_pages = (max_pages * factor) / 100; - LASSERT(tcd->tcd_max_pages > 0); - tcd->tcd_shutting_down = 0; - } - - return 0; + struct cfs_trace_cpu_data *tcd; + int i; + int j; + + /* initialize trace_data */ + memset(cfs_trace_data, 0, sizeof(cfs_trace_data)); + for (i = 0; i < CFS_TCD_TYPE_CNT; i++) { + cfs_trace_data[i] = + kmalloc_array(num_possible_cpus(), + sizeof(union cfs_trace_data_union), + GFP_KERNEL); + if (!cfs_trace_data[i]) + goto out_trace_data; + } + + /* arch related info initialized */ + cfs_tcd_for_each(tcd, i, j) { + int factor = pages_factor[i]; + + spin_lock_init(&tcd->tcd_lock); + tcd->tcd_pages_factor = factor; + tcd->tcd_type = i; + tcd->tcd_cpu = j; + + INIT_LIST_HEAD(&tcd->tcd_pages); + INIT_LIST_HEAD(&tcd->tcd_stock_pages); + INIT_LIST_HEAD(&tcd->tcd_daemon_pages); + tcd->tcd_cur_pages = 0; + tcd->tcd_cur_stock_pages = 0; + tcd->tcd_cur_daemon_pages = 0; + tcd->tcd_max_pages = (max_pages * factor) / 100; + LASSERT(tcd->tcd_max_pages > 0); + tcd->tcd_shutting_down = 0; + } + + return 0; + +out_trace_data: + for (i = 0; cfs_trace_data[i]; i++) { + kfree(cfs_trace_data[i]); + cfs_trace_data[i] = NULL; + } + pr_err("lnet: Not enough memory\n"); + return -ENOMEM; } static void trace_cleanup_on_all_cpus(void) { - struct cfs_trace_cpu_data *tcd; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - cfs_for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - tcd->tcd_shutting_down = 1; - - cfs_list_for_each_entry_safe_typed(tage, tmp, - &tcd->tcd_pages, - struct cfs_trace_page, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - cfs_list_del(&tage->linkage); - cfs_tage_free(tage); - } + struct cfs_trace_cpu_data *tcd; + struct cfs_trace_page *tage; + int i, cpu; - tcd->tcd_cur_pages = 0; - } - } + for_each_possible_cpu(cpu) { + cfs_tcd_for_each_type_lock(tcd, i, cpu) { + if (!tcd->tcd_pages_factor) + /* Not initialised */ + continue; + tcd->tcd_shutting_down = 1; + + while (!list_empty(&tcd->tcd_pages)) { + tage = list_first_entry(&tcd->tcd_pages, + struct cfs_trace_page, + linkage); + __LASSERT_TAGE_INVARIANT(tage); + + list_del(&tage->linkage); + cfs_tage_free(tage); + } + tcd->tcd_cur_pages = 0; + } + } } static void cfs_trace_cleanup(void) { struct page_collection pc; + int i; - CFS_INIT_LIST_HEAD(&pc.pc_pages); - spin_lock_init(&pc.pc_lock); + INIT_LIST_HEAD(&pc.pc_pages); trace_cleanup_on_all_cpus(); - cfs_tracefile_fini_arch(); + for (i = 0; i < CFS_TCD_TYPE_CNT && cfs_trace_data[i]; i++) { + kfree(cfs_trace_data[i]); + cfs_trace_data[i] = NULL; + } } void cfs_tracefile_exit(void)
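
Note: the central new mechanism in this patch is the context-aware per-CPU trace buffer locking (cfs_trace_buf_idx_get() together with cfs_trace_lock_tcd()/cfs_trace_unlock_tcd() above): the buffer type is chosen from the execution context and the matching spinlock variant is taken, which is what avoids the LU-1311 class of deadlocks mentioned in the comment. Below is a minimal stand-alone sketch of that pattern, not code from the patch; the demo_* names and the module-init boilerplate are illustrative assumptions, and the patch's extra "walking" mode (used when iterating every buffer type from process context) is omitted for brevity.

/*
 * Sketch only: restates the context-aware locking idea from this patch
 * with hypothetical demo_* names. Each execution context (process,
 * softirq, hard IRQ) gets its own per-CPU buffer, and the lock variant
 * matches the context the buffer can be touched from.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

enum demo_ctx { DEMO_PROC, DEMO_SOFTIRQ, DEMO_IRQ, DEMO_CTX_CNT };

struct demo_buf {
	spinlock_t	lock;
	unsigned long	flags;	/* saved IRQ flags for the hard-IRQ case */
	enum demo_ctx	type;	/* which variant was taken, cf. tcd_type */
};

static DEFINE_PER_CPU(struct demo_buf, demo_bufs[DEMO_CTX_CNT]);

static enum demo_ctx demo_ctx_get(void)
{
	if (in_irq())
		return DEMO_IRQ;
	if (in_softirq())
		return DEMO_SOFTIRQ;
	return DEMO_PROC;
}

/* Pin to the current CPU, pick the buffer for this context, lock it. */
static struct demo_buf *demo_buf_lock(void)
{
	enum demo_ctx ctx = demo_ctx_get();
	struct demo_buf *buf = &get_cpu_var(demo_bufs)[ctx];

	buf->type = ctx;
	if (ctx == DEMO_IRQ)
		spin_lock_irqsave(&buf->lock, buf->flags);
	else if (ctx == DEMO_SOFTIRQ)
		spin_lock_bh(&buf->lock);
	else
		spin_lock(&buf->lock);
	return buf;
}

static void demo_buf_unlock(struct demo_buf *buf)
{
	if (buf->type == DEMO_IRQ)
		spin_unlock_irqrestore(&buf->lock, buf->flags);
	else if (buf->type == DEMO_SOFTIRQ)
		spin_unlock_bh(&buf->lock);
	else
		spin_unlock(&buf->lock);
	put_cpu_var(demo_bufs);	/* re-enable preemption */
}

static int __init demo_init(void)
{
	int cpu, i;

	/* statically defined spinlocks still need runtime initialization */
	for_each_possible_cpu(cpu)
		for (i = 0; i < DEMO_CTX_CNT; i++)
			spin_lock_init(&per_cpu(demo_bufs, cpu)[i].lock);
	return 0;
}
module_init(demo_init);

In this scheme the plain spin_lock is enough in process context because each context type has its own buffer; the paths that walk all buffer types (collect_pages() and friends in the patch) take the lock with interrupts disabled instead, which is the "walking" case handled by cfs_trace_lock_tcd() above.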