X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Flibcfs%2Ftracefile.c;h=2baecb843ad41323a1c2a206529cc8be9a7d27e2;hb=53aee2559638e25dd6dc664ceab2b023eaac1cb5;hp=90a4a961ac9140d8b28d6b8b6ee30c1c57144af7;hpb=f111fac3f785400f892ca03bfb9078c6bd091f7d;p=fs%2Flustre-release.git

diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c
index 90a4a96..2baecb8 100644
--- a/libcfs/libcfs/tracefile.c
+++ b/libcfs/libcfs/tracefile.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -44,6 +44,7 @@
 #define LUSTRE_TRACEFILE_PRIVATE
 #include "tracefile.h"
 
+#include 
 #include 
 
 /* XXX move things up to the top, comment */
@@ -52,10 +53,10 @@ union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline
 char cfs_tracefile[TRACEFILE_NAME_SIZE];
 long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
 static struct tracefiled_ctl trace_tctl;
-struct mutex cfs_trace_thread_mutex;
+static DEFINE_MUTEX(cfs_trace_thread_mutex);
 static int thread_running = 0;
 
-atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
+static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
 
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                          struct cfs_trace_cpu_data *tcd);
@@ -66,7 +67,7 @@ cfs_tage_from_list(struct list_head *list)
         return list_entry(list, struct cfs_trace_page, linkage);
 }
 
-static struct cfs_trace_page *cfs_tage_alloc(int gfp)
+static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
 {
         struct page *page;
         struct cfs_trace_page *tage;
@@ -114,7 +115,7 @@ static void cfs_tage_to_tail(struct cfs_trace_page *tage,
         list_move_tail(&tage->linkage, queue);
 }
 
-int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
                            struct list_head *stock)
 {
         int i;
@@ -270,12 +271,11 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
         int needed = 85; /* average message length */
         int max_nob;
         va_list ap;
-        int depth;
         int i;
         int remain;
         int mask = msgdata->msg_mask;
         char *file = (char *)msgdata->msg_file;
-        cfs_debug_limit_state_t *cdls = msgdata->msg_cdls;
+        struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
 
         if (strchr(file, '/'))
                 file = strrchr(file, '/') + 1;
@@ -299,8 +299,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                 goto console;
         }
 
-        depth = __current_nesting_level();
-        known_size = strlen(file) + 1 + depth;
+        known_size = strlen(file) + 1;
         if (msgdata->msg_fn)
                 known_size += strlen(msgdata->msg_fn) + 1;
 
@@ -371,12 +370,6 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                 debug_buf += sizeof(header);
         }
 
-        /* indent message according to the nesting level */
-        while (depth-- > 0) {
-                *(debug_buf++) = '.';
-                ++ tage->used;
-        }
-
         strcpy(debug_buf, file);
         tage->used += strlen(file) + 1;
         debug_buf += strlen(file) + 1;
@@ -456,7 +449,7 @@ console:
                 cfs_print_to_console(&header, mask,
                                      string_buf, needed, file, msgdata->msg_fn);
 
-                cfs_trace_put_console_buffer(string_buf);
+                put_cpu();
         }
 
         if (cdls != NULL && cdls->cdls_count != 0) {
@@ -470,7 +463,7 @@ console:
                 cfs_print_to_console(&header, mask,
                                      string_buf, needed, file, msgdata->msg_fn);
 
-                cfs_trace_put_console_buffer(string_buf);
+                put_cpu();
 
                 cdls->cdls_count = 0;
         }
@@ -527,7 +520,7 @@ static void collect_pages_on_all_cpus(struct page_collection *pc)
         struct cfs_trace_cpu_data *tcd;
         int i, cpu;
 
-        cfs_for_each_possible_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                         tcd->tcd_cur_pages = 0;
@@ -558,7 +551,7 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
         struct cfs_trace_page *tmp;
         int i, cpu;
 
-        cfs_for_each_possible_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         cur_head = tcd->tcd_pages.next;
 
@@ -622,7 +615,7 @@ static void put_pages_on_daemon_list(struct page_collection *pc)
         struct cfs_trace_cpu_data *tcd;
         int i, cpu;
 
-        cfs_for_each_possible_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu)
                         put_pages_on_tcd_daemon_list(pc, tcd);
         }
@@ -671,10 +664,10 @@ int cfs_tracefile_dump_all_pages(char *filename)
         struct file *filp;
         struct cfs_trace_page *tage;
         struct cfs_trace_page *tmp;
+        mm_segment_t __oldfs;
+        char *buf;
         int rc;
 
-        DECL_MMSPACE;
-
         cfs_tracefile_write_lock();
 
         filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
@@ -692,16 +685,19 @@ int cfs_tracefile_dump_all_pages(char *filename)
                 rc = 0;
                 goto close;
         }
+        __oldfs = get_fs();
+        set_fs(get_ds());
 
-        /* ok, for now, just write the pages.  in the future we'll be building
-         * iobufs with the pages and calling generic_direct_IO */
-        MMSPACE_OPEN;
+        /* ok, for now, just write the pages.  in the future we'll be building
+         * iobufs with the pages and calling generic_direct_IO */
 
         list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-                __LASSERT_TAGE_INVARIANT(tage);
+                __LASSERT_TAGE_INVARIANT(tage);
 
-                rc = filp_write(filp, page_address(tage->page),
-                                tage->used, filp_poff(filp));
+                buf = kmap(tage->page);
+                rc = vfs_write(filp, (__force const char __user *)buf,
+                               tage->used, &filp->f_pos);
+                kunmap(tage->page);
                 if (rc != (int)tage->used) {
                         printk(KERN_WARNING "wanted to write %u but wrote "
                                "%d\n", tage->used, rc);
@@ -712,8 +708,8 @@ int cfs_tracefile_dump_all_pages(char *filename)
                 list_del(&tage->linkage);
                 cfs_tage_free(tage);
         }
-        MMSPACE_CLOSE;
-        rc = filp_fsync(filp, 0, LLONG_MAX);
+        set_fs(__oldfs);
+        rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
         if (rc)
                 printk(KERN_ERR "sync returns %d\n", rc);
 close:
@@ -741,15 +737,14 @@ void cfs_trace_flush_pages(void)
 }
 
 int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
-                            const char *usr_buffer, int usr_buffer_nob)
+                            const char __user *usr_buffer, int usr_buffer_nob)
 {
         int nob;
 
         if (usr_buffer_nob > knl_buffer_nob)
                 return -EOVERFLOW;
 
-        if (copy_from_user((void *)knl_buffer,
-                           (void *)usr_buffer, usr_buffer_nob))
+        if (copy_from_user(knl_buffer, usr_buffer, usr_buffer_nob))
                 return -EFAULT;
 
         nob = strnlen(knl_buffer, usr_buffer_nob);
@@ -768,7 +763,7 @@ int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
 }
 EXPORT_SYMBOL(cfs_trace_copyin_string);
 
-int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
                              const char *knl_buffer, char *append)
 {
         /* NB if 'append' != NULL, it's a single character to append to the
@@ -805,12 +800,7 @@ int cfs_trace_allocate_string_buffer(char **str, int nob)
         return 0;
 }
 
-void cfs_trace_free_string_buffer(char *str, int nob)
-{
-        kfree(str);
-}
-
-int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
 {
         char *str;
         int rc;
@@ -824,15 +814,13 @@ int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
         if (rc != 0)
                 goto out;
 
-#if !defined(__WINNT__)
         if (str[0] != '/') {
                 rc = -EINVAL;
                 goto out;
         }
-#endif
         rc = cfs_tracefile_dump_all_pages(str);
 out:
-        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
+        kfree(str);
         return rc;
 }
 
@@ -857,10 +845,8 @@ int cfs_trace_daemon_command(char *str)
 
         } else if (strlen(str) >= sizeof(cfs_tracefile)) {
                 rc = -ENAMETOOLONG;
-#ifndef __WINNT__
         } else if (str[0] != '/') {
                 rc = -EINVAL;
-#endif
         } else {
                 strcpy(cfs_tracefile, str);
 
@@ -876,7 +862,7 @@ int cfs_trace_daemon_command(char *str)
         return rc;
 }
 
-int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
 {
         char *str;
         int rc;
@@ -890,7 +876,7 @@ int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
         if (rc == 0)
                 rc = cfs_trace_daemon_command(str);
 
-        cfs_trace_free_string_buffer(str, usr_str_nob + 1);
+        kfree(str);
         return rc;
 }
 
@@ -929,7 +915,7 @@ int cfs_trace_set_debug_mb(int mb)
         return 0;
 }
 
-int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
+int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob)
 {
         char str[32];
         int rc;
@@ -964,12 +950,12 @@ static int tracefiled(void *arg)
         struct tracefiled_ctl *tctl = arg;
         struct cfs_trace_page *tage;
         struct cfs_trace_page *tmp;
+        mm_segment_t __oldfs;
         struct file *filp;
+        char *buf;
         int last_loop = 0;
         int rc;
 
-        DECL_MMSPACE;
-
         /* we're started late enough that we pick up init's fs context */
         /* this is so broken in uml?  what on earth is going on? */
 
@@ -1002,21 +988,24 @@ static int tracefiled(void *arg)
                                 __LASSERT(list_empty(&pc.pc_pages));
                                 goto end_loop;
                         }
-
-                MMSPACE_OPEN;
+                __oldfs = get_fs();
+                set_fs(get_ds());
 
                 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
+                        struct dentry *de = filp->f_path.dentry;
                         static loff_t f_pos;
 
-                        __LASSERT_TAGE_INVARIANT(tage);
+                        __LASSERT_TAGE_INVARIANT(tage);
 
-                        if (f_pos >= (off_t)cfs_tracefile_size)
-                                f_pos = 0;
-                        else if (f_pos > (off_t)filp_size(filp))
-                                f_pos = filp_size(filp);
+                        if (f_pos >= (off_t)cfs_tracefile_size)
+                                f_pos = 0;
+                        else if (f_pos > i_size_read(de->d_inode))
+                                f_pos = i_size_read(de->d_inode);
 
-                        rc = filp_write(filp, page_address(tage->page),
-                                        tage->used, &f_pos);
+                        buf = kmap(tage->page);
+                        rc = vfs_write(filp, (__force const char __user *)buf,
+                                       tage->used, &f_pos);
+                        kunmap(tage->page);
                         if (rc != (int)tage->used) {
                                 printk(KERN_WARNING "wanted to write %u "
                                        "but wrote %d\n", tage->used, rc);
@@ -1025,7 +1014,7 @@ static int tracefiled(void *arg)
                                 break;
                         }
                 }
-                MMSPACE_CLOSE;
+                set_fs(__oldfs);
 
                 filp_close(filp, NULL);
                 put_pages_on_daemon_list(&pc);
@@ -1061,11 +1050,10 @@ end_loop:
                                 break;
                         }
                 }
-                init_waitqueue_entry_current(&__wait);
+                init_waitqueue_entry(&__wait, current);
                 add_wait_queue(&tctl->tctl_waitq, &__wait);
                 set_current_state(TASK_INTERRUPTIBLE);
-                waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
-                                cfs_time_seconds(1));
+                schedule_timeout(cfs_time_seconds(1));
                 remove_wait_queue(&tctl->tctl_waitq, &__wait);
         }
         complete(&tctl->tctl_stop);
@@ -1148,7 +1136,7 @@ static void trace_cleanup_on_all_cpus(void)
         struct cfs_trace_page *tmp;
         int i, cpu;
 
-        cfs_for_each_possible_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         tcd->tcd_shutting_down = 1;
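
Note (not part of the patch): the hunks in cfs_tracefile_dump_all_pages() and tracefiled() replace the libcfs wrappers (DECL_MMSPACE, MMSPACE_OPEN/MMSPACE_CLOSE, filp_write()) with the stock kernel pattern of mapping the trace page and handing it to vfs_write() while the address limit is temporarily raised via set_fs(get_ds()). A minimal sketch of that pattern follows; it assumes a kernel old enough to still provide get_fs()/set_fs() and vfs_write() (both removed in later kernels), and write_trace_page() is a hypothetical helper name used only for illustration.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Sketch only: write one trace page to an already-open file, following the
 * pattern the patched code uses.  write_trace_page() is a hypothetical name. */
static int write_trace_page(struct file *filp, struct page *page,
                            unsigned int used, loff_t *pos)
{
        mm_segment_t oldfs = get_fs();
        char *buf;
        ssize_t rc;

        set_fs(get_ds());       /* let vfs_write() accept a kernel pointer */

        buf = kmap(page);       /* the trace page may live in highmem */
        rc = vfs_write(filp, (__force const char __user *)buf, used, pos);
        kunmap(page);

        set_fs(oldfs);          /* always restore the previous address limit */

        if (rc < 0)
                return (int)rc;
        return rc == (ssize_t)used ? 0 : -EIO;
}

On kernels where set_fs() no longer exists, kernel_write() accepts a kernel buffer directly and replaces the whole get_fs()/set_fs() sequence.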