#include <libcfs/libcfs.h>
/* XXX move things up to the top, comment */
-union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned;
+union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
- cfs_page_t *page;
- struct cfs_trace_page *tage;
-
- /* My caller is trying to free memory */
- if (!cfs_in_interrupt() && cfs_memory_pressure_get())
- return NULL;
-
- /*
- * Don't spam console with allocation failures: they will be reported
- * by upper layer anyway.
- */
- gfp |= CFS_ALLOC_NOWARN;
- page = cfs_alloc_page(gfp);
- if (page == NULL)
- return NULL;
+ struct page *page;
+ struct cfs_trace_page *tage;
- tage = cfs_alloc(sizeof(*tage), gfp);
- if (tage == NULL) {
- cfs_free_page(page);
- return NULL;
- }
+ /* My caller is trying to free memory */
+ if (!cfs_in_interrupt() && memory_pressure_get())
+ return NULL;
+
+ /*
+ * Don't spam console with allocation failures: they will be reported
+ * by upper layer anyway.
+ */
+ gfp |= __GFP_NOWARN;
+ page = alloc_page(gfp);
+ if (page == NULL)
+ return NULL;
+
+ tage = kmalloc(sizeof(*tage), gfp);
+ if (tage == NULL) {
+ __free_page(page);
+ return NULL;
+ }
- tage->page = page;
- cfs_atomic_inc(&cfs_tage_allocated);
- return tage;
+ tage->page = page;
+ cfs_atomic_inc(&cfs_tage_allocated);
+ return tage;
}
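/*
 * Editor's sketch: the allocate-then-unwind pattern cfs_tage_alloc()
 * now uses, reduced to stock kernel calls.  "demo_tage" and
 * demo_tage_alloc() are hypothetical names for illustration only.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_tage {
	struct page *page;
};

static struct demo_tage *demo_tage_alloc(gfp_t gfp)
{
	struct demo_tage *t;
	struct page *page;

	/* __GFP_NOWARN: callers report failure themselves, so stay quiet */
	page = alloc_page(gfp | __GFP_NOWARN);
	if (page == NULL)
		return NULL;

	t = kmalloc(sizeof(*t), gfp);
	if (t == NULL) {
		__free_page(page);	/* unwind the first allocation */
		return NULL;
	}

	t->page = page;
	return t;
}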
static void cfs_tage_free(struct cfs_trace_page *tage)
{
- __LASSERT(tage != NULL);
- __LASSERT(tage->page != NULL);
+ __LASSERT(tage != NULL);
+ __LASSERT(tage->page != NULL);
- cfs_free_page(tage->page);
- cfs_free(tage);
- cfs_atomic_dec(&cfs_tage_allocated);
+ __free_page(tage->page);
+ kfree(tage);
+ cfs_atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!cfs_list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= CFS_PAGE_SIZE)
+ if (tage->used + len <= PAGE_CACHE_SIZE)
return tage;
}
--tcd->tcd_cur_stock_pages;
cfs_list_del_init(&tage->linkage);
} else {
- tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
+ tage = cfs_tage_alloc(GFP_ATOMIC);
if (unlikely(tage == NULL)) {
- if ((!cfs_memory_pressure_get() ||
+ if ((!memory_pressure_get() ||
cfs_in_interrupt()) && printk_ratelimit())
printk(CFS_KERN_WARNING
"cannot allocate a tage (%ld)\n",
}
}
- tage->used = 0;
- tage->cpu = cfs_smp_processor_id();
- tage->type = tcd->tcd_type;
- cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
- tcd->tcd_cur_pages++;
+ tage->used = 0;
+ tage->cpu = smp_processor_id();
+ tage->type = tcd->tcd_type;
+ cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
+ tcd->tcd_cur_pages++;
if (tcd->tcd_cur_pages > 8 && thread_running) {
struct tracefiled_ctl *tctl = &trace_tctl;
* from here: this will lead to infinite recursion.
*/
- if (len > CFS_PAGE_SIZE) {
+ if (len > PAGE_CACHE_SIZE) {
printk(CFS_KERN_ERR
"cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (tage == NULL) {
- if (needed + known_size > CFS_PAGE_SIZE)
+ if (needed + known_size > PAGE_CACHE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
goto console;
}
- string_buf = (char *)cfs_page_address(tage->page) +
+ string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
- max_nob = CFS_PAGE_SIZE - tage->used - known_size;
+ max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(CFS_KERN_EMERG "negative max_nob: %d\n",
max_nob);
"newline\n", file, msgdata->msg_line, msgdata->msg_fn);
header.ph_len = known_size + needed;
- debug_buf = (char *)cfs_page_address(tage->page) + tage->used;
+ debug_buf = (char *)page_address(tage->page) + tage->used;
if (libcfs_debug_binary) {
memcpy(debug_buf, &header, sizeof(header));
__LASSERT(debug_buf == string_buf);
tage->used += needed;
- __LASSERT (tage->used <= CFS_PAGE_SIZE);
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
void
cfs_trace_assertion_failed(const char *str,
- struct libcfs_debug_msg_data *msgdata)
+ struct libcfs_debug_msg_data *msgdata)
{
- struct ptldebug_header hdr;
+ struct ptldebug_header hdr;
- libcfs_panic_in_progress = 1;
- libcfs_catastrophe = 1;
- cfs_mb();
+ libcfs_panic_in_progress = 1;
+ libcfs_catastrophe = 1;
+ cfs_mb();
- cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
+ cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
- cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
- msgdata->msg_file, msgdata->msg_fn);
+ cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
+ msgdata->msg_file, msgdata->msg_fn);
- LIBCFS_PANIC("Lustre debug assertion failure\n");
+ panic("Lustre debug assertion failure\n");
- /* not reached */
+ /* not reached */
}
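/*
 * Editor's sketch (assumption-laden): the "flag, fence, report, panic"
 * sequence cfs_trace_assertion_failed() follows, with hypothetical
 * demo_* names standing in for the libcfs globals.
 */
#include <linux/kernel.h>
#include <asm/barrier.h>

static int demo_panic_in_progress;

static void demo_assertion_failed(const char *str)
{
	demo_panic_in_progress = 1;
	smp_mb();	/* publish the flag before any console output */
	printk(KERN_EMERG "%s\n", str);
	panic("debug assertion failure\n");
	/* not reached */
}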
static void
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page, linkage) {
- char *p, *file, *fn;
- cfs_page_t *page;
+ char *p, *file, *fn;
+ struct page *page;
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- page = tage->page;
- p = cfs_page_address(page);
- while (p < ((char *)cfs_page_address(page) + tage->used)) {
+ page = tage->page;
+ p = page_address(page);
+ while (p < ((char *)page_address(page) + tage->used)) {
struct ptldebug_header *hdr;
int len;
hdr = (void *)p;
struct cfs_trace_page *tmp;
int rc;
- CFS_DECL_MMSPACE;
+ DECL_MMSPACE;
cfs_tracefile_write_lock();
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO */
- CFS_MMSPACE_OPEN;
+ MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
- rc = filp_write(filp, cfs_page_address(tage->page),
+ rc = filp_write(filp, page_address(tage->page),
tage->used, filp_poff(filp));
if (rc != (int)tage->used) {
printk(CFS_KERN_WARNING "wanted to write %u but wrote "
cfs_list_del(&tage->linkage);
cfs_tage_free(tage);
}
- CFS_MMSPACE_CLOSE;
+ MMSPACE_CLOSE;
rc = filp_fsync(filp);
if (rc)
printk(CFS_KERN_ERR "sync returns %d\n", rc);
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
- if (cfs_copy_from_user((void *)knl_buffer,
+ if (copy_from_user((void *)knl_buffer,
(void *)usr_buffer, usr_buffer_nob))
return -EFAULT;
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
- if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
+ if (copy_to_user(usr_buffer, knl_buffer, nob))
return -EFAULT;
if (append != NULL && nob < usr_buffer_nob) {
- if (cfs_copy_to_user(usr_buffer + nob, append, 1))
+ if (copy_to_user(usr_buffer + nob, append, 1))
return -EFAULT;
nob++;
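/*
 * Editor's sketch of the copy-out idiom above: move at most usr_nob
 * bytes to userspace and, when there is room, append one extra byte
 * (a '\n' or '\0' supplied by the caller).  demo_copy_out() is a
 * hypothetical wrapper, not a libcfs function.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

static int demo_copy_out(char __user *usr, int usr_nob,
			 const char *knl, int nob, const char *append)
{
	if (nob > usr_nob)
		nob = usr_nob;

	if (copy_to_user(usr, knl, nob))
		return -EFAULT;

	if (append != NULL && nob < usr_nob) {
		if (copy_to_user(usr + nob, append, 1))
			return -EFAULT;
		nob++;	/* account for the appended byte */
	}
	return nob;
}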
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
- if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */
+ if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
return -EINVAL;
- *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
+ *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
if (*str == NULL)
return -ENOMEM;
void cfs_trace_free_string_buffer(char *str, int nob)
{
- cfs_free(str);
+ kfree(str);
}
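/*
 * Editor's sketch: the bounded, zero-filled buffer allocation above,
 * written against current APIs.  kzalloc(nob, GFP_KERNEL) is assumed
 * equivalent to the patch's kmalloc(nob, GFP_IOFS | __GFP_ZERO) for
 * this caller; demo_alloc_str() is a hypothetical name.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_alloc_str(char **str, int nob)
{
	if (nob <= 0 || nob > 2 * PAGE_SIZE)	/* keep sizes "sensible" */
		return -EINVAL;

	*str = kzalloc(nob, GFP_KERNEL);
	if (*str == NULL)
		return -ENOMEM;
	return 0;
}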
int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
int cfs_trace_set_debug_mb(int mb)
{
- int i;
- int j;
- int pages;
- int limit = cfs_trace_max_debug_mb();
- struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
+ int pages;
+ int limit = cfs_trace_max_debug_mb();
+ struct cfs_trace_cpu_data *tcd;
- if (mb < cfs_num_possible_cpus()) {
- printk(CFS_KERN_WARNING
- "Lustre: %d MB is too small for debug buffer size, "
- "setting it to %d MB.\n", mb, cfs_num_possible_cpus());
- mb = cfs_num_possible_cpus();
- }
+ if (mb < num_possible_cpus()) {
+ printk(CFS_KERN_WARNING
+ "Lustre: %d MB is too small for debug buffer size, "
+ "setting it to %d MB.\n", mb, num_possible_cpus());
+ mb = num_possible_cpus();
+ }
- if (mb > limit) {
- printk(CFS_KERN_WARNING
- "Lustre: %d MB is too large for debug buffer size, "
- "setting it to %d MB.\n", mb, limit);
- mb = limit;
- }
+ if (mb > limit) {
+ printk(CFS_KERN_WARNING
+ "Lustre: %d MB is too large for debug buffer size, "
+ "setting it to %d MB.\n", mb, limit);
+ mb = limit;
+ }
- mb /= cfs_num_possible_cpus();
- pages = mb << (20 - CFS_PAGE_SHIFT);
+ mb /= num_possible_cpus();
+ pages = mb << (20 - PAGE_CACHE_SHIFT);
- cfs_tracefile_write_lock();
+ cfs_tracefile_write_lock();
- cfs_tcd_for_each(tcd, i, j)
- tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
+ cfs_tcd_for_each(tcd, i, j)
+ tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
- cfs_tracefile_write_unlock();
+ cfs_tracefile_write_unlock();
- return 0;
+ return 0;
}
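/*
 * Editor's sketch of the sizing arithmetic in cfs_trace_set_debug_mb():
 * the MB budget is split across possible CPUs, then converted to pages
 * with a shift.  Assumes PAGE_CACHE_SHIFT == PAGE_SHIFT (true on the
 * kernels this patch targets).  Example: mb = 64 on 8 CPUs gives
 * 8 MB per CPU, i.e. 8 << (20 - 12) = 2048 4 KiB pages per CPU.
 */
#include <linux/cpumask.h>
#include <linux/mm.h>

static int demo_debug_pages_per_cpu(int mb)
{
	if (mb < num_possible_cpus())	/* at least 1 MB per CPU */
		mb = num_possible_cpus();

	mb /= num_possible_cpus();
	return mb << (20 - PAGE_SHIFT);
}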
int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
cfs_tracefile_read_unlock();
- return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
+ return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
int last_loop = 0;
int rc;
- CFS_DECL_MMSPACE;
+ DECL_MMSPACE;
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
- cfs_daemonize("ktracefiled");
spin_lock_init(&pc.pc_lock);
complete(&tctl->tctl_start);
goto end_loop;
}
- CFS_MMSPACE_OPEN;
+ MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page,
else if (f_pos > (off_t)filp_size(filp))
f_pos = filp_size(filp);
- rc = filp_write(filp, cfs_page_address(tage->page),
+ rc = filp_write(filp, page_address(tage->page),
tage->used, &f_pos);
if (rc != (int)tage->used) {
printk(CFS_KERN_WARNING "wanted to write %u "
__LASSERT(cfs_list_empty(&pc.pc_pages));
}
}
- CFS_MMSPACE_CLOSE;
+ MMSPACE_CLOSE;
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
if (!cfs_list_empty(&pc.pc_pages)) {
int i;
- printk(CFS_KERN_ALERT "Lustre: trace pages aren't "
- " empty\n");
- printk(CFS_KERN_ERR "total cpus(%d): ",
- cfs_num_possible_cpus());
- for (i = 0; i < cfs_num_possible_cpus(); i++)
- if (cpu_online(i))
- printk(CFS_KERN_ERR "%d(on) ", i);
- else
- printk(CFS_KERN_ERR "%d(off) ", i);
- printk(CFS_KERN_ERR "\n");
+ printk(CFS_KERN_ALERT "Lustre: trace pages aren't empty\n");
+ printk(CFS_KERN_ERR "total cpus(%d): ",
+ num_possible_cpus());
+ for (i = 0; i < num_possible_cpus(); i++)
+ if (cpu_online(i))
+ printk(CFS_KERN_ERR "%d(on) ", i);
+ else
+ printk(CFS_KERN_ERR "%d(off) ", i);
+ printk(CFS_KERN_ERR "\n");
i = 0;
cfs_list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
init_completion(&tctl->tctl_start);
init_completion(&tctl->tctl_stop);
- cfs_waitq_init(&tctl->tctl_waitq);
- cfs_atomic_set(&tctl->tctl_shutdown, 0);
+ cfs_waitq_init(&tctl->tctl_waitq);
+ cfs_atomic_set(&tctl->tctl_shutdown, 0);
- if (cfs_create_thread(tracefiled, tctl, 0) < 0) {
- rc = -ECHILD;
- goto out;
- }
+ if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
+ rc = -ECHILD;
+ goto out;
+ }
wait_for_completion(&tctl->tctl_start);
- thread_running = 1;
+ thread_running = 1;
out:
mutex_unlock(&cfs_trace_thread_mutex);
return rc;
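/*
 * Editor's sketch of the start-up handshake the patch switches to:
 * kthread_run() returns an ERR_PTR() on failure (never NULL), so the
 * result is checked with IS_ERR().  demo_thread_fn and demo_start are
 * hypothetical stand-ins for tracefiled() and tctl->tctl_start.
 */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(demo_start);

static int demo_thread_fn(void *arg)
{
	complete(&demo_start);	/* tell the starter we are running */
	return 0;
}

static int demo_start_thread(void)
{
	struct task_struct *task;

	task = kthread_run(demo_thread_fn, NULL, "kdemothread");
	if (IS_ERR(task))
		return PTR_ERR(task);

	wait_for_completion(&demo_start);	/* mirrors the tctl_start wait */
	return 0;
}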