} else {
tage = tage_alloc(CFS_ALLOC_ATOMIC);
if (tage == NULL) {
- printk(KERN_WARNING
- "failure to allocate a tage (%ld)\n",
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "cannot allocate a tage (%ld)\n",
tcd->tcd_cur_pages);
return NULL;
}
CFS_INIT_LIST_HEAD(&pc.pc_pages);
spin_lock_init(&pc.pc_lock);
- cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
+ cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
struct trace_page, linkage) {
if (pgcount-- == 0)
break;
{
libcfs_debug_msg(NULL, 0, D_EMERG, file, func, line,
"ASSERTION(%s) failed\n", expr);
+ /* cfs_enter_debugger(); */
lbug_with_loc(file, func, line);
}
EXPORT_SYMBOL(libcfs_assertion_failed);
}
}
-static void collect_pages_on_cpu(void *info)
+static void collect_pages_on_all_cpus(struct page_collection *pc)
{
struct trace_cpu_data *tcd;
- struct page_collection *pc = info;
- int i;
+ int i, cpu;
spin_lock(&pc->pc_lock);
- tcd_for_each_type_lock(tcd, i) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
- tcd->tcd_cur_daemon_pages = 0;
+ for_each_possible_cpu(cpu) {
+ tcd_for_each_type_lock(tcd, i, cpu) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ tcd->tcd_cur_pages = 0;
+ if (pc->pc_want_daemon_pages) {
+ list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
+ tcd->tcd_cur_daemon_pages = 0;
+ }
}
}
spin_unlock(&pc->pc_lock);
if (libcfs_panic_in_progress)
panic_collect_pages(pc);
else
- trace_call_on_all_cpus(collect_pages_on_cpu, pc);
+ collect_pages_on_all_cpus(pc);
}
-static void put_pages_back_on_cpu(void *info)
+static void put_pages_back_on_all_cpus(struct page_collection *pc)
{
- struct page_collection *pc = info;
struct trace_cpu_data *tcd;
struct list_head *cur_head;
struct trace_page *tage;
struct trace_page *tmp;
- int i;
+ int i, cpu;
spin_lock(&pc->pc_lock);
- tcd_for_each_type_lock(tcd, i) {
- cur_head = tcd->tcd_pages.next;
+ for_each_possible_cpu(cpu) {
+ tcd_for_each_type_lock(tcd, i, cpu) {
+ cur_head = tcd->tcd_pages.next;
- cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
- struct trace_page, linkage) {
+ cfs_list_for_each_entry_safe_typed(tage, tmp,
+ &pc->pc_pages,
+ struct trace_page,
+ linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- if (tage->cpu != smp_processor_id() || tage->type != i)
- continue;
+ if (tage->cpu != cpu || tage->type != i)
+ continue;
- tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
+ tage_to_tail(tage, cur_head);
+ tcd->tcd_cur_pages++;
+ }
}
}
spin_unlock(&pc->pc_lock);
/*
 * Return collected pages to their per-CPU buffers.  Skipped while a
 * panic is in progress — NOTE(review): presumably because other CPUs
 * may be stopped in arbitrary states during panic, so their per-CPU
 * buffers must not be touched; confirm against panic_collect_pages().
 */
static void put_pages_back(struct page_collection *pc)
{
        if (!libcfs_panic_in_progress)
                put_pages_back_on_all_cpus(pc);
}
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
__LASSERT_TAGE_INVARIANT(tage);
- if (tage->cpu != smp_processor_id() ||
- tage->type != tcd->tcd_type)
+ if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
continue;
tage_to_tail(tage, &tcd->tcd_daemon_pages);
spin_unlock(&pc->pc_lock);
}
-static void put_pages_on_daemon_list_on_cpu(void *info)
+static void put_pages_on_daemon_list(struct page_collection *pc)
{
struct trace_cpu_data *tcd;
- int i;
+ int i, cpu;
- tcd_for_each_type_lock(tcd, i)
- put_pages_on_tcd_daemon_list(info, tcd);
-}
-
-static void put_pages_on_daemon_list(struct page_collection *pc)
-{
- trace_call_on_all_cpus(put_pages_on_daemon_list_on_cpu, pc);
+ for_each_possible_cpu(cpu) {
+ tcd_for_each_type_lock(tcd, i, cpu)
+ put_pages_on_tcd_daemon_list(pc, tcd);
+ }
}
void trace_debug_print(void)
tracefile_write_lock();
if (strcmp(str, "stop") == 0) {
- trace_stop_thread();
+ tracefile_write_unlock();
+ trace_stop_thread();
+ tracefile_write_lock();
memset(tracefile, 0, sizeof(tracefile));
} else if (strncmp(str, "size=", 5) == 0) {
cfs_filp_close(filp);
put_pages_on_daemon_list(&pc);
+ if (!list_empty(&pc.pc_pages)) {
+ int i;
+
+ printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
+ printk(KERN_ERR "total cpus(%d): ", num_possible_cpus());
+ for (i = 0; i < num_possible_cpus(); i++)
+ if (cpu_online(i))
+ printk(KERN_ERR "%d(on) ", i);
+ else
+ printk(KERN_ERR "%d(off) ", i);
+ printk(KERN_ERR "\n");
+
+ i = 0;
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
+ linkage)
+ printk(KERN_ERR "page %d belongs to cpu %d\n",
+ ++i, tage->cpu);
+ printk(KERN_ERR "There are %d pages unwritten\n", i);
+ }
__LASSERT(list_empty(&pc.pc_pages));
}
complete(&tctl->tctl_stop);
return 0;
}
-static void trace_cleanup_on_cpu(void *info)
+static void trace_cleanup_on_all_cpus(void)
{
struct trace_cpu_data *tcd;
struct trace_page *tage;
struct trace_page *tmp;
- int i;
+ int i, cpu;
- tcd_for_each_type_lock(tcd, i) {
- tcd->tcd_shutting_down = 1;
+ for_each_possible_cpu(cpu) {
+ tcd_for_each_type_lock(tcd, i, cpu) {
+ tcd->tcd_shutting_down = 1;
- cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
- struct trace_page, linkage) {
- __LASSERT_TAGE_INVARIANT(tage);
+ cfs_list_for_each_entry_safe_typed(tage, tmp,
+ &tcd->tcd_pages,
+ struct trace_page,
+ linkage) {
+ __LASSERT_TAGE_INVARIANT(tage);
- list_del(&tage->linkage);
- tage_free(tage);
+ list_del(&tage->linkage);
+ tage_free(tage);
+ }
+
+ tcd->tcd_cur_pages = 0;
}
- tcd->tcd_cur_pages = 0;
}
}
CFS_INIT_LIST_HEAD(&pc.pc_pages);
spin_lock_init(&pc.pc_lock);
- trace_call_on_all_cpus(trace_cleanup_on_cpu, &pc);
+ trace_cleanup_on_all_cpus();
tracefile_fini_arch();
}