struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;
-cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
+atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_cpu_data *tcd);
struct cfs_trace_page *tage;
/* My caller is trying to free memory */
- if (!cfs_in_interrupt() && memory_pressure_get())
+ if (!in_interrupt() && memory_pressure_get())
return NULL;
/*
}
tage->page = page;
- cfs_atomic_inc(&cfs_tage_allocated);
+ atomic_inc(&cfs_tage_allocated);
return tage;
}
__free_page(tage->page);
kfree(tage);
- cfs_atomic_dec(&cfs_tage_allocated);
+ atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
tage = cfs_tage_alloc(GFP_ATOMIC);
if (unlikely(tage == NULL)) {
if ((!memory_pressure_get() ||
- cfs_in_interrupt()) && printk_ratelimit())
+ in_interrupt()) && printk_ratelimit())
printk(KERN_WARNING
"cannot allocate a tage (%ld)\n",
tcd->tcd_cur_pages);
cfs_list_add_tail(&tage->linkage, &tcd->tcd_pages);
tcd->tcd_cur_pages++;
- if (tcd->tcd_cur_pages > 8 && thread_running) {
- struct tracefiled_ctl *tctl = &trace_tctl;
- /*
- * wake up tracefiled to process some pages.
- */
- cfs_waitq_signal(&tctl->tctl_waitq);
- }
- return tage;
+ if (tcd->tcd_cur_pages > 8 && thread_running) {
+ struct tracefiled_ctl *tctl = &trace_tctl;
+ /*
+ * wake up tracefiled to process some pages.
+ */
+ wake_up(&tctl->tctl_waitq);
+ }
+ return tage;
}
return NULL;
}
pgcount + 1, tcd->tcd_cur_pages);
CFS_INIT_LIST_HEAD(&pc.pc_pages);
- spin_lock_init(&pc.pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
struct cfs_trace_page, linkage) {
libcfs_panic_in_progress = 1;
libcfs_catastrophe = 1;
- cfs_mb();
+ smp_mb();
cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
struct cfs_trace_cpu_data *tcd;
int i, cpu;
- spin_lock(&pc->pc_lock);
cfs_for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
}
}
}
- spin_unlock(&pc->pc_lock);
}
static void collect_pages(struct page_collection *pc)
struct cfs_trace_page *tmp;
int i, cpu;
- spin_lock(&pc->pc_lock);
cfs_for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cur_head = tcd->tcd_pages.next;
}
}
}
- spin_unlock(&pc->pc_lock);
}
static void put_pages_back(struct page_collection *pc)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
- spin_lock(&pc->pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
struct cfs_trace_page, linkage) {
tcd->tcd_cur_daemon_pages--;
}
}
- spin_unlock(&pc->pc_lock);
}
static void put_pages_on_daemon_list(struct page_collection *pc)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
- spin_lock_init(&pc.pc_lock);
-
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
goto out;
}
- spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
if (cfs_list_empty(&pc.pc_pages)) {
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
- spin_lock_init(&pc.pc_lock);
-
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
- spin_lock_init(&pc.pc_lock);
complete(&tctl->tctl_start);
- while (1) {
- cfs_waitlink_t __wait;
+ while (1) {
+ wait_queue_t __wait;
pc.pc_want_daemon_pages = 0;
collect_pages(&pc);
"%d\n", ++i, tage->cpu);
printk(KERN_ERR "There are %d pages unwritten\n",
i);
- }
- __LASSERT(cfs_list_empty(&pc.pc_pages));
+ }
+ __LASSERT(cfs_list_empty(&pc.pc_pages));
end_loop:
- if (cfs_atomic_read(&tctl->tctl_shutdown)) {
- if (last_loop == 0) {
- last_loop = 1;
- continue;
- } else {
- break;
- }
- }
- cfs_waitlink_init(&__wait);
- cfs_waitq_add(&tctl->tctl_waitq, &__wait);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_timedwait(&__wait, CFS_TASK_INTERRUPTIBLE,
- cfs_time_seconds(1));
- cfs_waitq_del(&tctl->tctl_waitq, &__wait);
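+ /*
+ * on shutdown, make one final pass so pages queued since the
+ * previous iteration are still written out before the thread exits.
+ */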
+ if (atomic_read(&tctl->tctl_shutdown)) {
+ if (last_loop == 0) {
+ last_loop = 1;
+ continue;
+ } else {
+ break;
+ }
+ }
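+ /*
+ * nothing more to do for now: sleep interruptibly for up to one
+ * second, or until woken because new pages have been queued.
+ */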
+ init_waitqueue_entry_current(&__wait);
+ add_wait_queue(&tctl->tctl_waitq, &__wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
return 0;
init_completion(&tctl->tctl_start);
init_completion(&tctl->tctl_stop);
- cfs_waitq_init(&tctl->tctl_waitq);
- cfs_atomic_set(&tctl->tctl_shutdown, 0);
+ init_waitqueue_head(&tctl->tctl_waitq);
+ atomic_set(&tctl->tctl_shutdown, 0);
if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
rc = -ECHILD;
if (thread_running) {
printk(KERN_INFO
"Lustre: shutting down debug daemon thread...\n");
- cfs_atomic_set(&tctl->tctl_shutdown, 1);
+ atomic_set(&tctl->tctl_shutdown, 1);
wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
}
struct page_collection pc;
CFS_INIT_LIST_HEAD(&pc.pc_pages);
- spin_lock_init(&pc.pc_lock);
trace_cleanup_on_all_cpus();