/** serialised workitems */
spinlock_t ws_lock;
/** where schedulers sleep */
- cfs_waitq_t ws_waitq;
+ wait_queue_head_t ws_waitq;
#endif
/** concurrent workitems */
cfs_list_t ws_runq;
void
cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
- LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
cfs_wi_sched_lock(sched);
{
int rc;
- LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
/*
void
cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
{
- LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
- cfs_wi_sched_lock(sched);
+ cfs_wi_sched_lock(sched);
- if (!wi->wi_scheduled) {
- LASSERT (cfs_list_empty(&wi->wi_list));
+ if (!wi->wi_scheduled) {
+ LASSERT (cfs_list_empty(&wi->wi_list));
- wi->wi_scheduled = 1;
+ wi->wi_scheduled = 1;
sched->ws_nscheduled++;
- if (!wi->wi_running) {
- cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+ if (!wi->wi_running) {
+ cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
- cfs_waitq_signal(&sched->ws_waitq);
+ wake_up(&sched->ws_waitq);
#endif
- } else {
- cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
- }
- }
+ } else {
+ cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+ }
+ }
- LASSERT (!cfs_list_empty(&wi->wi_list));
- cfs_wi_sched_unlock(sched);
- return;
+ LASSERT (!cfs_list_empty(&wi->wi_list));
+ cfs_wi_sched_unlock(sched);
+ return;
}
EXPORT_SYMBOL(cfs_wi_schedule);
cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
}
- if (!cfs_list_empty(&sched->ws_runq)) {
- cfs_wi_sched_unlock(sched);
- /* don't sleep because some workitems still
- * expect me to come back soon */
- cfs_cond_resched();
- cfs_wi_sched_lock(sched);
- continue;
- }
+ if (!cfs_list_empty(&sched->ws_runq)) {
+ cfs_wi_sched_unlock(sched);
+ /* don't sleep because some workitems still
+ * expect me to come back soon */
+ cond_resched();
+ cfs_wi_sched_lock(sched);
+ continue;
+ }
- cfs_wi_sched_unlock(sched);
- cfs_wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched), rc);
- cfs_wi_sched_lock(sched);
+ cfs_wi_sched_unlock(sched);
+ rc = wait_event_interruptible_exclusive(sched->ws_waitq,
+ !cfs_wi_sched_cansleep(sched));
+ cfs_wi_sched_lock(sched);
}
cfs_wi_sched_unlock(sched);
spin_unlock(&cfs_wi_data.wi_glock);
#ifdef __KERNEL__
- cfs_waitq_broadcast(&sched->ws_waitq);
+ wake_up_all(&sched->ws_waitq);
spin_lock(&cfs_wi_data.wi_glock);
{
if (sched == NULL)
return -ENOMEM;
- strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
+ if (strlen(name) > sizeof(sched->ws_name)-1) {
+ LIBCFS_FREE(sched, sizeof(*sched));
+ return -E2BIG;
+ }
+	snprintf(sched->ws_name, sizeof(sched->ws_name), "%s", name);
+
sched->ws_cptab = cptab;
sched->ws_cpt = cpt;
#ifdef __KERNEL__
spin_lock_init(&sched->ws_lock);
- cfs_waitq_init(&sched->ws_waitq);
+ init_waitqueue_head(&sched->ws_waitq);
#endif
CFS_INIT_LIST_HEAD(&sched->ws_runq);
CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
#ifdef __KERNEL__
for (; nthrs > 0; nthrs--) {
- char name[16];
- cfs_task_t *task;
+ char name[16];
+ struct task_struct *task;
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- cfs_schedule();
+ schedule();
spin_lock(&cfs_wi_data.wi_glock);
}
/* nobody should contend on this list */
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
sched->ws_stopping = 1;
- cfs_waitq_broadcast(&sched->ws_waitq);
+ wake_up_all(&sched->ws_waitq);
}
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {