X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=libcfs%2Flibcfs%2Fworkitem.c;h=168fea5ca74293b721f922db56ee2fbd71ee2854;hb=53aee2559638e25dd6dc664ceab2b023eaac1cb5;hp=d93c3e957d2efebd0fbe326afff02390e1aa45e8;hpb=f6995cf04407dff15d6ca79ca44cfa97dc6eb014;p=fs%2Flustre-release.git

diff --git a/libcfs/libcfs/workitem.c b/libcfs/libcfs/workitem.c
index d93c3e9..168fea5 100644
--- a/libcfs/libcfs/workitem.c
+++ b/libcfs/libcfs/workitem.c
@@ -46,14 +46,12 @@
 
 #define CFS_WS_NAME_LEN         16
 
-typedef struct cfs_wi_sched {
+struct cfs_wi_sched {
        struct list_head        ws_list;        /* chain on global list */
-#ifdef __KERNEL__
        /** serialised workitems */
        spinlock_t              ws_lock;
        /** where schedulers sleep */
        wait_queue_head_t       ws_waitq;
-#endif
        /** concurrent workitems */
        struct list_head        ws_runq;
        /** rescheduled running-workitems, a workitem can be rescheduled
@@ -76,80 +74,50 @@ typedef struct cfs_wi_sched {
        unsigned int            ws_starting:1;
        /** scheduler name */
        char                    ws_name[CFS_WS_NAME_LEN];
-} cfs_wi_sched_t;
+};
 
 static struct cfs_workitem_data {
        /** serialize */
        spinlock_t              wi_glock;
        /** list of all schedulers */
-       struct list_head        wi_scheds;
+       struct list_head        wi_scheds;
        /** WI module is initialized */
        int                     wi_init;
        /** shutting down the whole WI module */
        int                     wi_stopping;
 } cfs_wi_data;
 
-#ifdef __KERNEL__
-static inline void
-cfs_wi_sched_lock(cfs_wi_sched_t *sched)
-{
-       spin_lock(&sched->ws_lock);
-}
-
-static inline void
-cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
-{
-       spin_unlock(&sched->ws_lock);
-}
-
 static inline int
-cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
+cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
 {
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
        if (sched->ws_stopping) {
-               cfs_wi_sched_unlock(sched);
+               spin_unlock(&sched->ws_lock);
                return 0;
        }
 
        if (!list_empty(&sched->ws_runq)) {
-               cfs_wi_sched_unlock(sched);
+               spin_unlock(&sched->ws_lock);
                return 0;
        }
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
        return 1;
 }
 
-#else /* !__KERNEL__ */
-
-static inline void
-cfs_wi_sched_lock(cfs_wi_sched_t *sched)
-{
-       spin_lock(&cfs_wi_data.wi_glock);
-}
-
-static inline void
-cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
-{
-       spin_unlock(&cfs_wi_data.wi_glock);
-}
-
-#endif /* __KERNEL__ */
-
 /* XXX:
  * 0. it only works when called from wi->wi_action.
  * 1. when it returns no one shall try to schedule the workitem.
  */
 void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
        LASSERT(!in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);
 
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
-#ifdef __KERNEL__
        LASSERT(wi->wi_running);
-#endif
+
        if (wi->wi_scheduled) { /* cancel pending schedules */
                LASSERT(!list_empty(&wi->wi_list));
                list_del_init(&wi->wi_list);
@@ -161,7 +129,7 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
        LASSERT(list_empty(&wi->wi_list));
 
        wi->wi_scheduled = 1; /* LBUG future schedule attempts */
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
 
        return;
 }
@@ -171,7 +139,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
  * cancel schedule request of workitem \a wi
  */
 int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
        int     rc;
 
@@ -183,7 +151,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
        * means the workitem will not be scheduled and will not have
        * any race with wi_action.
        */
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        rc = !(wi->wi_running);
 
@@ -199,7 +167,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 
        LASSERT (list_empty(&wi->wi_list));
 
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
        return rc;
 }
 EXPORT_SYMBOL(cfs_wi_deschedule);
@@ -212,12 +180,12 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
 * be added, and even dynamic creation of serialised queues might be supported.
 */
 void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
        LASSERT(!in_interrupt()); /* because we use plain spinlock */
        LASSERT(!sched->ws_stopping);
 
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        if (!wi->wi_scheduled) {
                LASSERT (list_empty(&wi->wi_list));
@@ -226,26 +194,22 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
                sched->ws_nscheduled++;
                if (!wi->wi_running) {
                        list_add_tail(&wi->wi_list, &sched->ws_runq);
-#ifdef __KERNEL__
                        wake_up(&sched->ws_waitq);
-#endif
                } else {
                        list_add(&wi->wi_list, &sched->ws_rerunq);
                }
        }
 
        LASSERT (!list_empty(&wi->wi_list));
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
        return;
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
 
-#ifdef __KERNEL__
-
 static int
-cfs_wi_scheduler (void *arg)
+cfs_wi_scheduler(void *arg)
 {
-       struct cfs_wi_sched *sched = (cfs_wi_sched_t *)arg;
+       struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
 
        cfs_block_allsigs();
 
@@ -263,17 +227,17 @@ cfs_wi_scheduler (void *arg)
 
        spin_unlock(&cfs_wi_data.wi_glock);
 
-       cfs_wi_sched_lock(sched);
+       spin_lock(&sched->ws_lock);
 
        while (!sched->ws_stopping) {
                int             nloops = 0;
                int             rc;
-               cfs_workitem_t  *wi;
+               struct cfs_workitem *wi;
 
                while (!list_empty(&sched->ws_runq) &&
                       nloops < CFS_WI_RESCHED) {
                        wi = list_entry(sched->ws_runq.next,
-                                       cfs_workitem_t, wi_list);
+                                       struct cfs_workitem, wi_list);
                        LASSERT(wi->wi_scheduled && !wi->wi_running);
 
                        list_del_init(&wi->wi_list);
@@ -284,13 +248,12 @@ cfs_wi_scheduler (void *arg)
 
                        wi->wi_running   = 1;
                        wi->wi_scheduled = 0;
-
-                       cfs_wi_sched_unlock(sched);
+                       spin_unlock(&sched->ws_lock);
                        nloops++;
 
                        rc = (*wi->wi_action) (wi);
 
-                       cfs_wi_sched_lock(sched);
+                       spin_lock(&sched->ws_lock);
                        if (rc != 0) /* WI should be dead, even be freed! */
                                continue;
 
@@ -305,21 +268,21 @@ cfs_wi_scheduler (void *arg)
                }
 
                if (!list_empty(&sched->ws_runq)) {
-                       cfs_wi_sched_unlock(sched);
+                       spin_unlock(&sched->ws_lock);
                        /* don't sleep because some workitems still
                        * expect me to come back soon */
                        cond_resched();
-                       cfs_wi_sched_lock(sched);
+                       spin_lock(&sched->ws_lock);
                        continue;
                }
 
-               cfs_wi_sched_unlock(sched);
+               spin_unlock(&sched->ws_lock);
                rc = wait_event_interruptible_exclusive(sched->ws_waitq,
                                !cfs_wi_sched_cansleep(sched));
-               cfs_wi_sched_lock(sched);
+               spin_lock(&sched->ws_lock);
        }
 
-       cfs_wi_sched_unlock(sched);
+       spin_unlock(&sched->ws_lock);
 
        spin_lock(&cfs_wi_data.wi_glock);
        sched->ws_nthreads--;
@@ -328,54 +291,6 @@ cfs_wi_scheduler (void *arg)
        return 0;
 }
 
-#else /* __KERNEL__ */
-
-int
-cfs_wi_check_events (void)
-{
-       int             n = 0;
-       cfs_workitem_t  *wi;
-
-       spin_lock(&cfs_wi_data.wi_glock);
-
-       for (;;) {
-               struct cfs_wi_sched     *sched = NULL;
-               struct cfs_wi_sched     *tmp;
-
-               /** rerunq is always empty for userspace */
-               list_for_each_entry(tmp, &cfs_wi_data.wi_scheds, ws_list) {
-                       if (!list_empty(&tmp->ws_runq)) {
-                               sched = tmp;
-                               break;
-                       }
-               }
-
-               if (sched == NULL)
-                       break;
-
-               wi = list_entry(sched->ws_runq.next,
-                               cfs_workitem_t, wi_list);
-               list_del_init(&wi->wi_list);
-
-               LASSERT(sched->ws_nscheduled > 0);
-               sched->ws_nscheduled--;
-
-               LASSERT(wi->wi_scheduled);
-               wi->wi_scheduled = 0;
-               spin_unlock(&cfs_wi_data.wi_glock);
-
-               n++;
-               (*wi->wi_action) (wi);
-
-               spin_lock(&cfs_wi_data.wi_glock);
-       }
-
-       spin_unlock(&cfs_wi_data.wi_glock);
-       return n;
-}
-
-#endif
-
 void
 cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 {
@@ -395,7 +310,6 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 
        spin_unlock(&cfs_wi_data.wi_glock);
 
-#ifdef __KERNEL__
        wake_up_all(&sched->ws_waitq);
 
        spin_lock(&cfs_wi_data.wi_glock);
@@ -418,7 +332,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
        list_del(&sched->ws_list);
        spin_unlock(&cfs_wi_data.wi_glock);
 
-#endif
+
        LASSERT(sched->ws_nscheduled == 0);
 
        LIBCFS_FREE(sched, sizeof(*sched));
@@ -449,15 +363,13 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
        sched->ws_cptab = cptab;
        sched->ws_cpt = cpt;
 
-#ifdef __KERNEL__
        spin_lock_init(&sched->ws_lock);
        init_waitqueue_head(&sched->ws_waitq);
-#endif
+
        INIT_LIST_HEAD(&sched->ws_runq);
        INIT_LIST_HEAD(&sched->ws_rerunq);
        INIT_LIST_HEAD(&sched->ws_list);
 
-#ifdef __KERNEL__
        for (; nthrs > 0; nthrs--) {
                char    name[16];
                struct task_struct *task;
@@ -500,7 +412,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                        return rc;
                }
        }
-#endif
+
        spin_lock(&cfs_wi_data.wi_glock);
        list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
        spin_unlock(&cfs_wi_data.wi_glock);
@@ -513,7 +425,7 @@ EXPORT_SYMBOL(cfs_wi_sched_create);
 int
 cfs_wi_startup(void)
 {
-       memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
+       memset(&cfs_wi_data, 0, sizeof(struct cfs_workitem_data));
        spin_lock_init(&cfs_wi_data.wi_glock);
        INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
 
@@ -531,7 +443,6 @@ cfs_wi_shutdown (void)
        cfs_wi_data.wi_stopping = 1;
        spin_unlock(&cfs_wi_data.wi_glock);
 
-#ifdef __KERNEL__
        /* nobody should contend on this list */
        list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                sched->ws_stopping = 1;
@@ -549,7 +460,7 @@ cfs_wi_shutdown (void)
                }
                spin_unlock(&cfs_wi_data.wi_glock);
        }
-#endif
+
        while (!list_empty(&cfs_wi_data.wi_scheds)) {
                sched = list_entry(cfs_wi_data.wi_scheds.next,
                                   struct cfs_wi_sched, ws_list);
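
---

Usage notes. With the cfs_wi_sched_lock()/cfs_wi_sched_unlock() wrappers gone,
callers drive the workitem API through cfs_wi_schedule(), cfs_wi_deschedule()
and cfs_wi_exit(), all taking struct cfs_workitem under the new naming. A
minimal sketch of a workitem user follows; struct my_task, the mt_* fields and
my_task_action() are hypothetical, and cfs_wi_init() plus the wi_* fields are
assumed to come from libcfs_workitem.h, not from the hunks above:

	/* A workitem embedded in a hypothetical payload.  wi_action runs on
	 * a scheduler thread; returning non-zero tells cfs_wi_scheduler()
	 * the item may already be dead or freed, so it is never touched
	 * again. */
	struct my_task {
		struct cfs_workitem	mt_wi;
		int			mt_done;
	};

	static struct cfs_wi_sched *my_sched;

	static int
	my_task_action(struct cfs_workitem *wi)
	{
		struct my_task *task = container_of(wi, struct my_task,
						    mt_wi);

		/* ... perform one unit of deferred work ... */

		if (!task->mt_done) {
			/* wi_running is still set, so this lands on
			 * ws_rerunq and the item runs again later */
			cfs_wi_schedule(my_sched, wi);
			return 0;
		}

		/* finished: detach before freeing; per the XXX comment
		 * above, cfs_wi_exit() is only legal from inside
		 * wi_action */
		cfs_wi_exit(my_sched, wi);
		LIBCFS_FREE(task, sizeof(*task));
		return 1;	/* scheduler must not touch wi again */
	}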
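
Scheduler setup and teardown are unchanged in spirit; only the #ifdef
__KERNEL__ guards are gone. A sketch follows, assuming the full
cfs_wi_sched_create() prototype is (name, cptab, cpt, nthrs, &sched); that
tail is inferred from the function body in the hunks above, since the
prototype itself is truncated there, and my_module_init() is hypothetical:

	static int
	my_module_init(void)
	{
		struct my_task *task;
		int rc;

		/* one scheduler thread, not bound to a CPU partition */
		rc = cfs_wi_sched_create("my_wi", cfs_cpt_table,
					 CFS_CPT_ANY, 1, &my_sched);
		if (rc != 0)
			return rc;

		LIBCFS_ALLOC(task, sizeof(*task));	/* zeroed on success */
		if (task == NULL) {
			cfs_wi_sched_destroy(my_sched);
			return -ENOMEM;
		}

		/* cfs_wi_init() is assumed from libcfs_workitem.h */
		cfs_wi_init(&task->mt_wi, task, my_task_action);
		cfs_wi_schedule(my_sched, &task->mt_wi); /* wakes a thread */
		return 0;
	}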
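
Finally, note the contract of cfs_wi_deschedule() visible in the hunks: it
returns !wi_running, so 1 means the item was cancelled while pending (or
idle) and will not execute, while 0 means wi_action is on a CPU right now
and the caller must synchronise with it instead. A sketch of a cancel path
built on that contract (my_task_cancel() is hypothetical):

	static void
	my_task_cancel(struct my_task *task)
	{
		if (cfs_wi_deschedule(my_sched, &task->mt_wi)) {
			/* pulled off ws_runq/ws_rerunq; safe to free */
			LIBCFS_FREE(task, sizeof(*task));
			return;
		}
		/* action in flight: flag it and let my_task_action()
		 * retire the item itself via cfs_wi_exit() */
		task->mt_done = 1;
	}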