X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Flibcfs%2Fworkitem.c;h=722b25dea859ff72fa730e3e5ddb6b1029c5e384;hb=f9ced98580ddf3e4d7e5c989448aff307049bdb3;hp=5b66e6022ee4be063013d2f595a0af7b6d476e24;hpb=da09be15c52c15ab42aec658c848fa17453855a2;p=fs%2Flustre-release.git

diff --git a/libcfs/libcfs/workitem.c b/libcfs/libcfs/workitem.c
index 5b66e60..722b25d 100644
--- a/libcfs/libcfs/workitem.c
+++ b/libcfs/libcfs/workitem.c
@@ -46,21 +46,21 @@
 #define CFS_WS_NAME_LEN         16
 
 typedef struct cfs_wi_sched {
-        cfs_list_t              ws_list;        /* chain on global list */
+        struct list_head        ws_list;        /* chain on global list */
 #ifdef __KERNEL__
         /** serialised workitems */
-        spinlock_t              ws_lock;
+        spinlock_t              ws_lock;
         /** where schedulers sleep */
-        cfs_waitq_t             ws_waitq;
+        wait_queue_head_t       ws_waitq;
 #endif
         /** concurrent workitems */
-        cfs_list_t              ws_runq;
+        struct list_head        ws_runq;
         /** rescheduled running-workitems, a workitem can be rescheduled
          * while running in wi_action(), but we don't want to execute it again
          * unless it returns from wi_action(), so we put it on ws_rerunq
          * while rescheduling, and move it to runq after it returns
          * from wi_action() */
-        cfs_list_t              ws_rerunq;
+        struct list_head        ws_rerunq;
         /** CPT-table for this scheduler */
         struct cfs_cpt_table    *ws_cptab;
         /** CPT id for affinity */
@@ -81,7 +81,7 @@ struct cfs_workitem_data {
         /** serialize */
         spinlock_t              wi_glock;
         /** list of all schedulers */
-        cfs_list_t              wi_scheds;
+        struct list_head        wi_scheds;
         /** WI module is initialized */
         int                     wi_init;
         /** shutting down the whole WI module */
@@ -106,16 +106,16 @@ cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
 {
         cfs_wi_sched_lock(sched);
         if (sched->ws_stopping) {
-                cfs_wi_sched_unlock(sched);
-                return 0;
-        }
+                cfs_wi_sched_unlock(sched);
+                return 0;
+        }
 
-        if (!cfs_list_empty(&sched->ws_runq)) {
-                cfs_wi_sched_unlock(sched);
-                return 0;
-        }
-        cfs_wi_sched_unlock(sched);
-        return 1;
+        if (!list_empty(&sched->ws_runq)) {
+                cfs_wi_sched_unlock(sched);
+                return 0;
+        }
+        cfs_wi_sched_unlock(sched);
+        return 1;
 }
 
 #else /* !__KERNEL__ */
@@ -141,7 +141,7 @@ cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
 void
 cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 {
-        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+        LASSERT(!in_interrupt()); /* because we use plain spinlock */
         LASSERT(!sched->ws_stopping);
 
         cfs_wi_sched_lock(sched);
@@ -150,14 +150,14 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
         LASSERT(wi->wi_running);
 #endif
         if (wi->wi_scheduled) { /* cancel pending schedules */
-                LASSERT(!cfs_list_empty(&wi->wi_list));
-                cfs_list_del_init(&wi->wi_list);
+                LASSERT(!list_empty(&wi->wi_list));
+                list_del_init(&wi->wi_list);
 
                 LASSERT(sched->ws_nscheduled > 0);
                 sched->ws_nscheduled--;
         }
 
-        LASSERT(cfs_list_empty(&wi->wi_list));
+        LASSERT(list_empty(&wi->wi_list));
         wi->wi_scheduled = 1; /* LBUG future schedule attempts */
 
         cfs_wi_sched_unlock(sched);
@@ -174,7 +174,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 {
         int rc;
 
-        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+        LASSERT(!in_interrupt()); /* because we use plain spinlock */
         LASSERT(!sched->ws_stopping);
 
         /*
@@ -187,19 +187,19 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
         rc = !(wi->wi_running);
 
         if (wi->wi_scheduled) { /* cancel pending schedules */
-                LASSERT(!cfs_list_empty(&wi->wi_list));
-                cfs_list_del_init(&wi->wi_list);
+                LASSERT(!list_empty(&wi->wi_list));
+                list_del_init(&wi->wi_list);
 
                 LASSERT(sched->ws_nscheduled > 0);
                 sched->ws_nscheduled--;
 
-                wi->wi_scheduled = 0;
-        }
+                wi->wi_scheduled = 0;
+        }
 
-        LASSERT (cfs_list_empty(&wi->wi_list));
+        LASSERT (list_empty(&wi->wi_list));
 
-        cfs_wi_sched_unlock(sched);
-        return rc;
+        cfs_wi_sched_unlock(sched);
+        return rc;
 }
 EXPORT_SYMBOL(cfs_wi_deschedule);
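The wi_scheduled/wi_running pair that cfs_wi_exit() and cfs_wi_deschedule() manipulate above is the whole life-cycle contract of a workitem. For orientation, a caller of this API looks roughly like the sketch below; it assumes the cfs_wi_init() helper and the wi_data/wi_action fields from libcfs/workitem.h in the same tree, and every "my_*" name is invented for illustration:

        /* hypothetical embedding of a workitem in a user structure */
        struct my_conn {
                cfs_workitem_t  mc_wi;
                long            mc_pending;
        };

        static int
        my_conn_action(cfs_workitem_t *wi)
        {
                struct my_conn *conn = wi->wi_data;

                /* handle one bounded batch of conn->mc_pending work...
                 * returning 0 keeps the workitem alive; returning
                 * non-zero tells the scheduler thread the workitem may
                 * already be freed and must not be touched again */
                return 0;
        }

        static void
        my_conn_start(struct cfs_wi_sched *sched, struct my_conn *conn)
        {
                cfs_wi_init(&conn->mc_wi, conn, my_conn_action);
                cfs_wi_schedule(sched, &conn->mc_wi);
        }

Before freeing a my_conn, its owner would call cfs_wi_deschedule() (or cfs_wi_exit() for a final, blocking cancellation) so that no scheduler thread can still reach the embedded workitem.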
@@ -213,29 +213,29 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
 void
 cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 {
-        LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+        LASSERT(!in_interrupt()); /* because we use plain spinlock */
         LASSERT(!sched->ws_stopping);
 
-        cfs_wi_sched_lock(sched);
+        cfs_wi_sched_lock(sched);
 
-        if (!wi->wi_scheduled) {
-                LASSERT (cfs_list_empty(&wi->wi_list));
+        if (!wi->wi_scheduled) {
+                LASSERT (list_empty(&wi->wi_list));
 
-                wi->wi_scheduled = 1;
+                wi->wi_scheduled = 1;
                 sched->ws_nscheduled++;
-                if (!wi->wi_running) {
-                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+                if (!wi->wi_running) {
+                        list_add_tail(&wi->wi_list, &sched->ws_runq);
 #ifdef __KERNEL__
-                        cfs_waitq_signal(&sched->ws_waitq);
+                        wake_up(&sched->ws_waitq);
 #endif
-                } else {
-                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
-                }
-        }
+                } else {
+                        list_add(&wi->wi_list, &sched->ws_rerunq);
+                }
+        }
 
-        LASSERT (!cfs_list_empty(&wi->wi_list));
-        cfs_wi_sched_unlock(sched);
-        return;
+        LASSERT (!list_empty(&wi->wi_list));
+        cfs_wi_sched_unlock(sched);
+        return;
 }
 EXPORT_SYMBOL(cfs_wi_schedule);
@@ -250,7 +250,9 @@ cfs_wi_scheduler (void *arg)
         /* CPT affinity scheduler? */
         if (sched->ws_cptab != NULL)
-                cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
+                if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
+                        CWARN("Failed to bind %s on CPT %d\n",
+                              sched->ws_name, sched->ws_cpt);
 
         spin_lock(&cfs_wi_data.wi_glock);
@@ -263,17 +265,17 @@ cfs_wi_scheduler (void *arg)
         cfs_wi_sched_lock(sched);
 
         while (!sched->ws_stopping) {
-                int             nloops = 0;
-                int             rc;
-                cfs_workitem_t *wi;
-
-                while (!cfs_list_empty(&sched->ws_runq) &&
-                       nloops < CFS_WI_RESCHED) {
-                        wi = cfs_list_entry(sched->ws_runq.next,
-                                            cfs_workitem_t, wi_list);
+                int             nloops = 0;
+                int             rc;
+                cfs_workitem_t *wi;
+
+                while (!list_empty(&sched->ws_runq) &&
+                       nloops < CFS_WI_RESCHED) {
+                        wi = list_entry(sched->ws_runq.next,
+                                        cfs_workitem_t, wi_list);
                         LASSERT(wi->wi_scheduled && !wi->wi_running);
 
-                        cfs_list_del_init(&wi->wi_list);
+                        list_del_init(&wi->wi_list);
 
                         LASSERT(sched->ws_nscheduled > 0);
                         sched->ws_nscheduled--;
@@ -291,29 +293,29 @@ cfs_wi_scheduler (void *arg)
                         if (rc != 0) /* WI should be dead, even be freed! */
                                 continue;
 
-                        wi->wi_running = 0;
-                        if (cfs_list_empty(&wi->wi_list))
+                        wi->wi_running = 0;
+                        if (list_empty(&wi->wi_list))
                                 continue;
 
                         LASSERT(wi->wi_scheduled);
-                        /* wi is rescheduled, should be on rerunq now, we
-                         * move it to runq so it can run action now */
-                        cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
+                        /* wi is rescheduled, should be on rerunq now, we
+                         * move it to runq so it can run action now */
+                        list_move_tail(&wi->wi_list, &sched->ws_runq);
                 }
 
-                if (!cfs_list_empty(&sched->ws_runq)) {
-                        cfs_wi_sched_unlock(sched);
-                        /* don't sleep because some workitems still
-                         * expect me to come back soon */
-                        cfs_cond_resched();
-                        cfs_wi_sched_lock(sched);
-                        continue;
-                }
+                if (!list_empty(&sched->ws_runq)) {
+                        cfs_wi_sched_unlock(sched);
+                        /* don't sleep because some workitems still
+                         * expect me to come back soon */
+                        cond_resched();
+                        cfs_wi_sched_lock(sched);
+                        continue;
+                }
 
-                cfs_wi_sched_unlock(sched);
-                cfs_wait_event_interruptible_exclusive(sched->ws_waitq,
-                                !cfs_wi_sched_cansleep(sched), rc);
-                cfs_wi_sched_lock(sched);
+                cfs_wi_sched_unlock(sched);
+                rc = wait_event_interruptible_exclusive(sched->ws_waitq,
+                                !cfs_wi_sched_cansleep(sched));
+                cfs_wi_sched_lock(sched);
         }
 
         cfs_wi_sched_unlock(sched);
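The last change in the hunk above is more than a rename: the removed libcfs macro reported its status through an out-parameter, while the native kernel macro returns it. A minimal sketch of the two shapes (cond() stands for any wakeup condition and is not a symbol from this file):

        static int
        wait_old_style(cfs_waitq_t *wq)
        {
                int rc;

                /* libcfs wrapper: rc is the macro's third argument */
                cfs_wait_event_interruptible_exclusive(*wq, cond(), rc);
                return rc;
        }

        static int
        wait_new_style(wait_queue_head_t *wq)
        {
                /* kernel API: returns 0 once cond() is true, or
                 * -ERESTARTSYS if the sleep was broken by a signal */
                return wait_event_interruptible_exclusive(*wq, cond());
        }

Either way, the scheduler loop does not act on a -ERESTARTSYS here; it simply re-checks ws_stopping and the run queue after reacquiring ws_lock.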
@@ -340,9 +342,8 @@ cfs_wi_check_events (void)
                 struct cfs_wi_sched *tmp;
 
                 /** rerunq is always empty for userspace */
-                cfs_list_for_each_entry(tmp,
-                                        &cfs_wi_data.wi_scheds, ws_list) {
-                        if (!cfs_list_empty(&tmp->ws_runq)) {
+                list_for_each_entry(tmp, &cfs_wi_data.wi_scheds, ws_list) {
+                        if (!list_empty(&tmp->ws_runq)) {
                                 sched = tmp;
                                 break;
                         }
@@ -351,9 +352,9 @@ cfs_wi_check_events (void)
                 if (sched == NULL)
                         break;
 
-                wi = cfs_list_entry(sched->ws_runq.next,
+                wi = list_entry(sched->ws_runq.next,
                                     cfs_workitem_t, wi_list);
-                cfs_list_del_init(&wi->wi_list);
+                list_del_init(&wi->wi_list);
 
                 LASSERT(sched->ws_nscheduled > 0);
                 sched->ws_nscheduled--;
@@ -388,13 +389,13 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
                 return;
         }
 
-        LASSERT(!cfs_list_empty(&sched->ws_list));
+        LASSERT(!list_empty(&sched->ws_list));
         sched->ws_stopping = 1;
 
         spin_unlock(&cfs_wi_data.wi_glock);
 
#ifdef __KERNEL__
-        cfs_waitq_broadcast(&sched->ws_waitq);
+        wake_up_all(&sched->ws_waitq);
 
         spin_lock(&cfs_wi_data.wi_glock);
         {
@@ -412,7 +413,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
                 }
         }
 
-        cfs_list_del(&sched->ws_list);
+        list_del(&sched->ws_list);
         spin_unlock(&cfs_wi_data.wi_glock);
 #endif
@@ -437,27 +438,32 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
         if (sched == NULL)
                 return -ENOMEM;
 
-        strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
+        if (strlen(name) > sizeof(sched->ws_name)-1) {
+                LIBCFS_FREE(sched, sizeof(*sched));
+                return -E2BIG;
+        }
+        strlcpy(sched->ws_name, name, sizeof(sched->ws_name));
+
         sched->ws_cptab = cptab;
         sched->ws_cpt = cpt;
 
 #ifdef __KERNEL__
         spin_lock_init(&sched->ws_lock);
-        cfs_waitq_init(&sched->ws_waitq);
+        init_waitqueue_head(&sched->ws_waitq);
 #endif
-        CFS_INIT_LIST_HEAD(&sched->ws_runq);
-        CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
-        CFS_INIT_LIST_HEAD(&sched->ws_list);
+        INIT_LIST_HEAD(&sched->ws_runq);
+        INIT_LIST_HEAD(&sched->ws_rerunq);
+        INIT_LIST_HEAD(&sched->ws_list);
 
 #ifdef __KERNEL__
         for (; nthrs > 0; nthrs--) {
-                char            name[16];
-                cfs_task_t      *task;
+                char            name[16];
+                struct task_struct *task;
 
                 spin_lock(&cfs_wi_data.wi_glock);
                 while (sched->ws_starting > 0) {
                         spin_unlock(&cfs_wi_data.wi_glock);
-                        cfs_schedule();
+                        schedule();
                         spin_lock(&cfs_wi_data.wi_glock);
                 }
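The strncpy() replacement above is a classic fix worth spelling out: strncpy() does not NUL-terminate the destination when the source is as long as the buffer, so a 16-character name in the 16-byte ws_name would leave the string unterminated. strlcpy() always terminates and returns strlen(source), which is what makes an explicit truncation check possible. A compact restatement of the pattern (copy_name_checked() is a made-up helper, not part of this patch):

        static int
        copy_name_checked(char *dst, size_t dst_len, const char *name)
        {
                /* strlcpy() NUL-terminates even on truncation and
                 * returns strlen(name), so truncation is detectable */
                if (strlcpy(dst, name, dst_len) >= dst_len)
                        return -E2BIG;
                return 0;
        }

The patch checks strlen() up front instead, so it can free the half-built scheduler and fail before any partial copy, but the invariant enforced is the same.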
@@ -483,7 +489,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                 spin_lock(&cfs_wi_data.wi_glock);
 
                 /* make up for cfs_wi_sched_destroy */
-                cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+                list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
                 sched->ws_starting--;
 
                 spin_unlock(&cfs_wi_data.wi_glock);
@@ -494,7 +500,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
         }
 #endif
         spin_lock(&cfs_wi_data.wi_glock);
-        cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+        list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
         spin_unlock(&cfs_wi_data.wi_glock);
 
         *sched_pp = sched;
@@ -508,7 +514,7 @@ cfs_wi_startup(void)
         memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
         spin_lock_init(&cfs_wi_data.wi_glock);
-        CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
+        INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
         cfs_wi_data.wi_init = 1;
 
         return 0;
@@ -525,12 +531,12 @@ cfs_wi_shutdown (void)
 #ifdef __KERNEL__
         /* nobody should contend on this list */
-        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+        list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                 sched->ws_stopping = 1;
-                cfs_waitq_broadcast(&sched->ws_waitq);
+                wake_up_all(&sched->ws_waitq);
         }
 
-        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+        list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
                 spin_lock(&cfs_wi_data.wi_glock);
 
                 while (sched->ws_nthreads != 0) {
@@ -541,10 +547,10 @@ cfs_wi_shutdown (void)
                 spin_unlock(&cfs_wi_data.wi_glock);
         }
 #endif
-        while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
-                sched = cfs_list_entry(cfs_wi_data.wi_scheds.next,
+        while (!list_empty(&cfs_wi_data.wi_scheds)) {
+                sched = list_entry(cfs_wi_data.wi_scheds.next,
                                    struct cfs_wi_sched, ws_list);
-                cfs_list_del(&sched->ws_list);
+                list_del(&sched->ws_list);
 
                 LIBCFS_FREE(sched, sizeof(*sched));
         }
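Taken together, the create/destroy paths changed above give the following scheduler lifecycle, shown here as a hedged sketch: the signature matches cfs_wi_sched_create() in this file, while the global cfs_cpt_table and CFS_CPT_ANY are assumed from libcfs's CPT API and the "my_*" names are invented:

        static struct cfs_wi_sched *my_sched;

        static int
        my_subsystem_startup(void)
        {
                /* one service thread, no CPT affinity */
                return cfs_wi_sched_create("my_wi", cfs_cpt_table,
                                           CFS_CPT_ANY, 1, &my_sched);
        }

        static void
        my_subsystem_shutdown(void)
        {
                /* sets ws_stopping, wakes ws_waitq, waits for
                 * ws_nthreads to drain, then unlinks and frees */
                cfs_wi_sched_destroy(my_sched);
        }

Note that after this patch cfs_wi_sched_create() can fail with -E2BIG as well as -ENOMEM (or a thread-start error), so callers must treat the scheduler name as a bounded identifier of at most CFS_WS_NAME_LEN-1 characters.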