X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Flibcfs%2Fworkitem.c;h=722b25dea859ff72fa730e3e5ddb6b1029c5e384;hb=4df63615669a69b51c752cc4e416f705f8a56197;hp=ecbe855d3411c9418531538adbd85452adf95f06;hpb=a3850a044ef7cc2658b355672338b9865ed59fe4;p=fs%2Flustre-release.git

diff --git a/libcfs/libcfs/workitem.c b/libcfs/libcfs/workitem.c
index ecbe855..722b25d 100644
--- a/libcfs/libcfs/workitem.c
+++ b/libcfs/libcfs/workitem.c
@@ -46,21 +46,21 @@
 #define CFS_WS_NAME_LEN         16
 
 typedef struct cfs_wi_sched {
-	cfs_list_t		ws_list;	/* chain on global list */
+	struct list_head	ws_list;	/* chain on global list */
 #ifdef __KERNEL__
 	/** serialised workitems */
-        spinlock_t              ws_lock;
+	spinlock_t		ws_lock;
 	/** where schedulers sleep */
 	wait_queue_head_t	ws_waitq;
 #endif
 	/** concurrent workitems */
-	cfs_list_t		ws_runq;
+	struct list_head	ws_runq;
 	/** rescheduled running-workitems, a workitem can be rescheduled
 	 * while running in wi_action(), but we don't to execute it again
 	 * unless it returns from wi_action(), so we put it on ws_rerunq
 	 * while rescheduling, and move it to runq after it returns
 	 * from wi_action() */
-	cfs_list_t		ws_rerunq;
+	struct list_head	ws_rerunq;
 	/** CPT-table for this scheduler */
 	struct cfs_cpt_table	*ws_cptab;
 	/** CPT id for affinity */
@@ -81,7 +81,7 @@ struct cfs_workitem_data {
 	/** serialize */
 	spinlock_t		wi_glock;
 	/** list of all schedulers */
-	cfs_list_t		wi_scheds;
+	struct list_head	wi_scheds;
 	/** WI module is initialized */
 	int			wi_init;
 	/** shutting down the whole WI module */
@@ -106,16 +106,16 @@ cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
 {
 	cfs_wi_sched_lock(sched);
 	if (sched->ws_stopping) {
-                cfs_wi_sched_unlock(sched);
-                return 0;
-        }
+		cfs_wi_sched_unlock(sched);
+		return 0;
+	}
 
-        if (!cfs_list_empty(&sched->ws_runq)) {
-                cfs_wi_sched_unlock(sched);
-                return 0;
-        }
-        cfs_wi_sched_unlock(sched);
-        return 1;
+	if (!list_empty(&sched->ws_runq)) {
+		cfs_wi_sched_unlock(sched);
+		return 0;
+	}
+	cfs_wi_sched_unlock(sched);
+	return 1;
 }
 
 #else /* !__KERNEL__ */
@@ -141,7 +141,7 @@ cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
 void
 cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 {
-	LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
 
 	cfs_wi_sched_lock(sched);
@@ -150,14 +150,14 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	LASSERT(wi->wi_running);
 #endif
 	if (wi->wi_scheduled) { /* cancel pending schedules */
-		LASSERT(!cfs_list_empty(&wi->wi_list));
-		cfs_list_del_init(&wi->wi_list);
+		LASSERT(!list_empty(&wi->wi_list));
+		list_del_init(&wi->wi_list);
 
 		LASSERT(sched->ws_nscheduled > 0);
 		sched->ws_nscheduled--;
 	}
 
-	LASSERT(cfs_list_empty(&wi->wi_list));
+	LASSERT(list_empty(&wi->wi_list));
 
 	wi->wi_scheduled = 1; /* LBUG future schedule attempts */
 	cfs_wi_sched_unlock(sched);
@@ -174,7 +174,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 {
 	int	rc;
 
-	LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
 
 	/*
@@ -187,19 +187,19 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 	rc = !(wi->wi_running);
 
 	if (wi->wi_scheduled) { /* cancel pending schedules */
-		LASSERT(!cfs_list_empty(&wi->wi_list));
-		cfs_list_del_init(&wi->wi_list);
+		LASSERT(!list_empty(&wi->wi_list));
+		list_del_init(&wi->wi_list);
 
 		LASSERT(sched->ws_nscheduled > 0);
 		sched->ws_nscheduled--;
 
-                wi->wi_scheduled = 0;
-        }
+		wi->wi_scheduled = 0;
+	}
 
-        LASSERT (cfs_list_empty(&wi->wi_list));
+	LASSERT (list_empty(&wi->wi_list));
 
-        cfs_wi_sched_unlock(sched);
-        return rc;
+	cfs_wi_sched_unlock(sched);
+	return rc;
 }
 EXPORT_SYMBOL(cfs_wi_deschedule);
 
@@ -213,27 +213,27 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
 void
 cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
 {
-	LASSERT(!cfs_in_interrupt()); /* because we use plain spinlock */
+	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
 
 	cfs_wi_sched_lock(sched);
 	if (!wi->wi_scheduled) {
-		LASSERT (cfs_list_empty(&wi->wi_list));
+		LASSERT (list_empty(&wi->wi_list));
 
 		wi->wi_scheduled = 1;
 		sched->ws_nscheduled++;
 		if (!wi->wi_running) {
-			cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
+			list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
 			wake_up(&sched->ws_waitq);
#endif
 		} else {
-			cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
+			list_add(&wi->wi_list, &sched->ws_rerunq);
 		}
 	}
 
-	LASSERT (!cfs_list_empty(&wi->wi_list));
+	LASSERT (!list_empty(&wi->wi_list));
 	cfs_wi_sched_unlock(sched);
 	return;
 }
@@ -265,17 +265,17 @@ cfs_wi_scheduler (void *arg)
 	cfs_wi_sched_lock(sched);
 
 	while (!sched->ws_stopping) {
-                int             nloops = 0;
-                int             rc;
-                cfs_workitem_t *wi;
-
-                while (!cfs_list_empty(&sched->ws_runq) &&
-                       nloops < CFS_WI_RESCHED) {
-                        wi = cfs_list_entry(sched->ws_runq.next,
-                                            cfs_workitem_t, wi_list);
+		int		nloops = 0;
+		int		rc;
+		cfs_workitem_t	*wi;
+
+		while (!list_empty(&sched->ws_runq) &&
+		       nloops < CFS_WI_RESCHED) {
+			wi = list_entry(sched->ws_runq.next,
+					cfs_workitem_t, wi_list);
 			LASSERT(wi->wi_scheduled && !wi->wi_running);
 
-			cfs_list_del_init(&wi->wi_list);
+			list_del_init(&wi->wi_list);
 
 			LASSERT(sched->ws_nscheduled > 0);
 			sched->ws_nscheduled--;
@@ -293,17 +293,17 @@ cfs_wi_scheduler (void *arg)
 			if (rc != 0) /* WI should be dead, even be freed! */
 				continue;
 
-                        wi->wi_running = 0;
-                        if (cfs_list_empty(&wi->wi_list))
+			wi->wi_running = 0;
+			if (list_empty(&wi->wi_list))
 				continue;
 
 			LASSERT(wi->wi_scheduled);
-                        /* wi is rescheduled, should be on rerunq now, we
-                         * move it to runq so it can run action now */
-                        cfs_list_move_tail(&wi->wi_list, &sched->ws_runq);
+			/* wi is rescheduled, should be on rerunq now, we
+			 * move it to runq so it can run action now */
+			list_move_tail(&wi->wi_list, &sched->ws_runq);
 		}
 
-		if (!cfs_list_empty(&sched->ws_runq)) {
+		if (!list_empty(&sched->ws_runq)) {
 			cfs_wi_sched_unlock(sched);
 			/* don't sleep because some workitems still
 			 * expect me to come back soon */
@@ -342,9 +342,8 @@ cfs_wi_check_events (void)
 		struct cfs_wi_sched	*tmp;
 
 		/** rerunq is always empty for userspace */
-		cfs_list_for_each_entry(tmp,
-					&cfs_wi_data.wi_scheds, ws_list) {
-			if (!cfs_list_empty(&tmp->ws_runq)) {
+		list_for_each_entry(tmp, &cfs_wi_data.wi_scheds, ws_list) {
+			if (!list_empty(&tmp->ws_runq)) {
 				sched = tmp;
 				break;
 			}
@@ -353,9 +352,9 @@ cfs_wi_check_events (void)
 		if (sched == NULL)
 			break;
 
-		wi = cfs_list_entry(sched->ws_runq.next,
+		wi = list_entry(sched->ws_runq.next,
 				    cfs_workitem_t, wi_list);
-		cfs_list_del_init(&wi->wi_list);
+		list_del_init(&wi->wi_list);
 
 		LASSERT(sched->ws_nscheduled > 0);
 		sched->ws_nscheduled--;
@@ -390,7 +389,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 		return;
 	}
 
-	LASSERT(!cfs_list_empty(&sched->ws_list));
+	LASSERT(!list_empty(&sched->ws_list));
 	sched->ws_stopping = 1;
 
 	spin_unlock(&cfs_wi_data.wi_glock);
@@ -414,7 +413,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 		}
 	}
 
-	cfs_list_del(&sched->ws_list);
+	list_del(&sched->ws_list);
 	spin_unlock(&cfs_wi_data.wi_glock);
#endif
 
@@ -439,7 +438,12 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 	if (sched == NULL)
 		return -ENOMEM;
 
-	strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
+	if (strlen(name) > sizeof(sched->ws_name)-1) {
+		LIBCFS_FREE(sched, sizeof(*sched));
+		return -E2BIG;
+	}
+	strlcpy(sched->ws_name, name, sizeof(sched->ws_name));
+
 	sched->ws_cptab = cptab;
 	sched->ws_cpt = cpt;
 
@@ -447,9 +451,9 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 	spin_lock_init(&sched->ws_lock);
 	init_waitqueue_head(&sched->ws_waitq);
#endif
-	CFS_INIT_LIST_HEAD(&sched->ws_runq);
-	CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
-	CFS_INIT_LIST_HEAD(&sched->ws_list);
+	INIT_LIST_HEAD(&sched->ws_runq);
+	INIT_LIST_HEAD(&sched->ws_rerunq);
+	INIT_LIST_HEAD(&sched->ws_list);
 
#ifdef __KERNEL__
 	for (; nthrs > 0; nthrs--) {
@@ -485,7 +489,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 			spin_lock(&cfs_wi_data.wi_glock);
 
 			/* make up for cfs_wi_sched_destroy */
-			cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+			list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
 			sched->ws_starting--;
 
 			spin_unlock(&cfs_wi_data.wi_glock);
@@ -496,7 +500,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 	}
#endif
 	spin_lock(&cfs_wi_data.wi_glock);
-	cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+	list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
 	spin_unlock(&cfs_wi_data.wi_glock);
 
 	*sched_pp = sched;
@@ -510,7 +514,7 @@ cfs_wi_startup(void)
 	memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
 	spin_lock_init(&cfs_wi_data.wi_glock);
-	CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
+	INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
 	cfs_wi_data.wi_init = 1;
 
 	return 0;
@@ -527,12 +531,12 @@ cfs_wi_shutdown (void)
 
#ifdef __KERNEL__
 	/* nobody should contend on this list */
-	cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+	list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
 		sched->ws_stopping = 1;
 		wake_up_all(&sched->ws_waitq);
 	}
 
-	cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+	list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
 		spin_lock(&cfs_wi_data.wi_glock);
 
 		while (sched->ws_nthreads != 0) {
@@ -543,10 +547,10 @@ cfs_wi_shutdown (void)
 		spin_unlock(&cfs_wi_data.wi_glock);
 	}
#endif
-	while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
-		sched = cfs_list_entry(cfs_wi_data.wi_scheds.next,
+	while (!list_empty(&cfs_wi_data.wi_scheds)) {
+		sched = list_entry(cfs_wi_data.wi_scheds.next,
 				       struct cfs_wi_sched, ws_list);
-		cfs_list_del(&sched->ws_list);
+		list_del(&sched->ws_list);
 
 		LIBCFS_FREE(sched, sizeof(*sched));
 	}
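
The one behavioral change amid the mechanical cfs_list_* renames above is in
cfs_wi_sched_create(): the old strncpy() call could leave ws_name without a
NUL terminator whenever the name filled all CFS_WS_NAME_LEN bytes, while the
patched code rejects oversized names with -E2BIG and copies with strlcpy(),
which always terminates. The sketch below models that pattern in plain
userspace C; checked_name_copy() is a hypothetical helper, and local_strlcpy()
stands in for the kernel's strlcpy(), which standard C does not provide.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define WS_NAME_LEN 16

/* Stand-in for the kernel's strlcpy(): copies at most size-1 bytes
 * and always NUL-terminates the destination. */
static size_t local_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size != 0) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

/* Hypothetical helper mirroring the patched cfs_wi_sched_create():
 * refuse names that cannot fit rather than silently truncating. */
static int checked_name_copy(char *dst, const char *name)
{
	if (strlen(name) > WS_NAME_LEN - 1)
		return -E2BIG;
	local_strlcpy(dst, name, WS_NAME_LEN);
	return 0;
}

int main(void)
{
	char ws_name[WS_NAME_LEN];

	/* fits in 16 bytes including the terminator: prints 0 */
	printf("%d\n", checked_name_copy(ws_name, "cfs_wi_sd"));
	/* 25 characters cannot fit in 16 bytes: prints -E2BIG */
	printf("%d\n", checked_name_copy(ws_name, "a_very_long_sched_name_xx"));
	return 0;
}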
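
Everything else in the patch swaps the libcfs list wrappers (cfs_list_empty(),
cfs_list_del_init(), cfs_list_move_tail(), ...) for the native Linux list
primitives with identical semantics. The scheduler loop quietly relies on one
property of those primitives: after list_del_init(), list_empty() applied to
the entry itself returns true, which is how cfs_wi_scheduler() detects whether
wi_action() rescheduled the workitem onto ws_rerunq while it was running. A
minimal self-contained model of that idiom, assuming only standard C:

#include <assert.h>
#include <stdio.h>

/* Intrusive circular doubly-linked list, as in the Linux kernel. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Unlike plain list_del(), this re-points the removed entry at itself,
 * so a later list_empty(&entry) is a cheap "am I queued anywhere?" test. */
static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

int main(void)
{
	struct list_head runq, wi_list;

	INIT_LIST_HEAD(&runq);
	INIT_LIST_HEAD(&wi_list);

	list_add_tail(&wi_list, &runq);	/* cfs_wi_schedule() queues the item */
	assert(!list_empty(&wi_list));	/* entry is linked into the runq */

	list_del_init(&wi_list);	/* scheduler dequeues before wi_action() */
	assert(list_empty(&wi_list));	/* "was not rescheduled" test now holds */

	printf("list_del_init() leaves the entry self-linked\n");
	return 0;
}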