* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
cfs_list_t ws_list; /* chain on global list */
#ifdef __KERNEL__
/** serialised workitems */
- cfs_spinlock_t ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
cfs_waitq_t ws_waitq;
#endif
struct cfs_workitem_data {
/** serialize */
- cfs_spinlock_t wi_glock;
+ spinlock_t wi_glock;
/** list of all schedulers */
cfs_list_t wi_scheds;
/** WI module is initialized */
/**
 * Serialise access to \a sched's workitem queues (kernel build):
 * acquires the per-scheduler ws_lock.
 */
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
	spin_lock(&sched->ws_lock);
}
/**
 * Release \a sched's per-scheduler ws_lock, paired with
 * cfs_wi_sched_lock() (kernel build).
 */
static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
	spin_unlock(&sched->ws_lock);
}
static inline int
/**
 * Userspace build: there is no per-scheduler ws_lock (it exists only
 * under __KERNEL__), so serialise on the single global wi_glock.
 * \a sched is unused here but kept so callers are build-independent.
 */
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
	spin_lock(&cfs_wi_data.wi_glock);
}
/**
 * Userspace build: release the global wi_glock taken by
 * cfs_wi_sched_lock(). \a sched is unused (see lock side).
 */
static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
	spin_unlock(&cfs_wi_data.wi_glock);
}
#endif /* __KERNEL__ */
cfs_wi_scheduler (void *arg)
{
struct cfs_wi_sched *sched = (cfs_wi_sched_t *)arg;
- char name[16];
-
- if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
- snprintf(name, sizeof(name), "%s_%02d_%02d",
- sched->ws_name, sched->ws_cpt, sched->ws_nthreads);
- } else {
- snprintf(name, sizeof(name), "%s_%02d",
- sched->ws_name, sched->ws_nthreads);
- }
- cfs_daemonize(name);
cfs_block_allsigs();
/* CPT affinity scheduler? */
if (sched->ws_cptab != NULL)
- cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
+ if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
+ CWARN("Failed to bind %s on CPT %d\n",
+ sched->ws_name, sched->ws_cpt);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
LASSERT(sched->ws_starting == 1);
sched->ws_starting--;
sched->ws_nthreads++;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_wi_sched_lock(sched);
continue;
}
- cfs_wi_sched_unlock(sched);
- cfs_wait_event_interruptible_exclusive(sched->ws_waitq,
- !cfs_wi_sched_cansleep(sched), rc);
- cfs_wi_sched_lock(sched);
+ cfs_wi_sched_unlock(sched);
+ rc = wait_event_interruptible_exclusive(sched->ws_waitq,
+ !cfs_wi_sched_cansleep(sched));
+ cfs_wi_sched_lock(sched);
}
cfs_wi_sched_unlock(sched);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
sched->ws_nthreads--;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
- return 0;
+ return 0;
}
#else /* __KERNEL__ */
int
cfs_wi_check_events (void)
{
- int n = 0;
- cfs_workitem_t *wi;
+ int n = 0;
+ cfs_workitem_t *wi;
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
- for (;;) {
+ for (;;) {
struct cfs_wi_sched *sched = NULL;
struct cfs_wi_sched *tmp;
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- LASSERT (wi->wi_scheduled);
- wi->wi_scheduled = 0;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ LASSERT(wi->wi_scheduled);
+ wi->wi_scheduled = 0;
+ spin_unlock(&cfs_wi_data.wi_glock);
- n++;
- (*wi->wi_action) (wi);
+ n++;
+ (*wi->wi_action) (wi);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
- }
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
- return n;
+ spin_unlock(&cfs_wi_data.wi_glock);
+ return n;
}
#endif
void
cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
{
- int i;
-
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
if (sched->ws_stopping) {
CDEBUG(D_INFO, "%s is in progress of stopping\n",
sched->ws_name);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
return;
}
LASSERT(!cfs_list_empty(&sched->ws_list));
sched->ws_stopping = 1;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
- i = 2;
#ifdef __KERNEL__
cfs_waitq_broadcast(&sched->ws_waitq);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
- while (sched->ws_nthreads > 0) {
- CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
- "waiting for %d threads of WI sched[%s] to terminate\n",
- sched->ws_nthreads, sched->ws_name);
+ spin_lock(&cfs_wi_data.wi_glock);
+ {
+ int i = 2;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
- cfs_pause(cfs_time_seconds(1) / 20);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ while (sched->ws_nthreads > 0) {
+ CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
+ "waiting for %d threads of WI sched[%s] to "
+ "terminate\n", sched->ws_nthreads,
+ sched->ws_name);
+
+ spin_unlock(&cfs_wi_data.wi_glock);
+ cfs_pause(cfs_time_seconds(1) / 20);
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
}
cfs_list_del(&sched->ws_list);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
#endif
LASSERT(sched->ws_nscheduled == 0);
int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
{
struct cfs_wi_sched *sched;
- int rc;
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
sched->ws_cpt = cpt;
#ifdef __KERNEL__
- cfs_spin_lock_init(&sched->ws_lock);
+ spin_lock_init(&sched->ws_lock);
cfs_waitq_init(&sched->ws_waitq);
#endif
CFS_INIT_LIST_HEAD(&sched->ws_runq);
CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
CFS_INIT_LIST_HEAD(&sched->ws_list);
- rc = 0;
#ifdef __KERNEL__
- while (nthrs > 0) {
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ for (; nthrs > 0; nthrs--) {
+ char name[16];
+ cfs_task_t *task;
+
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_schedule();
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
sched->ws_starting++;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
-
- rc = cfs_create_thread(cfs_wi_scheduler, sched, 0);
- if (rc >= 0) {
- nthrs--;
- continue;
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
+ snprintf(name, sizeof(name), "%s_%02d_%02d",
+ sched->ws_name, sched->ws_cpt,
+ sched->ws_nthreads);
+ } else {
+ snprintf(name, sizeof(name), "%s_%02d",
+ sched->ws_name, sched->ws_nthreads);
}
- CERROR("Failed to create thread for WI scheduler %s: %d\n",
- name, rc);
+ task = kthread_run(cfs_wi_scheduler, sched, name);
+ if (IS_ERR(task)) {
+ int rc = PTR_ERR(task);
+
+ CERROR("Failed to create thread for "
+ "WI scheduler %s: %d\n", name, rc);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
- /* make up for cfs_wi_sched_destroy */
- cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- sched->ws_starting--;
+ /* make up for cfs_wi_sched_destroy */
+ cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+ sched->ws_starting--;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
- cfs_wi_sched_destroy(sched);
- return rc;
+ cfs_wi_sched_destroy(sched);
+ return rc;
+ }
}
#endif
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
*sched_pp = sched;
return 0;
{
memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
- cfs_spin_lock_init(&cfs_wi_data.wi_glock);
+ spin_lock_init(&cfs_wi_data.wi_glock);
CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
cfs_wi_data.wi_init = 1;
{
struct cfs_wi_sched *sched;
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
cfs_wi_data.wi_stopping = 1;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
#ifdef __KERNEL__
/* nobody should contend on this list */
}
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads != 0) {
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_pause(cfs_time_seconds(1) / 20);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
}
#endif
while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {