cfs_list_entry(pos, struct ptlrpc_request,
rq_set_chain);
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
- req->rq_set = new;
- req->rq_queued_time = cfs_time_current();
+ req->rq_set = new;
+ req->rq_queued_time = cfs_time_current();
#else
- cfs_list_del_init(&req->rq_set_chain);
- req->rq_set = NULL;
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- cfs_atomic_dec(&set->set_remaining);
+ cfs_list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ atomic_dec(&set->set_remaining);
#endif
- }
+ }
#ifdef __KERNEL__
spin_lock(&new->set_new_req_lock);
cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
- i = cfs_atomic_read(&set->set_remaining);
- count = cfs_atomic_add_return(i, &new->set_new_count);
- cfs_atomic_set(&set->set_remaining, 0);
+ i = atomic_read(&set->set_remaining);
+ count = atomic_add_return(i, &new->set_new_count);
+ atomic_set(&set->set_remaining, 0);
spin_unlock(&new->set_new_req_lock);
if (count == i) {
wake_up(&new->set_waitq);
}
cfs_list_splice_init(&src->set_new_requests,
&des->set_requests);
- rc = cfs_atomic_read(&src->set_new_count);
- cfs_atomic_add(rc, &des->set_remaining);
- cfs_atomic_set(&src->set_new_count, 0);
- }
+ rc = atomic_read(&src->set_new_count);
+ atomic_add(rc, &des->set_remaining);
+ atomic_set(&src->set_new_count, 0);
+ }
spin_unlock(&src->set_new_req_lock);
return rc;
}
*/
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
- struct ptlrpcd_ctl *pc;
+ struct ptlrpcd_ctl *pc;
if (req->rq_reqmsg)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
spin_lock(&req->rq_lock);
- if (req->rq_invalid_rqset) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
- back_to_sleep, NULL);
+ if (req->rq_invalid_rqset) {
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
+ back_to_sleep, NULL);
- req->rq_invalid_rqset = 0;
+ req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
- } else if (req->rq_set) {
- /* If we have a vaid "rq_set", just reuse it to avoid double
- * linked. */
- LASSERT(req->rq_phase == RQ_PHASE_NEW);
- LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
-
- /* ptlrpc_check_set will decrease the count */
- cfs_atomic_inc(&req->rq_set->set_remaining);
+ l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+ } else if (req->rq_set) {
+ /* If we have a valid "rq_set", just reuse it to avoid double
+ * linking. */
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
+
+ /* ptlrpc_check_set will decrease the count */
+ atomic_inc(&req->rq_set->set_remaining);
spin_unlock(&req->rq_lock);
wake_up(&req->rq_set->set_waitq);
return;
} else {
spin_unlock(&req->rq_lock);
- }
+ }
- pc = ptlrpcd_select_pc(req, policy, idx);
+ pc = ptlrpcd_select_pc(req, policy, idx);
- DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
- req, pc->pc_name, pc->pc_index);
+ DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
+ req, pc->pc_name, pc->pc_index);
- ptlrpc_set_add_new_req(pc, req);
+ ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
- cfs_atomic_inc(&set->set_refcount);
+ atomic_inc(&set->set_refcount);
}
/**
int rc2;
ENTRY;
- if (cfs_atomic_read(&set->set_new_count)) {
+ if (atomic_read(&set->set_new_count)) {
spin_lock(&set->set_new_req_lock);
- if (likely(!cfs_list_empty(&set->set_new_requests))) {
- cfs_list_splice_init(&set->set_new_requests,
- &set->set_requests);
- cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
- &set->set_remaining);
- cfs_atomic_set(&set->set_new_count, 0);
- /*
- * Need to calculate its timeout.
- */
- rc = 1;
- }
+ if (likely(!cfs_list_empty(&set->set_new_requests))) {
+ cfs_list_splice_init(&set->set_new_requests,
+ &set->set_requests);
+ atomic_add(atomic_read(&set->set_new_count),
+ &set->set_remaining);
+ atomic_set(&set->set_new_count, 0);
+ /*
+ * Need to calculate its timeout.
+ */
+ rc = 1;
+ }
spin_unlock(&set->set_new_req_lock);
- }
+ }
- /* We should call lu_env_refill() before handling new requests to make
- * sure that env key the requests depending on really exists.
- */
- rc2 = lu_env_refill(env);
- if (rc2 != 0) {
- /*
- * XXX This is very awkward situation, because
- * execution can neither continue (request
- * interpreters assume that env is set up), nor repeat
- * the loop (as this potentially results in a tight
- * loop of -ENOMEM's).
- *
- * Fortunately, refill only ever does something when
- * new modules are loaded, i.e., early during boot up.
- */
- CERROR("Failure to refill session: %d\n", rc2);
- RETURN(rc);
- }
+ /* We should call lu_env_refill() before handling new requests to make
+ * sure that the env keys the requests depend on really exist.
+ */
+ rc2 = lu_env_refill(env);
+ if (rc2 != 0) {
+ /*
+ * XXX This is a very awkward situation, because
+ * execution can neither continue (request
+ * interpreters assume that env is set up), nor repeat
+ * the loop (as this potentially results in a tight
+ * loop of -ENOMEM's).
+ *
+ * Fortunately, refill only ever does something when
+ * new modules are loaded, i.e., early during boot up.
+ */
+ CERROR("Failure to refill session: %d\n", rc2);
+ RETURN(rc);
+ }
- if (cfs_atomic_read(&set->set_remaining))
- rc |= ptlrpc_check_set(env, set);
+ if (atomic_read(&set->set_remaining))
+ rc |= ptlrpc_check_set(env, set);
if (!cfs_list_empty(&set->set_requests)) {
/*
}
}
- if (rc == 0) {
- /*
- * If new requests have been added, make sure to wake up.
- */
- rc = cfs_atomic_read(&set->set_new_count);
+ if (rc == 0) {
+ /*
+ * If new requests have been added, make sure to wake up.
+ */
+ rc = atomic_read(&set->set_new_count);
#ifdef __KERNEL__
/* If we have nothing to do, check whether we can take some
ptlrpc_reqset_get(ps);
spin_unlock(&partner->pc_lock);
- if (cfs_atomic_read(&ps->set_new_count)) {
- rc = ptlrpcd_steal_rqset(set, ps);
- if (rc > 0)
- CDEBUG(D_RPCTRACE, "transfer %d"
- " async RPCs [%d->%d]\n",
- rc, partner->pc_index,
- pc->pc_index);
- }
- ptlrpc_reqset_put(ps);
- } while (rc == 0 && pc->pc_cursor != first);
- }
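+ /* If the partner has newly queued requests, steal them so this
+ * otherwise idle thread can process them. */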
+ if (atomic_read(&ps->set_new_count)) {
+ rc = ptlrpcd_steal_rqset(set, ps);
+ if (rc > 0)
+ CDEBUG(D_RPCTRACE, "transfer %d"
+ " async RPCs [%d->%d]\n",
+ rc, partner->pc_index,
+ pc->pc_index);
+ }
+ ptlrpc_reqset_put(ps);
+ } while (rc == 0 && pc->pc_cursor != first);
+ }
#endif
- }
+ }
- RETURN(rc);
+ RETURN(rc);
}
#ifdef __KERNEL__
*/
static int ptlrpcd(void *arg)
{
- struct ptlrpcd_ctl *pc = arg;
- struct ptlrpc_request_set *set = pc->pc_set;
- struct lu_env env = { .le_ses = NULL };
- int rc, exit = 0;
- ENTRY;
+ struct ptlrpcd_ctl *pc = arg;
+ struct ptlrpc_request_set *set = pc->pc_set;
+ struct lu_context ses = { 0 };
+ struct lu_env env = { .le_ses = &ses };
+ int rc, exit = 0;
+ ENTRY;
unshare_fs_struct();
#if defined(CONFIG_SMP)
if (++index >= num_possible_cpus())
index = 0;
}
- set_cpus_allowed_ptr(cfs_current(),
+ set_cpus_allowed_ptr(current,
cpumask_of_node(cpu_to_node(index)));
}
}
#endif
- /*
- * XXX So far only "client" ptlrpcd uses an environment. In
- * the future, ptlrpcd thread (or a thread-set) has to given
- * an argument, describing its "scope".
- */
- rc = lu_context_init(&env.le_ctx,
- LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ /* Both client and server (MDT/OST) may use the environment. */
+ rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD | LCT_DT_THREAD |
+ LCT_CL_THREAD | LCT_REMEMBER |
+ LCT_NOREF);
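+ /* Also set up a per-thread session context so that request
+ * interpreters running in ptlrpcd can use session-scoped keys;
+ * undo the thread context if session initialization fails. */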
+ if (rc == 0) {
+ rc = lu_context_init(env.le_ses,
+ LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
+ if (rc != 0)
+ lu_context_fini(&env.le_ctx);
+ }
complete(&pc->pc_starting);
if (rc != 0)
lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
ptlrpc_expired_set, set);
- lu_context_enter(&env.le_ctx);
- l_wait_event(set->set_waitq,
- ptlrpcd_check(&env, pc), &lwi);
- lu_context_exit(&env.le_ctx);
+ lu_context_enter(&env.le_ctx);
+ lu_context_enter(env.le_ses);
+ l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
+ lu_context_exit(&env.le_ctx);
+ lu_context_exit(env.le_ses);
- /*
- * Abort inflight rpcs for forced stop case.
- */
+ /*
+ * Abort in-flight RPCs for the forced stop case.
+ */
if (test_bit(LIOD_STOP, &pc->pc_flags)) {
if (test_bit(LIOD_FORCE, &pc->pc_flags))
ptlrpc_abort_set(set);
*/
if (!cfs_list_empty(&set->set_requests))
ptlrpc_set_wait(set);
- lu_context_fini(&env.le_ctx);
+ lu_context_fini(&env.le_ctx);
+ lu_context_fini(env.le_ses);
complete(&pc->pc_finishing);
- return 0;
+ return 0;
}
/* XXX: We want multiple CPU cores to share the async RPC load. So we start many
int ptlrpcd_idle(void *arg)
{
- struct ptlrpcd_ctl *pc = arg;
+ struct ptlrpcd_ctl *pc = arg;
- return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
- cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
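+ /* Idle when there are neither newly queued nor in-flight requests. */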
+ return (atomic_read(&pc->pc_set->set_new_count) == 0 &&
+ atomic_read(&pc->pc_set->set_remaining) == 0);
}
#endif
init_completion(&pc->pc_starting);
init_completion(&pc->pc_finishing);
spin_lock_init(&pc->pc_lock);
- strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
+ strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
GOTO(out, rc = -ENOMEM);
GOTO(out_set, rc);
{
- cfs_task_t *task;
+ struct task_struct *task;
if (index >= 0) {
rc = ptlrpcd_bind(index, max);
if (rc < 0)