if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
return &ptlrpcd_rcv;
- cpt = cfs_cpt_current(cfs_cpt_table, 1);
+ cpt = cfs_cpt_current(cfs_cpt_tab, 1);
if (ptlrpcds_cpt_idx == NULL)
idx = cpt;
else
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
struct ptlrpc_request_set *src)
{
- struct list_head *tmp, *pos;
struct ptlrpc_request *req;
int rc = 0;
spin_lock(&src->set_new_req_lock);
if (likely(!list_empty(&src->set_new_requests))) {
- list_for_each_safe(pos, tmp, &src->set_new_requests) {
- req = list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ list_for_each_entry(req, &src->set_new_requests, rq_set_chain)
req->rq_set = des;
- }
+
list_splice_init(&src->set_new_requests,
&des->set_requests);
rc = atomic_read(&src->set_new_count);
spin_lock(&req->rq_lock);
if (req->rq_invalid_rqset) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
- back_to_sleep, NULL);
-
req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+ if (wait_event_idle_timeout(req->rq_set_waitq,
+ req->rq_set == NULL,
+ cfs_time_seconds(5)) == 0)
+ l_wait_event_abortable(req->rq_set_waitq,
+ req->rq_set == NULL);
} else if (req->rq_set) {
/*
* If we have a valid "rq_set", just reuse it to avoid double
*/
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
- struct list_head *tmp, *pos;
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req, *tmp;
struct ptlrpc_request_set *set = pc->pc_set;
int rc = 0;
int rc2;
* NB: ptlrpc_check_set has already moved completed requests to the
* head of set::set_requests
*/
- list_for_each_safe(pos, tmp, &set->set_requests) {
- req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
+ list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) {
if (req->rq_phase != RQ_PHASE_COMPLETE)
break;
int exit = 0;
ENTRY;
-
- unshare_fs_struct();
-
- if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
+ if (cfs_cpt_bind(cfs_cpt_tab, pc->pc_cpt) != 0)
CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);
/*
* new_req_list and ptlrpcd_check() moves them into the set.
*/
do {
- struct l_wait_info lwi;
time64_t timeout;
timeout = ptlrpc_set_next_timeout(set);
- lwi = LWI_TIMEOUT(cfs_time_seconds(timeout),
- ptlrpc_expired_set, set);
lu_context_enter(&env.le_ctx);
lu_context_enter(env.le_ses);
- l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
+ if (timeout == 0)
+ wait_event_idle(set->set_waitq,
+ ptlrpcd_check(&env, pc));
+ else if (wait_event_idle_timeout(set->set_waitq,
+ ptlrpcd_check(&env, pc),
+ cfs_time_seconds(timeout))
+ == 0)
+ ptlrpc_expired_set(set);
lu_context_exit(&env.le_ctx);
lu_context_exit(env.le_ses);
if (pc->pc_npartners <= 0)
GOTO(out, rc);
- OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_table, pc->pc_cpt,
+ OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_tab, pc->pc_cpt,
sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
if (pc->pc_partners == NULL) {
pc->pc_npartners = 0;
RETURN(0);
}
- task = kthread_run(ptlrpcd, pc, pc->pc_name);
+ task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
if (IS_ERR(task))
GOTO(out_set, rc = PTR_ERR(task));
if (pc->pc_npartners > 0) {
LASSERT(pc->pc_partners != NULL);
- OBD_FREE(pc->pc_partners,
- sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
+ OBD_FREE_PTR_ARRAY(pc->pc_partners, pc->pc_npartners);
pc->pc_partners = NULL;
}
pc->pc_npartners = 0;
OBD_FREE(ptlrpcds[i], ptlrpcds[i]->pd_size);
ptlrpcds[i] = NULL;
}
- OBD_FREE(ptlrpcds, sizeof(ptlrpcds[0]) * ptlrpcds_num);
+ OBD_FREE_PTR_ARRAY(ptlrpcds, ptlrpcds_num);
}
ptlrpcds_num = 0;
ptlrpcd_free(&ptlrpcd_rcv);
if (ptlrpcds_cpt_idx != NULL) {
- ncpts = cfs_cpt_number(cfs_cpt_table);
- OBD_FREE(ptlrpcds_cpt_idx, ncpts * sizeof(ptlrpcds_cpt_idx[0]));
+ ncpts = cfs_cpt_number(cfs_cpt_tab);
+ OBD_FREE_PTR_ARRAY(ptlrpcds_cpt_idx, ncpts);
ptlrpcds_cpt_idx = NULL;
}
/*
* Determine the CPTs that ptlrpcd threads will run on.
*/
- cptable = cfs_cpt_table;
+ cptable = cfs_cpt_tab;
ncpts = cfs_cpt_number(cptable);
if (ptlrpcd_cpts != NULL) {
struct cfs_expr_list *el;