*/
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
-	cfs_list_t *tmp, *next;
+	cfs_list_t *tmp, *next;
+	cfs_list_t comp_reqs;
	int force_timer_recalc = 0;
	ENTRY;

	if (cfs_atomic_read(&set->set_remaining) == 0)
		RETURN(1);

+	CFS_INIT_LIST_HEAD(&comp_reqs);
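+	/* comp_reqs collects requests that have reached RQ_PHASE_COMPLETE;
+	 * they are spliced back to the head of set->set_requests below */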
	cfs_list_for_each_safe(tmp, next, &set->set_requests) {
		struct ptlrpc_request *req =
			cfs_list_entry(tmp, struct ptlrpc_request,
				       rq_set_chain);

			ptlrpc_rqphase_move(req, req->rq_next_phase);
		}
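
+		/* requests that have reached RQ_PHASE_COMPLETE are parked
+		 * on comp_reqs so that they end up grouped at the head of
+		 * the list once it is spliced back below */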
-		if (req->rq_phase == RQ_PHASE_COMPLETE)
+		if (req->rq_phase == RQ_PHASE_COMPLETE) {
+			cfs_list_move_tail(&req->rq_set_chain, &comp_reqs);
			continue;
+		}

		if (req->rq_phase == RQ_PHASE_INTERPRET)
			GOTO(interpret, req->rq_status);

			if (req->rq_status != 0)
				set->set_rc = req->rq_status;
			ptlrpc_req_finished(req);
+		} else {
+			/* the completed request stays on the set; it is
+			 * parked on comp_reqs for the caller to reap */
+			cfs_list_move_tail(&req->rq_set_chain, &comp_reqs);
		}
	}

+	/* move completed requests to the head of the list so that it is
+	 * easier for the caller to find them */
+	cfs_list_splice(&comp_reqs, &set->set_requests);
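+	/* a scan of set->set_requests can therefore stop at the first
+	 * request that is not yet in RQ_PHASE_COMPLETE */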

	/* If we hit an error, we want to recover promptly. */
	RETURN(cfs_atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
	if (cfs_atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);
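+	/* ptlrpc_check_set() returns nonzero when the set has emptied or
+	 * force_timer_recalc was set, see the RETURN in it above */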

-	if (!cfs_list_empty(&set->set_requests)) {
-		/*
-		 * XXX: our set never completes, so we prune the completed
-		 * reqs after each iteration. boy could this be smarter.
-		 */
-		cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
-			req = cfs_list_entry(pos, struct ptlrpc_request,
-					     rq_set_chain);
-			if (req->rq_phase != RQ_PHASE_COMPLETE)
-				continue;
-
-			cfs_list_del_init(&req->rq_set_chain);
-			req->rq_set = NULL;
-			ptlrpc_req_finished(req);
-		}
-	}
+	/* NB: ptlrpc_check_set() has already moved the completed requests
+	 * to the head of set::set_requests */
+	cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
+		req = cfs_list_entry(pos, struct ptlrpc_request,
+				     rq_set_chain);
+		if (req->rq_phase != RQ_PHASE_COMPLETE)
+			break;
+
+		cfs_list_del_init(&req->rq_set_chain);
+		req->rq_set = NULL;
+		ptlrpc_req_finished(req);
+	}

	if (rc == 0) {
		/*