lustre_msg_set_service_time(req->rq_repmsg, service_time);
/* Report service time estimate for future client reqs */
lustre_msg_set_timeout(req->rq_repmsg, at_get(&svc->srv_at_estimate));
-
+
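+ /* let target_pack_pool_reply() include ldlm pool state (SLV/limit) in the reply while the export is still alive */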
+ if (req->rq_export && req->rq_export->exp_obd)
+ target_pack_pool_reply(req);
+
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_AT_SUPPORT) {
/* early replies go to offset 0, regular replies go after that*/
if (flags & PTLRPC_REPLY_EARLY) {
lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
}
- if (req->rq_export && req->rq_export->exp_obd)
- target_pack_pool_reply(req);
-
if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
conn = ptlrpc_get_connection(req->rq_peer, req->rq_self, NULL);
else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
spin_unlock(&svc->srv_at_lock);
+ /* we have a new earliest deadline, restart the timer */
+ ptlrpc_at_set_timer(svc);
+
CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
"replies\n", first, at_extra, counter);
if (first < 0)
/* ptlrpc_server_free_request may delete an entry out of the work
list */
- counter = 0;
spin_lock(&svc->srv_at_lock);
while (!list_empty(&work_list)) {
rq = list_entry(work_list.next, struct ptlrpc_request,
                rq_timed_list);
/* if the entry is still on the work list it hasn't been
   deleted, and is safe to take a ref to keep the req around */
atomic_inc(&rq->rq_refcount);
spin_unlock(&svc->srv_at_lock);
- if (ptlrpc_at_send_early_reply(rq, at_extra) == 0) {
- counter++;
+
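+ /* the early reply went out and the deadline moved, so re-add the req to the AT timed list */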
+ if (ptlrpc_at_send_early_reply(rq, at_extra) == 0)
ptlrpc_at_add_timed(rq);
- }
+
ptlrpc_server_req_decref(rq);
spin_lock(&svc->srv_at_lock);
}
spin_unlock(&svc->srv_at_lock);
- if (!counter)
- /* Nothing added to timed list, so we have to kick the timer
- ourselves. */
- ptlrpc_at_set_timer(svc);
-
RETURN(0);
}