cfs_waitq_init(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
- desc->bd_md_h = LNET_INVALID_HANDLE;
+ LNetInvalidateHandle(&desc->bd_md_h);
desc->bd_portal = portal;
desc->bd_type = type;
idx = import_at_get_index(req->rq_import,
req->rq_request_portal);
serv_est = at_get(&at->iat_service_estimate[idx]);
- /* add an arbitrary minimum: 125% +5 sec */
- req->rq_timeout = serv_est + (serv_est >> 2) + 5;
+ req->rq_timeout = at_est2timeout(serv_est);
/* We could get even fancier here, using history to predict increased
loading... */
unsigned int oldse;
struct imp_at *at;
+ /* do the estimate only if the import is not in recovery */
+ if (!(req->rq_send_state & (LUSTRE_IMP_FULL | LUSTRE_IMP_CONNECTING)))
+ return;
+
LASSERT(req->rq_import);
at = &req->rq_import->imp_at;
if (req->rq_phase == RQ_PHASE_NEW) {
if (req->rq_interpret_reply != NULL) {
- int (*interpreter)(struct ptlrpc_request *,
- void *, int) =
+ ptlrpc_interpterer_t interpreter =
req->rq_interpret_reply;
/* higher level (i.e. LOV) failed;
* let the sub reqs clean up */
req->rq_status = -EBADR;
- interpreter(req, &req->rq_async_args,
+ interpreter(NULL, req, &req->rq_async_args,
req->rq_status);
}
set->set_remaining--;
}
/* this sends any unsent RPCs in @set and returns TRUE if all are sent */
-int ptlrpc_check_set(struct ptlrpc_request_set *set)
+int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp;
int force_timer_recalc = 0;
spin_unlock(&imp->imp_lock);
req->rq_waiting = 0;
- if (req->rq_resend) {
+
+ if (req->rq_timedout||req->rq_resend) {
+ /* This is being re-sent anyway,
+ * so mark the request as a resend. */
+ req->rq_resend = 1;
lustre_msg_add_flags(req->rq_reqmsg,
MSG_RESENT);
if (req->rq_bulk) {
ptlrpc_unregister_bulk (req);
if (req->rq_interpret_reply != NULL) {
- int (*interpreter)(struct ptlrpc_request *,void *,int) =
+ ptlrpc_interpterer_t interpreter =
req->rq_interpret_reply;
- req->rq_status = interpreter(req, &req->rq_async_args,
+ req->rq_status = interpreter(NULL, req,
+ &req->rq_async_args,
req->rq_status);
}
req->rq_phase = RQ_PHASE_COMPLETE;
lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout ? timeout : 1),
ptlrpc_expired_set,
ptlrpc_interrupted_set, set);
- rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
+ rc = l_wait_event(set->set_waitq,
+ ptlrpc_check_set(NULL, set), &lwi);
LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
int praa_old_status;
};
-static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
+static int ptlrpc_replay_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
void * data, int rc)
{
struct ptlrpc_replay_async_args *aa = data;