+ spin_lock(&imp->imp_lock);
+
+ /* The XID is assigned here, the very first time the request is sent.
+ * It is important to assign the XID and put the request on the list
+ * atomically, so that the lowest assigned XID is always known. This
+ * is vital for the multislot last_rcvd. */
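+ /* Replayed requests keep the XID they were originally assigned. */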
+ if (req->rq_send_state == LUSTRE_IMP_REPLAY) {
+ LASSERT(req->rq_xid != 0);
+ } else {
+ LASSERT(req->rq_xid == 0);
+ req->rq_xid = ptlrpc_next_xid();
+ }
+
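+ /* Stamp the request with the current import generation unless the
+ * caller set one explicitly; the generation is used to detect
+ * requests that belong to an older connection. */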
+ if (!req->rq_generation_set)
+ req->rq_import_generation = imp->imp_generation;
+
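+ /* Decide whether the request must wait for import recovery (queued
+ * on the delayed list below) or fail outright (rc != 0). */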
+ if (ptlrpc_import_delay_req(imp, req, &rc)) {
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 1;
+ spin_unlock(&req->rq_lock);
+
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)",
+ lustre_msg_get_status(req->rq_reqmsg),
+ ptlrpc_import_state_name(req->rq_send_state),
+ ptlrpc_import_state_name(imp->imp_state));
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+ RETURN(0);
+ }
+
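+ /* The import state makes this request fail immediately: record the
+ * error and move straight to the interpret phase. */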
+ if (rc != 0) {
+ spin_unlock(&imp->imp_lock);
+ req->rq_status = rc;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ RETURN(rc);
+ }
+
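+ /* The request can be sent now: put it on the sending list while
+ * still holding imp_lock so the min-XID scan below sees it. */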
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ atomic_inc(&req->rq_import->imp_inflight);
+
+ /* Find the lowest unreplied XID by scanning both the delayed and the
+ * sending lists; the current request was just added to the latter. */
+ list_for_each(tmp, &imp->imp_delayed_list) {
+ struct ptlrpc_request *r;
+ r = list_entry(tmp, struct ptlrpc_request, rq_list);
+ if (r->rq_xid < min_xid)
+ min_xid = r->rq_xid;
+ }
+ list_for_each(tmp, &imp->imp_sending_list) {
+ struct ptlrpc_request *r;
+ r = list_entry(tmp, struct ptlrpc_request, rq_list);
+ if (r->rq_xid < min_xid)
+ min_xid = r->rq_xid;
+ }
+ spin_unlock(&imp->imp_lock);
+
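+ /* Advertise the lowest unreplied XID via last_xid so the server
+ * knows which earlier replies have been seen (multislot last_rcvd). */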
+ if (likely(min_xid != ~0ULL))
+ lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1);
+
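+ /* The request status field carries the sender's PID (printed by the
+ * DEBUG_REQ above). */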
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());