*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/**
* Return PortalRPC connection for remore uud \a uuid
*/
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
+struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
+ lnet_nid_t nid4refnet)
{
- struct ptlrpc_connection *c;
- lnet_nid_t self;
- lnet_process_id_t peer;
- int err;
+ struct ptlrpc_connection *c;
+ lnet_nid_t self;
+ struct lnet_process_id peer;
+ int err;
/* ptlrpc_uuid_to_peer() initializes its 2nd parameter
* before accessing its values. */
/* coverity[uninit_use_in_call] */
- err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
- if (err != 0) {
- CNETERR("cannot find peer %s!\n", uuid->uuid);
- return NULL;
- }
+ peer.nid = nid4refnet;
+ err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
+ if (err != 0) {
+ CNETERR("cannot find peer %s!\n", uuid->uuid);
+ return NULL;
+ }
- c = ptlrpc_connection_get(peer, self, uuid);
- if (c) {
- memcpy(c->c_remote_uuid.uuid,
- uuid->uuid, sizeof(c->c_remote_uuid.uuid));
- }
+ c = ptlrpc_connection_get(peer, self, uuid);
+ if (c) {
+ memcpy(c->c_remote_uuid.uuid,
+ uuid->uuid, sizeof(c->c_remote_uuid.uuid));
+ }
- CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
+ CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
- return c;
+ return c;
}
/**
/* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
* node. Negotiated ocd_brw_size will always be <= this number. */
for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
- LNetInvalidateHandle(&desc->bd_mds[i]);
+ LNetInvalidateMDHandle(&desc->bd_mds[i]);
return desc;
out:
LASSERT(page != NULL);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
- LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+ LASSERT(pageoffset + len <= PAGE_SIZE);
LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
desc->bd_nob += len;
if (pin)
- page_cache_get(page);
+ get_page(page);
kiov->kiov_page = page;
kiov->kiov_offset = pageoffset;
{
unsigned int nl, oldnl;
struct imp_at *at;
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
LASSERT(req->rq_import);
*/
CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
D_ADAPTTO : D_WARNING,
- "Reported service time %u > total measured time "
- CFS_DURATION_T"\n", service_time,
- cfs_time_sub(now, req->rq_sent));
+ "Reported service time %u > total measured time %lld\n",
+ service_time, now - req->rq_sent);
return;
}
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
__must_hold(&req->rq_lock)
{
- struct ptlrpc_request *early_req;
- time_t olddl;
- int rc;
- ENTRY;
+ struct ptlrpc_request *early_req;
+ time64_t olddl;
+ int rc;
+ ENTRY;
req->rq_early = 0;
spin_unlock(&req->rq_lock);
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in "CFS_DURATION_T"s "
- "("CFS_DURATION_T"s)", req->rq_early_count,
- cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
- cfs_time_sub(req->rq_deadline, olddl));
+ "Early reply #%d, new deadline in %llds (%llds)",
+ req->rq_early_count,
+ req->rq_deadline - ktime_get_real_seconds(),
+ req->rq_deadline - olddl);
RETURN(rc);
}
request->rq_reply_cbid.cbid_arg = request;
request->rq_reply_deadline = 0;
+ request->rq_bulk_deadline = 0;
+ request->rq_req_deadline = 0;
request->rq_phase = RQ_PHASE_NEW;
request->rq_next_phase = RQ_PHASE_UNDEFINED;
lustre_msg_set_opc(request->rq_reqmsg, opcode);
ptlrpc_assign_next_xid(request);
+	/* Let's set up the req/reply/bulk unlink deadlines for this opcode. */
+ if (cfs_fail_val == opcode) {
+ time64_t *fail_t = NULL, *fail2_t = NULL;
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+ fail_t = &request->rq_bulk_deadline;
+ else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+ fail_t = &request->rq_reply_deadline;
+ else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+ fail_t = &request->rq_req_deadline;
+ else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
+ fail_t = &request->rq_reply_deadline;
+ fail2_t = &request->rq_bulk_deadline;
+ }
+
+ if (fail_t) {
+ *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
+
+ if (fail2_t)
+ *fail2_t = ktime_get_real_seconds() +
+ LONG_UNLINK;
+
+ /*
+			 * The RPC is infected, let the test change the
+			 * fail_loc
+ */
+ msleep(4 * MSEC_PER_SEC);
+ }
+ }
+
RETURN(0);
out_ctx:
if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
- } else if (imp->imp_dlm_fake || req->rq_no_delay) {
+ } else if (req->rq_no_delay) {
*status = -EWOULDBLOCK;
} else if (req->rq_allow_replay &&
(imp->imp_state == LUSTRE_IMP_REPLAY ||
lnet_nid_t nid = imp->imp_connection->c_peer.nid;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
- if (ptlrpc_console_allow(req))
+ /* -EAGAIN is normal when using POSIX flocks */
+ if (ptlrpc_console_allow(req) &&
+ !(opc == LDLM_ENQUEUE && err == -EAGAIN))
LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s "
"failed: rc = %d\n",
imp->imp_obd->obd_name,
LASSERT(versions);
lustre_msg_set_versions(reqmsg, versions);
- CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
+ CDEBUG(D_INFO, "Client save versions [%#llx/%#llx]\n",
versions[0], versions[1]);
EXIT;
req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
rq_unreplied_list);
- LASSERTF(req->rq_xid >= 1, "XID:"LPU64"\n", req->rq_xid);
+ LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
if (imp->imp_known_replied_xid < req->rq_xid - 1)
imp->imp_known_replied_xid = req->rq_xid - 1;
*/
static int after_reply(struct ptlrpc_request *req)
{
- struct obd_import *imp = req->rq_import;
- struct obd_device *obd = req->rq_import->imp_obd;
- int rc;
- struct timeval work_start;
- __u64 committed;
- long timediff;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ struct obd_device *obd = req->rq_import->imp_obd;
+ ktime_t work_start;
+ u64 committed;
+ s64 timediff;
+ int rc;
+ ENTRY;
LASSERT(obd != NULL);
/* repbuf must be unlinked */
LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
RETURN(0);
}
- do_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+ work_start = ktime_get_real();
+ timediff = ktime_us_delta(req->rq_sent_ns, work_start);
/*
* NB Until this point, the whole of the incoming message,
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
spin_lock(&req->rq_lock);
pool_is_at_full_capacity())
RETURN(-ENOMEM);
- if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
RETURN (0);
req->rq_waiting = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
LASSERT(list_empty(&req->rq_list));
}
CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+ " %s:%s:%d:%llu:%s:%d\n", current_comm(),
imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
libcfs_nid2str(imp->imp_connection->c_peer.nid),
/* delayed resend - skip */
if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > cfs_time_current_sec())
+ req->rq_sent > ktime_get_real_seconds())
continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_phase == RQ_PHASE_BULK ||
req->rq_phase == RQ_PHASE_INTERPRET ||
- req->rq_phase == RQ_PHASE_UNREGISTERING)) {
- DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
- LBUG();
- }
+ req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK)) {
+ DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
+ LBUG();
+ }
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
- LASSERT(req->rq_next_phase != req->rq_phase);
- LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK) {
+ LASSERT(req->rq_next_phase != req->rq_phase);
+ LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+
+ if (req->rq_req_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+ req->rq_req_deadline = 0;
+ if (req->rq_reply_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+ req->rq_reply_deadline = 0;
+ if (req->rq_bulk_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+ req->rq_bulk_deadline = 0;
- /*
- * Skip processing until reply is unlinked. We
- * can't return to pool before that and we can't
- * call interpret before that. We need to make
- * sure that all rdma transfers finished and will
- * not corrupt any data.
- */
- if (ptlrpc_client_recv_or_unlink(req) ||
- ptlrpc_client_bulk_active(req))
- continue;
+ /*
+ * Skip processing until reply is unlinked. We
+ * can't return to pool before that and we can't
+ * call interpret before that. We need to make
+ * sure that all rdma transfers finished and will
+ * not corrupt any data.
+ */
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
+ ptlrpc_client_recv_or_unlink(req))
+ continue;
+ if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
+ ptlrpc_client_bulk_active(req))
+ continue;
/*
* Turn fail_loc off to prevent it from looping
}
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
- "Completed RPC pname:cluuid:pid:xid:nid:"
- "opc %s:%s:%d:"LPU64":%s:%d\n",
- current_comm(), imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ if (req->rq_reqmsg != NULL)
+ CDEBUG(D_RPCTRACE,
+ "Completed RPC pname:cluuid:pid:xid:nid:"
+ "opc %s:%s:%d:%llu:%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg),
+ req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
spin_lock(&imp->imp_lock);
/* Request already may be not on sending or delaying list. This
req->rq_timedout = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
- "/real "CFS_DURATION_T"]",
+ DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
req->rq_net_err ? "failed due to network error" :
((req->rq_real_sent == 0 ||
- cfs_time_before(req->rq_real_sent, req->rq_sent) ||
- cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+ req->rq_real_sent < req->rq_sent ||
+ req->rq_real_sent >= req->rq_deadline) ?
"timed out for sent delay" : "timed out for slow reply"),
- req->rq_sent, req->rq_real_sent);
+ (s64)req->rq_sent, (s64)req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetDebugPeer(imp->imp_connection->c_peer);
*/
int ptlrpc_expired_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+ time64_t now = ktime_get_real_seconds();
+ ENTRY;
LASSERT(set != NULL);
/*
continue;
if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING &&
+ req->rq_phase != RQ_PHASE_UNREG_RPC &&
!req->rq_allow_intr)
continue;
*/
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
- time_t now = cfs_time_current_sec();
- int timeout = 0;
- struct ptlrpc_request *req;
- int deadline;
- ENTRY;
+ struct list_head *tmp;
+ time64_t now = ktime_get_real_seconds();
+ int timeout = 0;
+ struct ptlrpc_request *req;
+ time64_t deadline;
+ ENTRY;
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
* Drops one reference count for request \a request.
* \a locked set indicates that caller holds import imp_lock.
* Frees the request whe reference count reaches zero.
+ *
+ * \retval 1 the request is freed
+ * \retval 0 some others still hold references on the request
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
- ENTRY;
- if (request == NULL)
- RETURN(1);
+ int count;
+ ENTRY;
- if (request == LP_POISON ||
- request->rq_reqmsg == LP_POISON) {
- CERROR("dereferencing freed request (bug 575)\n");
- LBUG();
- RETURN(1);
- }
+ if (!request)
+ RETURN(1);
+
+ LASSERT(request != LP_POISON);
+ LASSERT(request->rq_reqmsg != LP_POISON);
- DEBUG_REQ(D_INFO, request, "refcount now %u",
+ DEBUG_REQ(D_INFO, request, "refcount now %u",
atomic_read(&request->rq_refcount) - 1);
- if (atomic_dec_and_test(&request->rq_refcount)) {
- __ptlrpc_free_req(request, locked);
- RETURN(1);
- }
+ spin_lock(&request->rq_lock);
+ count = atomic_dec_return(&request->rq_refcount);
+ LASSERTF(count >= 0, "Invalid ref count %d\n", count);
- RETURN(0);
+	/* For an open RPC, the client does not know the EA size (LOV, ACL,
+	 * and so on) before the reply arrives, so it has to reserve a very
+	 * large reply buffer. Such a buffer is not released until the RPC is
+	 * freed. Since the open RPC is replayable, it must be kept on the
+	 * replay list until close. If a lot of files are opened concurrently,
+	 * the client may run out of memory.
+	 *
+	 * In fact, it is unnecessary to keep the reply buffer for open replay:
+	 * the related EAs have already been saved via mdc_save_lovea() before
+	 * coming here. So it is safe to free the reply buffer somewhat
+	 * earlier, before releasing the RPC, to avoid client OOM. LU-9514 */
+ if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
+ spin_lock(&request->rq_early_free_lock);
+ sptlrpc_cli_free_repbuf(request);
+ request->rq_repbuf = NULL;
+ request->rq_repbuf_len = 0;
+ request->rq_repdata = NULL;
+ request->rq_reqdata_len = 0;
+ spin_unlock(&request->rq_early_free_lock);
+ }
+ spin_unlock(&request->rq_lock);
+
+ if (!count)
+ __ptlrpc_free_req(request, locked);
+
+ RETURN(!count);
}
/**
*/
LASSERT(!in_interrupt());
- /*
- * Let's setup deadline for reply unlink.
- */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- async && request->rq_reply_deadline == 0)
- request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
+	/* Let's set up the deadline for reply unlink. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
+ request->rq_reply_deadline = ktime_get_real_seconds() +
+ LONG_UNLINK;
/*
* Nothing left to do.
if (!ptlrpc_client_recv_or_unlink(request))
RETURN(1);
- /*
- * Move to "Unregistering" phase as reply was not unlinked yet.
- */
- ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
+ /* Move to "Unregistering" phase as reply was not unlinked yet. */
+ ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
/*
* Do not wait for unlink to finish.
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
imp->imp_generation == imp->imp_last_generation_checked) {
- CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
+ CDEBUG(D_INFO, "%s: skip recheck: last_committed %llu\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
RETURN_EXIT;
}
- CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
+ CDEBUG(D_RPCTRACE, "%s: committing for last_committed %llu gen %d\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
imp->imp_generation);
continue;
}
- DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
+ DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)",
imp->imp_peer_committed_transno);
free_req:
ptlrpc_free_request(req);
GOTO(out, 0);
list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
- rq_replay_list) {
+ rq_replay_list) {
LASSERT(req->rq_transno != 0);
- if (req->rq_import_generation < imp->imp_generation) {
- DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
- ptlrpc_free_request(req);
- } else if (!req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "free closed open request");
+ if (req->rq_import_generation < imp->imp_generation ||
+ !req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
+ req->rq_import_generation <
+ imp->imp_generation ? "stale" : "closed");
+
+ if (imp->imp_replay_cursor == &req->rq_replay_list)
+ imp->imp_replay_cursor =
+ req->rq_replay_list.next;
+
ptlrpc_free_request(req);
}
}
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
lustre_msg_get_transno(req->rq_repmsg) ||
lustre_msg_get_transno(req->rq_repmsg) == 0,
- LPX64"/"LPX64"\n",
+ "%#llx/%#llx\n",
lustre_msg_get_transno(req->rq_reqmsg),
lustre_msg_get_transno(req->rq_repmsg));
}
/* transaction number shouldn't be bigger than the latest replayed */
if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
DEBUG_REQ(D_ERROR, req,
- "Reported transno "LPU64" is bigger than the "
- "replayed one: "LPU64, req->rq_transno,
+ "Reported transno %llu is bigger than the "
+ "replayed one: %llu", req->rq_transno,
lustre_msg_get_transno(req->rq_reqmsg));
GOTO(out, rc = -EINVAL);
}
DEBUG_REQ(D_HA, req, "REPLAY");
atomic_inc(&req->rq_import->imp_replay_inflight);
+ spin_lock(&req->rq_lock);
+ req->rq_early_free_repbuf = 0;
+ spin_unlock(&req->rq_lock);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
ptlrpcd_add_req(req);
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
LASSERT(bd != NULL);
- if (!req->rq_resend) {
- /* this request has a new xid, just use it as bulk matchbits */
- req->rq_mbits = req->rq_xid;
-
- } else { /* needs to generate a new matchbits for resend */
- __u64 old_mbits = req->rq_mbits;
-
- if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_BULK_MBITS) != 0)
+ /* Generate new matchbits for all resend requests, including
+ * resend replay. */
+ if (req->rq_resend) {
+ __u64 old_mbits = req->rq_mbits;
+
+ /* First time resend on -EINPROGRESS will generate new xid,
+ * so we can actually use the rq_xid as rq_mbits in such case,
+	 * however, it's a bit hard to distinguish such a resend from a
+ * 'resend for the -EINPROGRESS resend'. To make it simple,
+ * we opt to generate mbits for all resend cases. */
+ if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS)){
req->rq_mbits = ptlrpc_next_xid();
- else /* old version transfers rq_xid to peer as matchbits */
- req->rq_mbits = req->rq_xid = ptlrpc_next_xid();
-
- CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
+ } else {
+ /* Old version transfers rq_xid to peer as
+ * matchbits. */
+ spin_lock(&req->rq_import->imp_lock);
+ list_del_init(&req->rq_unreplied_list);
+ ptlrpc_assign_next_xid_nolock(req);
+ spin_unlock(&req->rq_import->imp_lock);
+ req->rq_mbits = req->rq_xid;
+ }
+ CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
old_mbits, req->rq_mbits);
+ } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
+ /* Request being sent first time, use xid as matchbits. */
+ req->rq_mbits = req->rq_xid;
+ } else {
+ /* Replay request, xid and matchbits have already been
+ * correctly assigned. */
+ return;
}
/* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
* see LU-1431 */
req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
LNET_MAX_IOV) - 1;
+
+ /* Set rq_xid as rq_mbits to indicate the final bulk for the old
+ * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
+ *
+ * It's ok to directly set the rq_xid here, since this xid bump
+ * won't affect the request position in unreplied list. */
+ if (!OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS))
+ req->rq_xid = req->rq_mbits;
}
/**
{
/* re-initialize the req */
req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
req->rq_phase = RQ_PHASE_INTERPRET;
req->rq_next_phase = RQ_PHASE_COMPLETE;
req->rq_xid = ptlrpc_next_xid();