* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_RPC
+#include <linux/delay.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
/**
 * Return PortalRPC connection for remote uuid \a uuid
*/
-struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
+struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid,
+ lnet_nid_t nid4refnet)
{
- struct ptlrpc_connection *c;
- lnet_nid_t self;
- lnet_process_id_t peer;
- int err;
+ struct ptlrpc_connection *c;
+ lnet_nid_t self;
+ struct lnet_process_id peer;
+ int err;
/* ptlrpc_uuid_to_peer() initializes its 2nd parameter
* before accessing its values. */
/* coverity[uninit_use_in_call] */
- err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
- if (err != 0) {
- CNETERR("cannot find peer %s!\n", uuid->uuid);
- return NULL;
- }
+ peer.nid = nid4refnet;
+ err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
+ if (err != 0) {
+ CNETERR("cannot find peer %s!\n", uuid->uuid);
+ return NULL;
+ }
- c = ptlrpc_connection_get(peer, self, uuid);
- if (c) {
- memcpy(c->c_remote_uuid.uuid,
- uuid->uuid, sizeof(c->c_remote_uuid.uuid));
- }
+ c = ptlrpc_connection_get(peer, self, uuid);
+ if (c) {
+ memcpy(c->c_remote_uuid.uuid,
+ uuid->uuid, sizeof(c->c_remote_uuid.uuid));
+ }
- CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
+ CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
- return c;
+ return c;
}
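/*
 * Hedged caller sketch (not part of this patch; the example_* name is
 * hypothetical): the new nid4refnet argument lets a caller pin the
 * lookup to the LNet network of a known reference NID, while
 * LNET_NID_ANY preserves the old behaviour of searching every
 * configured network.
 */
static struct ptlrpc_connection *
example_connect_by_uuid(struct obd_uuid *uuid)
{
	/* no reference NID: resolve the uuid on any local network */
	return ptlrpc_uuid_to_connection(uuid, LNET_NID_ANY);
}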
/**
/* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
* node. Negotiated ocd_brw_size will always be <= this number. */
for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
- LNetInvalidateHandle(&desc->bd_mds[i]);
+ LNetInvalidateMDHandle(&desc->bd_mds[i]);
return desc;
out:
{
unsigned int nl, oldnl;
struct imp_at *at;
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
LASSERT(req->rq_import);
*/
CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ?
D_ADAPTTO : D_WARNING,
- "Reported service time %u > total measured time "
- CFS_DURATION_T"\n", service_time,
- cfs_time_sub(now, req->rq_sent));
+ "Reported service time %u > total measured time %lld\n",
+ service_time, now - req->rq_sent);
return;
}
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
__must_hold(&req->rq_lock)
{
- struct ptlrpc_request *early_req;
- time_t olddl;
- int rc;
- ENTRY;
+ struct ptlrpc_request *early_req;
+ time64_t olddl;
+ int rc;
+ ENTRY;
req->rq_early = 0;
spin_unlock(&req->rq_lock);
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in "CFS_DURATION_T"s "
- "("CFS_DURATION_T"s)", req->rq_early_count,
- cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
- cfs_time_sub(req->rq_deadline, olddl));
+ "Early reply #%d, new deadline in %llds (%llds)",
+ req->rq_early_count,
+ req->rq_deadline - ktime_get_real_seconds(),
+ req->rq_deadline - olddl);
RETURN(rc);
}
/* Let's setup deadline for req/reply/bulk unlink for opcode. */
if (cfs_fail_val == opcode) {
- time_t *fail_t = NULL, *fail2_t = NULL;
+ time64_t *fail_t = NULL, *fail2_t = NULL;
if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
fail_t = &request->rq_bulk_deadline;
}
if (fail_t) {
- *fail_t = cfs_time_current_sec() + LONG_UNLINK;
+ *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
if (fail2_t)
- *fail2_t = cfs_time_current_sec() + LONG_UNLINK;
+ *fail2_t = ktime_get_real_seconds() +
+ LONG_UNLINK;
- /* The RPC is infected, let the test to change the
- * fail_loc */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(2));
- set_current_state(TASK_RUNNING);
+ /*
+ * The RPC is infected, let the test change the
+ * fail_loc
+ */
+ msleep(4 * MSEC_PER_SEC);
}
}
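/*
 * Note on the sleep conversion above, as a minimal sketch: the old
 * jiffies-based pattern and its msleep() replacement are both
 * uninterruptible delays (and the interval here is also doubled from
 * 2s to 4s):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_timeout(cfs_time_seconds(2));	// ~2s, jiffies-based
 *	set_current_state(TASK_RUNNING);
 *
 *	msleep(4 * MSEC_PER_SEC);		// plain milliseconds
 */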
const struct req_format *format)
{
struct ptlrpc_request *request;
+ int connect = 0;
- request = __ptlrpc_request_alloc(imp, pool);
- if (request == NULL)
- return NULL;
+ request = __ptlrpc_request_alloc(imp, pool);
+ if (request == NULL)
+ return NULL;
+
+ /* initiate connection if needed, once the import has been
+ * referenced by the new request, to avoid racing with disconnect */
+ if (unlikely(imp->imp_state == LUSTRE_IMP_IDLE)) {
+ int rc;
+ CDEBUG_LIMIT(imp->imp_idle_debug,
+ "%s: reconnect after %llds idle\n",
+ imp->imp_obd->obd_name, ktime_get_real_seconds() -
+ imp->imp_last_reply_time);
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_IDLE) {
+ imp->imp_generation++;
+ imp->imp_initiated_at = imp->imp_generation;
+ imp->imp_state = LUSTRE_IMP_NEW;
+ connect = 1;
+ }
+ spin_unlock(&imp->imp_lock);
+ if (connect) {
+ rc = ptlrpc_connect_import(imp);
+ if (rc < 0) {
+ ptlrpc_request_free(request);
+ return NULL;
+ }
+ ptlrpc_pinger_add_import(imp);
+ }
+ }
req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
req_capsule_set(&request->rq_pill, format);
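/*
 * Minimal sketch of the idle-reconnect race handling above (all
 * example_* names are hypothetical): check the state without the
 * lock, re-check under the lock, and let exactly one racing thread
 * win the IDLE -> NEW transition before doing the heavyweight
 * connect outside the lock.
 */
enum example_state { EXAMPLE_IDLE, EXAMPLE_NEW };

struct example_import {
	spinlock_t		lock;
	enum example_state	state;
};

static int example_claim_reconnect(struct example_import *imp)
{
	int won = 0;

	if (likely(imp->state != EXAMPLE_IDLE))	/* cheap unlocked check */
		return 0;

	spin_lock(&imp->lock);
	if (imp->state == EXAMPLE_IDLE) {	/* re-check under lock */
		imp->state = EXAMPLE_NEW;
		won = 1;
	}
	spin_unlock(&imp->lock);

	return won;	/* only the winner initiates the connection */
}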
atomic_set(&set->set_remaining, 0);
spin_lock_init(&set->set_new_req_lock);
INIT_LIST_HEAD(&set->set_new_requests);
- INIT_LIST_HEAD(&set->set_cblist);
set->set_max_inflight = UINT_MAX;
set->set_producer = NULL;
set->set_producer_arg = NULL;
EXPORT_SYMBOL(ptlrpc_set_destroy);
/**
- * Add a callback function \a fn to the set.
- * This function would be called when all requests on this set are completed.
- * The function will be passed \a data argument.
- */
-int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
- set_interpreter_func fn, void *data)
-{
- struct ptlrpc_set_cbdata *cbdata;
-
- OBD_ALLOC_PTR(cbdata);
- if (cbdata == NULL)
- RETURN(-ENOMEM);
-
- cbdata->psc_interpret = fn;
- cbdata->psc_data = data;
- list_add_tail(&cbdata->psc_item, &set->set_cblist);
-
- RETURN(0);
-}
-
-/**
* Add a new request to the general purpose request set.
* Assumes request reference from the caller.
*/
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
struct ptlrpc_request *req)
{
+ LASSERT(req->rq_import->imp_state != LUSTRE_IMP_IDLE);
LASSERT(list_empty(&req->rq_set_chain));
if (req->rq_allow_intr)
list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
atomic_inc(&set->set_remaining);
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = ktime_get_seconds();
if (req->rq_reqmsg != NULL)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
* The set takes over the caller's request reference.
*/
req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
+ req->rq_queued_time = ktime_get_seconds();
list_add_tail(&req->rq_set_chain, &set->set_new_requests);
count = atomic_inc_return(&set->set_new_count);
spin_unlock(&set->set_new_req_lock);
LASSERT (status != NULL);
*status = 0;
- if (req->rq_ctx_init || req->rq_ctx_fini) {
- /* always allow ctx init/fini rpc go through */
- } else if (imp->imp_state == LUSTRE_IMP_NEW) {
- DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
- *status = -EIO;
+ if (req->rq_ctx_init || req->rq_ctx_fini) {
+ /* always allow ctx init/fini RPCs to go through */
+ } else if (imp->imp_state == LUSTRE_IMP_NEW) {
+ DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
+ *status = -EIO;
} else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- /* pings may safely race with umount */
- DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
+ unsigned int opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+ /* pings or MDS-equivalent STATFS may safely race with umount */
+ DEBUG_REQ((opc == OBD_PING || opc == OST_STATFS) ?
D_HA : D_ERROR, req, "IMP_CLOSED ");
*status = -EIO;
- } else if (ptlrpc_send_limit_expired(req)) {
+ } else if (ptlrpc_send_limit_expired(req)) {
/* probably doesn't need to be a D_ERROR after initial testing */
DEBUG_REQ(D_HA, req, "send limit expired ");
*status = -ETIMEDOUT;
if (atomic_read(&imp->imp_inval_count) != 0) {
DEBUG_REQ(D_ERROR, req, "invalidate in flight");
*status = -EIO;
- } else if (imp->imp_dlm_fake || req->rq_no_delay) {
+ } else if (req->rq_no_delay &&
+ imp->imp_generation != imp->imp_initiated_at) {
+ /* ignore nodelay for requests initiating connections */
*status = -EWOULDBLOCK;
} else if (req->rq_allow_replay &&
(imp->imp_state == LUSTRE_IMP_REPLAY ||
* \retval false if no message should be printed
* \retval true if console message should be printed
*/
-static bool ptlrpc_console_allow(struct ptlrpc_request *req)
+static bool ptlrpc_console_allow(struct ptlrpc_request *req, __u32 opc, int err)
{
- __u32 opc;
-
LASSERT(req->rq_reqmsg != NULL);
- opc = lustre_msg_get_opc(req->rq_reqmsg);
/* Suppress particular reconnect errors which are to be expected. */
if (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT) {
- int err;
/* Suppress timed out reconnect requests */
if (lustre_handle_is_used(&req->rq_import->imp_remote_handle) ||
/* Suppress most unavailable/again reconnect requests, but
* print occasionally so it is clear the client is trying to
* connect to a server where no target is running. */
- err = lustre_msg_get_status(req->rq_repmsg);
if ((err == -ENODEV || err == -EAGAIN) &&
req->rq_import->imp_conn_cnt % 30 != 20)
return false;
}
+ if (opc == LDLM_ENQUEUE && err == -EAGAIN)
+ /* -EAGAIN is normal when using POSIX flocks */
+ return false;
+
+ if (opc == OBD_PING && (err == -ENODEV || err == -ENOTCONN) &&
+ (req->rq_xid & 0xf) != 10)
+ /* Suppress most ping requests, they may fail occasionally */
+ return false;
+
return true;
}
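/*
 * Sketch of the sampling idiom used above (hypothetical helper):
 * noisy but expected failures are printed only once per 'period'
 * occurrences, keyed on a monotonic counter such as imp_conn_cnt or
 * the request XID; the real code uses fixed offsets (== 20, == 10)
 * rather than zero so a burst at startup does not log immediately.
 */
static inline bool example_log_this_one(u64 counter, unsigned int period)
{
	/* true roughly once per 'period' occurrences */
	return counter % period == 0;
}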
lnet_nid_t nid = imp->imp_connection->c_peer.nid;
__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
- /* -EAGAIN is normal when using POSIX flocks */
- if (ptlrpc_console_allow(req) &&
- !(opc == LDLM_ENQUEUE && err == -EAGAIN))
+ if (ptlrpc_console_allow(req, opc, err))
LCONSOLE_ERROR_MSG(0x11, "%s: operation %s to node %s "
"failed: rc = %d\n",
imp->imp_obd->obd_name,
*/
static int after_reply(struct ptlrpc_request *req)
{
- struct obd_import *imp = req->rq_import;
- struct obd_device *obd = req->rq_import->imp_obd;
- int rc;
- struct timeval work_start;
- __u64 committed;
- long timediff;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ struct obd_device *obd = req->rq_import->imp_obd;
+ ktime_t work_start;
+ u64 committed;
+ s64 timediff;
+ int rc;
+ ENTRY;
LASSERT(obd != NULL);
/* repbuf must be unlinked */
LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
RETURN(0);
}
- do_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &req->rq_sent_tv, NULL);
+ work_start = ktime_get_real();
+ timediff = ktime_us_delta(work_start, req->rq_sent_ns);
/*
* NB Until this point, the whole of the incoming message,
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
spin_lock(&req->rq_lock);
pool_is_at_full_capacity())
RETURN(-ENOMEM);
- if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
RETURN (0);
req->rq_waiting = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ DEBUG_REQ(D_HA, req, "req waiting for recovery: (%s != %s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
LASSERT(list_empty(&req->rq_list));
" %s:%s:%d:%llu:%s:%d\n", current_comm(),
imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ obd_import_nid2str(imp), lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
if (rc == -ENOMEM) {
/* delayed resend - skip */
if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > cfs_time_current_sec())
+ req->rq_sent > ktime_get_real_seconds())
continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
spin_unlock(&imp->imp_lock);
GOTO(interpret, req->rq_status);
}
+ /* ignore on just initiated connections */
if (ptlrpc_no_resend(req) &&
- !req->rq_wait_ctx) {
+ !req->rq_wait_ctx &&
+ imp->imp_generation !=
+ imp->imp_initiated_at) {
req->rq_status = -ENOTCONN;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
}
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
- "Completed RPC pname:cluuid:pid:xid:nid:"
- "opc %s:%s:%d:%llu:%s:%d\n",
- current_comm(), imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ if (req->rq_reqmsg != NULL)
+ CDEBUG(D_RPCTRACE,
+ "Completed RPC pname:cluuid:pid:xid:nid:"
+ "opc %s:%s:%d:%llu:%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg),
+ req->rq_xid,
+ obd_import_nid2str(imp),
+ lustre_msg_get_opc(req->rq_reqmsg));
spin_lock(&imp->imp_lock);
/* Request may already be off the sending or delaying list. This
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
struct obd_import *imp = req->rq_import;
+ unsigned int debug_mask = D_RPCTRACE;
int rc = 0;
ENTRY;
req->rq_timedout = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
- "/real "CFS_DURATION_T"]",
- req->rq_net_err ? "failed due to network error" :
- ((req->rq_real_sent == 0 ||
- cfs_time_before(req->rq_real_sent, req->rq_sent) ||
- cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
- "timed out for sent delay" : "timed out for slow reply"),
- req->rq_sent, req->rq_real_sent);
+ if (ptlrpc_console_allow(req, lustre_msg_get_opc(req->rq_reqmsg),
+ lustre_msg_get_status(req->rq_reqmsg)))
+ debug_mask = D_WARNING;
+ DEBUG_REQ(debug_mask, req, "Request sent has %s: [sent %lld/real %lld]",
+ req->rq_net_err ? "failed due to network error" :
+ ((req->rq_real_sent == 0 ||
+ req->rq_real_sent < req->rq_sent ||
+ req->rq_real_sent >= req->rq_deadline) ?
+ "timed out for sent delay" : "timed out for slow reply"),
+ (s64)req->rq_sent, (s64)req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetDebugPeer(imp->imp_connection->c_peer);
*/
int ptlrpc_expired_set(void *data)
{
- struct ptlrpc_request_set *set = data;
- struct list_head *tmp;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+ time64_t now = ktime_get_real_seconds();
+ ENTRY;
LASSERT(set != NULL);
/*
/**
* Get the smallest timeout in the set; this does NOT set a timeout.
*/
-int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
+time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
- struct list_head *tmp;
- time_t now = cfs_time_current_sec();
- int timeout = 0;
- struct ptlrpc_request *req;
- int deadline;
- ENTRY;
+ struct list_head *tmp;
+ time64_t now = ktime_get_real_seconds();
+ time64_t timeout = 0;
+ struct ptlrpc_request *req;
+ time64_t deadline;
+ ENTRY;
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
struct list_head *tmp;
struct ptlrpc_request *req;
struct l_wait_info lwi;
- int rc, timeout;
- ENTRY;
+ time64_t timeout;
+ int rc;
+ ENTRY;
if (set->set_producer)
(void)ptlrpc_set_producer(set);
/* wait until all complete, interrupted, or an in-flight
* req times out */
- CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
- set, timeout);
+ CDEBUG(D_RPCTRACE, "set %p going to sleep for %lld seconds\n",
+ set, timeout);
if ((timeout == 0 && !signal_pending(current)) ||
set->set_allow_intr)
rc = req->rq_status;
}
- if (set->set_interpret != NULL) {
- int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
- set->set_interpret;
- rc = interpreter (set, set->set_arg, rc);
- } else {
- struct ptlrpc_set_cbdata *cbdata, *n;
- int err;
-
- list_for_each_entry_safe(cbdata, n,
- &set->set_cblist, psc_item) {
- list_del_init(&cbdata->psc_item);
- err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
- if (err && !rc)
- rc = err;
- OBD_FREE_PTR(cbdata);
- }
- }
-
- RETURN(rc);
+ RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_set_wait);
* Drops one reference count for request \a request.
* If \a locked is set, the caller holds the import imp_lock.
* Frees the request when the reference count reaches zero.
+ *
+ * \retval 1 the request is freed
+ * \retval 0 some others still hold references on the request
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
- ENTRY;
- if (request == NULL)
- RETURN(1);
+ int count;
+ ENTRY;
- if (request == LP_POISON ||
- request->rq_reqmsg == LP_POISON) {
- CERROR("dereferencing freed request (bug 575)\n");
- LBUG();
- RETURN(1);
- }
+ if (!request)
+ RETURN(1);
+
+ LASSERT(request != LP_POISON);
+ LASSERT(request->rq_reqmsg != LP_POISON);
- DEBUG_REQ(D_INFO, request, "refcount now %u",
+ DEBUG_REQ(D_INFO, request, "refcount now %u",
atomic_read(&request->rq_refcount) - 1);
- if (atomic_dec_and_test(&request->rq_refcount)) {
- __ptlrpc_free_req(request, locked);
- RETURN(1);
- }
+ spin_lock(&request->rq_lock);
+ count = atomic_dec_return(&request->rq_refcount);
+ LASSERTF(count >= 0, "Invalid ref count %d\n", count);
- RETURN(0);
+ /* For an open RPC, the client does not know the EA size (LOV, ACL,
+ * and so on) until the reply arrives, so it has to reserve a very
+ * large reply buffer. That buffer is not released until the RPC is
+ * freed, and since the open RPC is replayable, it is kept on the
+ * replay list until close. With many files opened concurrently, the
+ * client can run out of memory.
+ *
+ * In fact, it is unnecessary to keep the reply buffer for open
+ * replay: the related EAs have already been saved via
+ * mdc_save_lovea() before coming here, so it is safe to free the
+ * reply buffer earlier, before releasing the RPC, to avoid client
+ * OOM. LU-9514 */
+ if (count == 1 && request->rq_early_free_repbuf && request->rq_repbuf) {
+ spin_lock(&request->rq_early_free_lock);
+ sptlrpc_cli_free_repbuf(request);
+ request->rq_repbuf = NULL;
+ request->rq_repbuf_len = 0;
+ request->rq_repdata = NULL;
+ request->rq_repdata_len = 0;
+ spin_unlock(&request->rq_early_free_lock);
+ }
+ spin_unlock(&request->rq_lock);
+
+ if (!count)
+ __ptlrpc_free_req(request, locked);
+
+ RETURN(!count);
}
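/*
 * Shape of the two-stage teardown above, as a hedged sketch (the
 * example_* names are hypothetical): the next-to-last put (count
 * drops to 1) may release the large reply buffer early, and only the
 * final put (count drops to 0) frees the object itself.
 */
struct example_req {
	atomic_t	 refcount;
	void		*big_reply_buf;
};

static int example_req_put(struct example_req *r)
{
	int count = atomic_dec_return(&r->refcount);

	LASSERT(count >= 0);
	if (count == 1) {
		/* last remaining ref is the replay list: drop the buffer */
		kvfree(r->big_reply_buf);
		r->big_reply_buf = NULL;
	} else if (count == 0) {
		kfree(r);		/* last reference gone */
	}
	return count == 0;
}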
/**
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
- request->rq_reply_deadline =
- cfs_time_current_sec() + LONG_UNLINK;
+ request->rq_reply_deadline = ktime_get_real_seconds() +
+ LONG_UNLINK;
/*
* Nothing left to do.
return;
}
- if (force || req->rq_transno <= imp->imp_peer_committed_transno)
+ if (force || req->rq_transno <= imp->imp_peer_committed_transno) {
+ if (imp->imp_replay_cursor == &req->rq_replay_list)
+ imp->imp_replay_cursor = req->rq_replay_list.next;
ptlrpc_free_request(req);
+ }
spin_unlock(&imp->imp_lock);
}
GOTO(out, 0);
list_for_each_entry_safe(req, saved, &imp->imp_committed_list,
- rq_replay_list) {
+ rq_replay_list) {
LASSERT(req->rq_transno != 0);
- if (req->rq_import_generation < imp->imp_generation) {
- DEBUG_REQ(D_RPCTRACE, req, "free stale open request");
- ptlrpc_free_request(req);
- } else if (!req->rq_replay) {
- DEBUG_REQ(D_RPCTRACE, req, "free closed open request");
+ if (req->rq_import_generation < imp->imp_generation ||
+ !req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "free %s open request",
+ req->rq_import_generation <
+ imp->imp_generation ? "stale" : "closed");
+
+ if (imp->imp_replay_cursor == &req->rq_replay_list)
+ imp->imp_replay_cursor =
+ req->rq_replay_list.next;
+
ptlrpc_free_request(req);
}
}
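/*
 * The replay-cursor bookkeeping above follows one rule, sketched here
 * with hypothetical names: before deleting a node that a saved cursor
 * may point at, step the cursor past it so it never dangles.
 */
static void example_remove_node(struct list_head *node,
				struct list_head **cursor)
{
	if (*cursor == node)
		*cursor = node->next;	/* step over the doomed node */
	list_del_init(node);
}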
*/
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
- DEBUG_REQ(D_HA, req, "going to resend");
+ DEBUG_REQ(D_HA, req, "going to resend");
spin_lock(&req->rq_lock);
/* Request got reply but linked to the import list still.
return;
}
- lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
- req->rq_status = -EAGAIN;
+ req->rq_status = -EAGAIN;
- req->rq_resend = 1;
- req->rq_net_err = 0;
- req->rq_timedout = 0;
+ req->rq_resend = 1;
+ req->rq_net_err = 0;
+ req->rq_timedout = 0;
- ptlrpc_client_wake_req(req);
+ ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
spin_lock(&imp->imp_lock);
imp->imp_vbr_failed = 1;
- imp->imp_no_lock_replay = 1;
spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
} else {
}
spin_lock(&imp->imp_lock);
- /** if replays by version then gap occur on server, no trust to locks */
- if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
- imp->imp_no_lock_replay = 1;
imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
*/
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
- struct ptlrpc_replay_async_args *aa;
- ENTRY;
+ struct ptlrpc_replay_async_args *aa;
- LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
+ ENTRY;
- LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- memset(aa, 0, sizeof *aa);
+ LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ memset(aa, 0, sizeof(*aa));
/* Prepare request to be resent with ptlrpcd */
aa->praa_old_state = req->rq_send_state;
DEBUG_REQ(D_HA, req, "REPLAY");
atomic_inc(&req->rq_import->imp_replay_inflight);
+ spin_lock(&req->rq_lock);
+ req->rq_early_free_repbuf = 0;
+ spin_unlock(&req->rq_lock);
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
ptlrpcd_add_req(req);
#define YEAR_2004 (1ULL << 30)
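/*
 * Worked check of the constant: YEAR_2004 = 1ULL << 30 = 1073741824
 * seconds ~= 34 years past the 1970 epoch, i.e. early 2004, so
 * 'now < YEAR_2004' flags a clock that was never set.
 */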
void ptlrpc_init_xid(void)
{
- time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
LASSERT(bd != NULL);
- if (!req->rq_resend) {
- /* this request has a new xid, just use it as bulk matchbits */
- req->rq_mbits = req->rq_xid;
-
- } else { /* needs to generate a new matchbits for resend */
- __u64 old_mbits = req->rq_mbits;
+ /* Generate new matchbits for all resend requests, including
+ * resend replay. */
+ if (req->rq_resend) {
+ __u64 old_mbits = req->rq_mbits;
+ /* The first resend after -EINPROGRESS gets a new xid, so rq_xid
+ * could serve as rq_mbits in that case; however, it is hard to
+ * distinguish such a resend from a 'resend of the -EINPROGRESS
+ * resend'. To keep it simple, we generate new mbits for all
+ * resend cases. */
if (OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS)){
req->rq_mbits = ptlrpc_next_xid();
- } else {/* old version transfers rq_xid to peer as matchbits */
+ } else {
+ /* Old version transfers rq_xid to peer as
+ * matchbits. */
spin_lock(&req->rq_import->imp_lock);
list_del_init(&req->rq_unreplied_list);
ptlrpc_assign_next_xid_nolock(req);
- req->rq_mbits = req->rq_xid;
spin_unlock(&req->rq_import->imp_lock);
+ req->rq_mbits = req->rq_xid;
}
CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
old_mbits, req->rq_mbits);
+ } else if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
+ /* Request being sent first time, use xid as matchbits. */
+ req->rq_mbits = req->rq_xid;
+ } else {
+ /* Replay request, xid and matchbits have already been
+ * correctly assigned. */
+ return;
}
/* For multi-bulk RPCs, rq_mbits is the last mbits needed for bulks so
LNET_MAX_IOV) - 1;
/* Set rq_xid as rq_mbits to indicate the final bulk for the old
- * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808 */
+ * server which does not support OBD_CONNECT_BULK_MBITS. LU-6808.
+ *
+ * It's ok to directly set the rq_xid here, since this xid bump
+ * won't affect the request position in unreplied list. */
if (!OCD_HAS_FLAG(&bd->bd_import->imp_connect_data, BULK_MBITS))
req->rq_xid = req->rq_mbits;
}
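/*
 * Decision table for the matchbits logic above (informational
 * summary, not new behaviour):
 *
 *	resend (any kind)        rq_mbits = ptlrpc_next_xid();
 *	                         without BULK_MBITS the peer matches on
 *	                         rq_xid, so a new xid is assigned and
 *	                         rq_mbits = rq_xid
 *	first send (not replay)  rq_mbits = rq_xid
 *	replay                   keep previously assigned xid/mbits
 *
 * Multi-bulk RPCs then advance rq_mbits to the last bulk's value, and
 * without BULK_MBITS rq_xid follows it (LU-6808).
 */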
{
/* re-initialize the req */
req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
req->rq_phase = RQ_PHASE_INTERPRET;
req->rq_next_phase = RQ_PHASE_COMPLETE;
req->rq_no_delay = req->rq_no_resend = 1;
req->rq_pill.rc_fmt = (void *)&worker_format;
- CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->cb = cb;
args->cbdata = cbdata;