* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
spin_unlock(&pool->prp_lock);
}
-static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode,
- int count, __u32 *lengths, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
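+/**
+ * Insert \a req into the import's list of unreplied requests, keeping the
+ * list sorted by XID in ascending order. Caller must hold imp_lock.
+ */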
+void ptlrpc_add_unreplied(struct ptlrpc_request *req)
{
- struct obd_import *imp = request->rq_import;
- int rc;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ struct list_head *tmp;
+ struct ptlrpc_request *iter;
- if (unlikely(ctx))
- request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
- else {
- rc = sptlrpc_req_get_ctx(request);
- if (rc)
- GOTO(out_free, rc);
- }
+ assert_spin_locked(&imp->imp_lock);
+ LASSERT(list_empty(&req->rq_unreplied_list));
- sptlrpc_req_set_flavor(request, opcode);
+ /* unreplied list is sorted by xid in ascending order */
+ list_for_each_prev(tmp, &imp->imp_unreplied_list) {
+ iter = list_entry(tmp, struct ptlrpc_request,
+ rq_unreplied_list);
- rc = lustre_pack_request(request, imp->imp_msg_magic, count,
- lengths, bufs);
- if (rc) {
- LASSERT(!request->rq_pool);
- GOTO(out_ctx, rc);
- }
+ LASSERT(req->rq_xid != iter->rq_xid);
+ if (req->rq_xid < iter->rq_xid)
+ continue;
+ list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
+ return;
+ }
+ list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
+}
+
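+/**
+ * Assign the next XID to \a req and add it to the import's unreplied list.
+ * Caller must hold imp_lock.
+ */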
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
+{
+ req->rq_xid = ptlrpc_next_xid();
+ ptlrpc_add_unreplied(req);
+}
+
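+/**
+ * Same as ptlrpc_assign_next_xid_nolock(), but takes imp_lock itself.
+ */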
+static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_import->imp_lock);
+ ptlrpc_assign_next_xid_nolock(req);
+ spin_unlock(&req->rq_import->imp_lock);
+}
+
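+/**
+ * Pack the request buffers described by the request pill and finish
+ * initializing \a request for sending: set the security flavor, message
+ * version and opcode, request/reply callbacks, portals and timeout, and
+ * assign the next XID.
+ */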
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode, char **bufs,
+ struct ptlrpc_cli_ctx *ctx)
+{
+ int count;
+ struct obd_import *imp;
+ __u32 *lengths;
+ int rc;
+
+ ENTRY;
+
+ count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
+ imp = request->rq_import;
+ lengths = request->rq_pill.rc_area[RCL_CLIENT];
+
+ if (ctx != NULL) {
+ request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
+ } else {
+ rc = sptlrpc_req_get_ctx(request);
+ if (rc)
+ GOTO(out_free, rc);
+ }
+ sptlrpc_req_set_flavor(request, opcode);
+
+ rc = lustre_pack_request(request, imp->imp_msg_magic, count,
+ lengths, bufs);
+ if (rc)
+ GOTO(out_ctx, rc);
- lustre_msg_add_version(request->rq_reqmsg, version);
- request->rq_send_state = LUSTRE_IMP_FULL;
- request->rq_type = PTL_RPC_MSG_REQUEST;
+ lustre_msg_add_version(request->rq_reqmsg, version);
+ request->rq_send_state = LUSTRE_IMP_FULL;
+ request->rq_type = PTL_RPC_MSG_REQUEST;
- request->rq_req_cbid.cbid_fn = request_out_callback;
- request->rq_req_cbid.cbid_arg = request;
+ request->rq_req_cbid.cbid_fn = request_out_callback;
+ request->rq_req_cbid.cbid_arg = request;
- request->rq_reply_cbid.cbid_fn = reply_in_callback;
- request->rq_reply_cbid.cbid_arg = request;
+ request->rq_reply_cbid.cbid_fn = reply_in_callback;
+ request->rq_reply_cbid.cbid_arg = request;
- request->rq_reply_deadline = 0;
- request->rq_phase = RQ_PHASE_NEW;
- request->rq_next_phase = RQ_PHASE_UNDEFINED;
+ request->rq_reply_deadline = 0;
+ request->rq_phase = RQ_PHASE_NEW;
+ request->rq_next_phase = RQ_PHASE_UNDEFINED;
- request->rq_request_portal = imp->imp_client->cli_request_portal;
- request->rq_reply_portal = imp->imp_client->cli_reply_portal;
+ request->rq_request_portal = imp->imp_client->cli_request_portal;
+ request->rq_reply_portal = imp->imp_client->cli_reply_portal;
- ptlrpc_at_set_req_timeout(request);
+ ptlrpc_at_set_req_timeout(request);
lustre_msg_set_opc(request->rq_reqmsg, opcode);
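+ /* Assign the XID and add the request to the unreplied list
+ * under imp_lock, so the lowest unreplied XID is always known */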
+ ptlrpc_assign_next_xid(request);
RETURN(0);
+
out_ctx:
+ LASSERT(!request->rq_pool);
sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
class_import_put(imp);
- return rc;
-}
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
-{
- int count;
+ return rc;
- count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
- return __ptlrpc_request_bufs_pack(request, version, opcode, count,
- request->rq_pill.rc_area[RCL_CLIENT],
- bufs, ctx);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
/**
- * Prepare request (fetched from pool \a poolif not NULL) on import \a imp
- * for operation \a opcode. Request would contain \a count buffers.
- * Sizes of buffers are described in array \a lengths and buffers themselves
- * are provided by a pointer \a bufs.
- * Returns prepared request structure pointer or NULL on error.
- */
-struct ptlrpc_request *
-ptlrpc_prep_req_pool(struct obd_import *imp,
- __u32 version, int opcode,
- int count, __u32 *lengths, char **bufs,
- struct ptlrpc_request_pool *pool)
-{
- struct ptlrpc_request *request;
- int rc;
-
- request = __ptlrpc_request_alloc(imp, pool);
- if (!request)
- return NULL;
-
- rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
- lengths, bufs, NULL);
- if (rc) {
- ptlrpc_request_free(request);
- request = NULL;
- }
- return request;
-}
-
-/**
- * Same as ptlrpc_prep_req_pool, but without pool
- */
-struct ptlrpc_request *
-ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
- __u32 *lengths, char **bufs)
-{
- return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
- NULL);
-}
-
-/**
* Allocate and initialize new request set structure on the current CPT.
* Returns a pointer to the newly allocated set structure or NULL on error.
*/
{
LASSERT(list_empty(&req->rq_set_chain));
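+ /* If this request allows interruption, the whole set does */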
+ if (req->rq_allow_intr)
+ set->set_allow_intr = 1;
+
/* The set takes over the caller's request reference */
list_add_tail(&req->rq_set_chain, &set->set_requests);
req->rq_set = set;
EXIT;
}
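+/**
+ * Return the "known replied" XID: one less than the lowest XID on the
+ * import's unreplied list, i.e. the XID below which all requests have
+ * seen their replies. Returns 0 if the unreplied list is empty. Also
+ * raises imp_known_replied_xid if it lags behind. Caller must hold
+ * imp_lock.
+ */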
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ assert_spin_locked(&imp->imp_lock);
+ if (list_empty(&imp->imp_unreplied_list))
+ return 0;
+
+ req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
+ rq_unreplied_list);
+ LASSERTF(req->rq_xid >= 1, "XID:"LPU64"\n", req->rq_xid);
+
+ if (imp->imp_known_replied_xid < req->rq_xid - 1)
+ imp->imp_known_replied_xid = req->rq_xid - 1;
+
+ return req->rq_xid - 1;
+}
+
/**
* Callback function called when client receives RPC reply for \a req.
* Returns 0 on success or error code.
else
req->rq_sent = now + req->rq_nr_resend;
+ /* Resend for EINPROGRESS will use a new XID */
+ spin_lock(&imp->imp_lock);
+ list_del_init(&req->rq_unreplied_list);
+ spin_unlock(&imp->imp_lock);
+
RETURN(0);
}
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
- struct list_head *tmp;
- __u64 min_xid = ~0ULL;
+ __u64 min_xid = 0;
int rc;
ENTRY;
spin_lock(&imp->imp_lock);
- /* the very first time we assign XID. it's important to assign XID
- * and put it on the list atomically, so that the lowest assigned
- * XID is always known. this is vital for multislot last_rcvd */
- if (req->rq_send_state == LUSTRE_IMP_REPLAY) {
- LASSERT(req->rq_xid != 0);
- } else {
- LASSERT(req->rq_xid == 0);
- req->rq_xid = ptlrpc_next_xid();
- }
+ LASSERT(req->rq_xid != 0);
+ LASSERT(!list_empty(&req->rq_unreplied_list));
if (!req->rq_generation_set)
req->rq_import_generation = imp->imp_generation;
list_add_tail(&req->rq_list, &imp->imp_sending_list);
atomic_inc(&req->rq_import->imp_inflight);
- /* find the lowest unreplied XID */
- list_for_each(tmp, &imp->imp_delayed_list) {
- struct ptlrpc_request *r;
- r = list_entry(tmp, struct ptlrpc_request, rq_list);
- if (r->rq_xid < min_xid)
- min_xid = r->rq_xid;
- }
- list_for_each(tmp, &imp->imp_sending_list) {
- struct ptlrpc_request *r;
- r = list_entry(tmp, struct ptlrpc_request, rq_list);
- if (r->rq_xid < min_xid)
- min_xid = r->rq_xid;
- }
+ /* Find the known replied XID from the unreplied list. CONNECT
+ * and DISCONNECT requests are skipped to keep the sanity check
+ * on the server side happy, see process_req_last_xid().
+ *
+ * For CONNECT: since replay requests have lower XIDs, the
+ * sanity check would break if CONNECT bumped the exp_last_xid
+ * on the server.
+ *
+ * For DISCONNECT: since the client aborts inflight RPCs before
+ * sending DISCONNECT, the DISCONNECT may carry an XID higher
+ * than those of the inflight RPCs.
+ */
+ if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
+ min_xid = ptlrpc_known_replied_xid(imp);
spin_unlock(&imp->imp_lock);
- if (likely(min_xid != ~0ULL))
- lustre_msg_set_last_xid(req->rq_reqmsg, min_xid - 1);
+ lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
lustre_msg_set_status(req->rq_reqmsg, current_pid());
rq_set_chain);
struct obd_import *imp = req->rq_import;
int unregistered = 0;
+ int async = 1;
int rc = 0;
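+ /* Move already-completed requests to comp_reqs and skip them */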
+ if (req->rq_phase == RQ_PHASE_COMPLETE) {
+ list_move_tail(&req->rq_set_chain, &comp_reqs);
+ continue;
+ }
+
/* This schedule point is mainly for the ptlrpcd caller of this
* function. Most ptlrpc sets are not long-lived and unbounded
* in length, but at the least the set used by the ptlrpcd is.
*/
cond_resched();
- if (req->rq_phase == RQ_PHASE_NEW &&
- ptlrpc_send_new_req(req)) {
- force_timer_recalc = 1;
- }
+ /* If the caller allows the request to be interrupted and it
+ * has in fact been interrupted, then move the request to the
+ * RQ_PHASE_INTERPRET phase regardless of its current phase. */
+ if (unlikely(req->rq_allow_intr && req->rq_intr)) {
+ req->rq_status = -EINTR;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+
+ /* Since the request will be interpreted now and we have to
+ * wait for the reply to be unlinked, use sync mode. */
+ async = 0;
+
+ GOTO(interpret, req->rq_status);
+ }
+
+ if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
+ force_timer_recalc = 1;
- /* delayed send - skip */
- if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
+ /* delayed send - skip */
+ if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
continue;
/* delayed resend - skip */
req->rq_sent > cfs_time_current_sec())
continue;
- if (!(req->rq_phase == RQ_PHASE_RPC ||
- req->rq_phase == RQ_PHASE_BULK ||
- req->rq_phase == RQ_PHASE_INTERPRET ||
- req->rq_phase == RQ_PHASE_UNREGISTERING ||
- req->rq_phase == RQ_PHASE_COMPLETE)) {
+ if (!(req->rq_phase == RQ_PHASE_RPC ||
+ req->rq_phase == RQ_PHASE_BULK ||
+ req->rq_phase == RQ_PHASE_INTERPRET ||
+ req->rq_phase == RQ_PHASE_UNREGISTERING)) {
DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
LBUG();
}
ptlrpc_rqphase_move(req, req->rq_next_phase);
}
- if (req->rq_phase == RQ_PHASE_COMPLETE) {
- list_move_tail(&req->rq_set_chain, &comp_reqs);
- continue;
- }
-
if (req->rq_phase == RQ_PHASE_INTERPRET)
GOTO(interpret, req->rq_status);
req->rq_status = -EIO;
}
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- interpret:
- LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
+ interpret:
+ LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
- /* This moves to "unregistering" phase we need to wait for
- * reply unlink. */
- if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
- /* start async bulk unlink too */
- ptlrpc_unregister_bulk(req, 1);
- continue;
- }
+ /* This moves the request to the "unregistering" phase; we
+ * need to wait for the reply to be unlinked. */
+ if (!unregistered && !ptlrpc_unregister_reply(req, async)) {
+ /* start async bulk unlink too */
+ ptlrpc_unregister_bulk(req, 1);
+ continue;
+ }
- if (!ptlrpc_unregister_bulk(req, 1))
- continue;
+ if (!ptlrpc_unregister_bulk(req, async))
+ continue;
- /* When calling interpret receiving already should be
- * finished. */
- LASSERT(!req->rq_receiving_reply);
+ /* By the time interpret is called, receiving should already
+ * be finished. */
+ LASSERT(!req->rq_receiving_reply);
- ptlrpc_req_interpret(env, req, req->rq_status);
+ ptlrpc_req_interpret(env, req, req->rq_status);
if (ptlrpcd_check_work(req)) {
atomic_dec(&set->set_remaining);
list_del_init(&req->rq_list);
atomic_dec(&imp->imp_inflight);
}
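+ /* The RPC is complete, drop it from the unreplied list */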
+ list_del_init(&req->rq_unreplied_list);
spin_unlock(&imp->imp_lock);
atomic_dec(&set->set_remaining);
"timed out for sent delay" : "timed out for slow reply"),
req->rq_sent, req->rq_real_sent);
- if (imp != NULL && obd_debug_peer_on_timeout)
- LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+ if (imp != NULL && obd_debug_peer_on_timeout)
+ LNetDebugPeer(imp->imp_connection->c_peer);
ptlrpc_unregister_reply(req, async_unlink);
ptlrpc_unregister_bulk(req, async_unlink);
struct ptlrpc_request *req =
list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+ if (req->rq_intr)
+ continue;
+
if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING)
+ req->rq_phase != RQ_PHASE_UNREGISTERING &&
+ !req->rq_allow_intr)
continue;
ptlrpc_mark_interrupted(req);
CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
set, timeout);
- if (timeout == 0 && !signal_pending(current))
- /*
- * No requests are in-flight (ether timed out
- * or delayed), so we can allow interrupts.
- * We still want to block for a limited time,
- * so we allow interrupts during the timeout.
- */
- lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
- ptlrpc_expired_set,
- ptlrpc_interrupted_set, set);
+ if ((timeout == 0 && !signal_pending(current)) ||
+ set->set_allow_intr)
+ /* No requests are in-flight (either timed out
+ * or delayed), so we can allow interrupts.
+ * We still want to block for a limited time,
+ * so we allow interrupts during the timeout. */
+ lwi = LWI_TIMEOUT_INTR_ALL(
+ cfs_time_seconds(timeout ? timeout : 1),
+ ptlrpc_expired_set,
+ ptlrpc_interrupted_set, set);
else
/*
* At least one request is in flight, so no
/* LU-769 - if we ignored the signal because it was already
* pending when we started, we need to handle it now or we risk
* it being ignored forever */
- if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ if (rc == -ETIMEDOUT &&
+ (!lwi.lwi_allow_intr || set->set_allow_intr) &&
signal_pending(current)) {
sigset_t blocked_sigs =
cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
if (!locked)
spin_lock(&request->rq_import->imp_lock);
list_del_init(&request->rq_replay_list);
+ list_del_init(&request->rq_unreplied_list);
if (!locked)
spin_unlock(&request->rq_import->imp_lock);
}
DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
lustre_msg_get_status(req->rq_repmsg),
aa->praa_old_status);
+
+ /* Note: if the replay fails for MDT-MDT recovery, abort
+ * all of the remaining requests on the replay, committed,
+ * delayed and sending lists, because MDT-MDT update
+ * requests depend on each other, see LU-7039 */
+ if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS) {
+ struct ptlrpc_request *free_req;
+ struct ptlrpc_request *tmp;
+
+ spin_lock(&imp->imp_lock);
+ list_for_each_entry_safe(free_req, tmp,
+ &imp->imp_replay_list,
+ rq_replay_list) {
+ ptlrpc_free_request(free_req);
+ }
+
+ list_for_each_entry_safe(free_req, tmp,
+ &imp->imp_committed_list,
+ rq_replay_list) {
+ ptlrpc_free_request(free_req);
+ }
+
+ list_for_each_entry_safe(free_req, tmp,
+ &imp->imp_delayed_list,
+ rq_list) {
+ spin_lock(&free_req->rq_lock);
+ free_req->rq_err = 1;
+ free_req->rq_status = -EIO;
+ ptlrpc_client_wake_req(free_req);
+ spin_unlock(&free_req->rq_lock);
+ }
+
+ list_for_each_entry_safe(free_req, tmp,
+ &imp->imp_sending_list,
+ rq_list) {
+ spin_lock(&free_req->rq_lock);
+ free_req->rq_err = 1;
+ free_req->rq_status = -EIO;
+ ptlrpc_client_wake_req(free_req);
+ spin_unlock(&free_req->rq_lock);
+ }
+ spin_unlock(&imp->imp_lock);
+ }
} else {
/* Put it back for re-replay. */
lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
LASSERT(bd != NULL);
- if (!req->rq_resend || req->rq_nr_resend != 0) {
+ if (!req->rq_resend) {
/* this request has a new xid, just use it as bulk matchbits */
req->rq_mbits = req->rq_xid;