*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
+#include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */
/**
* Helper function. Sends \a len bytes from \a base at offset \a offset
* over the connection identified by \a self and \a peer_id to portal \a portal.
* Returns 0 on success or error code.
*/
-static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
- struct ptlrpc_connection *conn, int portal, __u64 xid,
- unsigned int offset)
+static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
+ enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
+ lnet_nid_t self, struct lnet_process_id peer_id,
+ int portal, __u64 xid, unsigned int offset,
+ struct lnet_handle_md *bulk_cookie)
{
- int rc;
- lnet_md_t md;
- ENTRY;
+ int rc;
+ struct lnet_md md;
+ ENTRY;
- LASSERT (portal != 0);
- LASSERT (conn != NULL);
- CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
- md.start = base;
- md.length = len;
- md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
- md.options = PTLRPC_MD_OPTIONS;
- md.user_ptr = cbid;
- md.eq_handle = ptlrpc_eq_h;
+ LASSERT(portal != 0);
+ CDEBUG(D_INFO, "peer_id %s\n", libcfs_id2str(peer_id));
+ md.start = base;
+ md.length = len;
+ md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
+ md.options = PTLRPC_MD_OPTIONS;
+ md.user_ptr = cbid;
+ md.eq_handle = ptlrpc_eq_h;
+ LNetInvalidateMDHandle(&md.bulk_handle);
- if (unlikely(ack == LNET_ACK_REQ &&
- OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))){
- /* don't ask for the ack to simulate failing client */
- ack = LNET_NOACK_REQ;
- }
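+ /*
+ * If the caller registered bulk for this request, record its MD
+ * handle: LNET_MD_BULK_HANDLE tells LNet to associate the bulk MD
+ * with this send, presumably so Multi-Rail can keep the request
+ * and its bulk on the same interface selection.
+ */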
+ if (bulk_cookie) {
+ md.bulk_handle = *bulk_cookie;
+ md.options |= LNET_MD_BULK_HANDLE;
+ }
- rc = LNetMDBind (md, LNET_UNLINK, mdh);
- if (unlikely(rc != 0)) {
- CERROR ("LNetMDBind failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
- RETURN (-ENOMEM);
- }
+ if (unlikely(ack == LNET_ACK_REQ &&
+ OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
+ /* don't ask for the ack to simulate failing client */
+ ack = LNET_NOACK_REQ;
+ }
- CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
- len, portal, xid, offset);
-
- rc = LNetPut (conn->c_self, *mdh, ack,
- conn->c_peer, portal, xid, offset, 0);
- if (unlikely(rc != 0)) {
- int rc2;
- /* We're going to get an UNLINK event when I unlink below,
- * which will complete just like any other failed send, so
- * I fall through and return success here! */
- CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
- libcfs_id2str(conn->c_peer), portal, xid, rc);
- rc2 = LNetMDUnlink(*mdh);
- LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
- }
+ rc = LNetMDBind(md, LNET_UNLINK, mdh);
+ if (unlikely(rc != 0)) {
+ CERROR("LNetMDBind failed: %d\n", rc);
+ LASSERT(rc == -ENOMEM);
+ RETURN(-ENOMEM);
+ }
- RETURN (0);
+ CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
+ len, portal, xid, offset);
+
+ rc = LNetPut(self, *mdh, ack,
+ peer_id, portal, xid, offset, 0);
+ if (unlikely(rc != 0)) {
+ int rc2;
+ /* We're going to get an UNLINK event when I unlink below,
+ * which will complete just like any other failed send, so
+ * I fall through and return success here! */
+ CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
+ libcfs_id2str(peer_id), portal, xid, rc);
+ rc2 = LNetMDUnlink(*mdh);
+ LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
+ }
+
+ RETURN(0);
}
-static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
+static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
{
int i;
#ifdef HAVE_SERVER_SUPPORT
/**
* Prepare bulk descriptor for specified incoming request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags fragments. \a type is the bulk type. \a portal is where
* the bulk is to be sent. Used on server-side after the request was already
* received.
* Returns a pointer to a newly allocated, initialized bulk descriptor or NULL
* on error.
*/
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
- unsigned npages, unsigned max_brw,
- unsigned type, unsigned portal)
+ unsigned nfrags, unsigned max_brw,
+ unsigned int type,
+ unsigned portal,
+ const struct ptlrpc_bulk_frag_ops
+ *ops)
{
struct obd_export *exp = req->rq_export;
struct ptlrpc_bulk_desc *desc;
ENTRY;
- LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
+ LASSERT(ptlrpc_is_bulk_op_active(type));
- desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
if (desc == NULL)
RETURN(NULL);
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
struct obd_export *exp = desc->bd_export;
- struct ptlrpc_connection *conn = exp->exp_connection;
+ lnet_nid_t self_nid;
+ struct lnet_process_id peer_id;
int rc = 0;
- __u64 xid;
+ __u64 mbits;
int posted_md;
int total_md;
- lnet_md_t md;
+ struct lnet_md md;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
/* NB no locking required until desc is on the network */
LASSERT(desc->bd_md_count == 0);
- LASSERT(desc->bd_type == BULK_PUT_SOURCE ||
- desc->bd_type == BULK_GET_SINK);
+ LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));
LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
LASSERT(desc->bd_cbid.cbid_arg == desc);
+ /*
+ * Multi-Rail: get the preferred self and peer NIDs from the
+ * request, so they are based on the route taken by the
+ * message.
+ */
+ self_nid = desc->bd_req->rq_self;
+ peer_id = desc->bd_req->rq_source;
+
/* NB total length may be 0 for a read past EOF, so we send 0
* length bulks, since the client expects bulk events.
*
- * The client may not need all of the bulk XIDs for the RPC. The RPC
- * used the XID of the highest bulk XID needed, and the server masks
+ * The client may not need all of the bulk mbits for the RPC. The RPC
+ * carries the highest bulk mbits needed, and the server masks
* off high bits to get bulk count for this RPC. LU-1431 */
- xid = desc->bd_req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
- total_md = desc->bd_req->rq_xid - xid + 1;
+ mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
+ total_md = desc->bd_req->rq_mbits - mbits + 1;
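+ /* e.g. with rq_mbits == 0x100b and bd_md_max_brw == 4 this yields
+ * mbits == 0x1008 and total_md == 4: four MDs covering match bits
+ * 0x1008..0x100b. */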
desc->bd_md_count = total_md;
desc->bd_failure = 0;
md.eq_handle = ptlrpc_eq_h;
md.threshold = 2; /* SENT and ACK/REPLY */
- for (posted_md = 0; posted_md < total_md; xid++) {
+ for (posted_md = 0; posted_md < total_md; mbits++) {
md.options = PTLRPC_MD_OPTIONS;
/* NB it's assumed that source and sink buffer frags are
}
break;
}
+
+ /* sanity.sh 224c: let's skip the last md */
+ if (posted_md == desc->bd_md_max_brw - 1)
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
+ CFS_FAIL_PTLRPC_OST_BULK_CB2);
+
/* Network is about to get at the memory */
- if (desc->bd_type == BULK_PUT_SOURCE)
- rc = LNetPut(conn->c_self, desc->bd_mds[posted_md],
- LNET_ACK_REQ, conn->c_peer,
- desc->bd_portal, xid, 0, 0);
+ if (ptlrpc_is_bulk_put_source(desc->bd_type))
+ rc = LNetPut(self_nid, desc->bd_mds[posted_md],
+ LNET_ACK_REQ, peer_id,
+ desc->bd_portal, mbits, 0, 0);
else
- rc = LNetGet(conn->c_self, desc->bd_mds[posted_md],
- conn->c_peer, desc->bd_portal, xid, 0);
+ rc = LNetGet(self_nid, desc->bd_mds[posted_md],
+ peer_id, desc->bd_portal, mbits, 0, false);
posted_md++;
if (rc != 0) {
- CERROR("%s: failed bulk transfer with %s:%u x"LPU64": "
+ CERROR("%s: failed bulk transfer with %s:%u x%llu: "
"rc = %d\n", exp->exp_obd->obd_name,
- libcfs_id2str(conn->c_peer), desc->bd_portal,
- xid, rc);
+ libcfs_id2str(peer_id), desc->bd_portal,
+ mbits, rc);
break;
}
}
}
CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
- "id %s xid "LPX64"-"LPX64"\n", desc->bd_iov_count,
- desc->bd_nob, desc->bd_portal, libcfs_id2str(conn->c_peer),
- xid - posted_md, xid - 1);
+ "id %s mbits %#llx-%#llx\n", desc->bd_iov_count,
+ desc->bd_nob, desc->bd_portal, libcfs_id2str(peer_id),
+ mbits - posted_md, mbits - 1);
RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_start_bulk_transfer);
/**
* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
* one. If it fails, it must be because completion just happened,
* but we must still l_wait_event() in this case, to give liblustre
* a chance to run server_bulk_callback()*/
- mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_count);
+ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
for (;;) {
/* Network access will complete in finite time but the HUGE
CWARN("Unexpectedly long timeout: desc %p\n", desc);
}
}
-EXPORT_SYMBOL(ptlrpc_abort_bulk);
#endif /* HAVE_SERVER_SUPPORT */
/**
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- lnet_process_id_t peer;
+ struct lnet_process_id peer;
int rc = 0;
int rc2;
int posted_md;
int total_md;
- __u64 xid;
- lnet_handle_me_t me_h;
- lnet_md_t md;
+ __u64 mbits;
+ struct lnet_handle_me me_h;
+ struct lnet_md md;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
LASSERT(desc->bd_req != NULL);
- LASSERT(desc->bd_type == BULK_PUT_SINK ||
- desc->bd_type == BULK_GET_SOURCE);
+ LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
/* cleanup the state of the bulk for it will be reused */
if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
desc->bd_nob_transferred = 0;
- else
- LASSERT(desc->bd_nob_transferred == 0);
+ else if (desc->bd_nob_transferred != 0)
+ /* If the network failed after the RPC was sent, this condition
+ * can happen. Rather than asserting (as the code here used to),
+ * return -EIO. */
+ RETURN(-EIO);
desc->bd_failure = 0;
LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
LASSERT(desc->bd_cbid.cbid_arg == desc);
- /* An XID is only used for a single request from the client.
- * For retried bulk transfers, a new XID will be allocated in
- * in ptlrpc_check_set() if it needs to be resent, so it is not
- * using the same RDMA match bits after an error.
- *
- * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
- * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
- xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+ total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+ /* rq_mbits is matchbits of the final bulk */
+ mbits = req->rq_mbits - total_md + 1;
+
+ LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
+ "first mbits = x%llu, last mbits = x%llu\n",
+ mbits, req->rq_mbits);
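+ /* e.g. with bd_iov_count == 600 and LNET_MAX_IOV == 256 (its value
+ * in typical builds), total_md == 3; for rq_mbits == 0x1002 the MDs
+ * then use match bits 0x1000..0x1002, satisfying the LASSERTF above. */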
LASSERTF(!(desc->bd_registered &&
req->rq_send_state != LUSTRE_IMP_REPLAY) ||
- xid != desc->bd_last_xid,
- "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
- desc->bd_registered, xid, desc->bd_last_xid);
+ mbits != desc->bd_last_mbits,
+ "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
+ desc->bd_registered, mbits, desc->bd_last_mbits);
- total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
desc->bd_registered = 1;
- desc->bd_last_xid = xid;
+ desc->bd_last_mbits = mbits;
desc->bd_md_count = total_md;
md.user_ptr = &desc->bd_cbid;
md.eq_handle = ptlrpc_eq_h;
md.threshold = 1; /* PUT or GET */
- for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+ for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
md.options = PTLRPC_MD_OPTIONS |
- ((desc->bd_type == BULK_GET_SOURCE) ?
+ (ptlrpc_is_bulk_op_get(desc->bd_type) ?
LNET_MD_OP_GET : LNET_MD_OP_PUT);
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+ if (posted_md > 0 && posted_md + 1 == total_md &&
+ OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
+ rc = -ENOMEM;
+ } else {
+ rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
LNET_UNLINK, LNET_INS_AFTER, &me_h);
+ }
if (rc != 0) {
- CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
+ CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
+ desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
break;
}
rc = LNetMDAttach(me_h, md, LNET_UNLINK,
&desc->bd_mds[posted_md]);
if (rc != 0) {
- CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
- desc->bd_import->imp_obd->obd_name, xid,
+ CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
+ desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
rc2 = LNetMEUnlink(me_h);
LASSERT(rc2 == 0);
LASSERT(desc->bd_md_count >= 0);
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
req->rq_status = -ENOMEM;
+ desc->bd_registered = 0;
RETURN(-ENOMEM);
}
- /* Set rq_xid to matchbits of the final bulk so that server can
- * infer the number of bulks that were prepared */
- req->rq_xid = --xid;
- LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
- "bd_last_xid = x"LPU64", rq_xid = x"LPU64"\n",
- desc->bd_last_xid, req->rq_xid);
-
spin_lock(&desc->bd_lock);
- /* Holler if peer manages to touch buffers before he knows the xid */
+ /* Holler if the peer manages to touch buffers before it knows the mbits */
if (desc->bd_md_count != total_md)
CWARN("%s: Peer %s touched %d buffers while I registered\n",
desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
spin_unlock(&desc->bd_lock);
CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
- "xid x"LPX64"-"LPX64", portal %u\n", desc->bd_md_count,
- desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+ "mbits x%#llx-%#llx, portal %u\n", desc->bd_md_count,
+ ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
desc->bd_iov_count, desc->bd_nob,
- desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+ desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_register_bulk);
/**
* Disconnect a bulk desc from the network. Idempotent. Not
/* Let's set up a deadline for bulk unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0)
- req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+ async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
+ req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
RETURN(1); /* never registered */
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
RETURN(1); /* never registered */
- /* Move to "Unregistering" phase as bulk was not unlinked yet. */
- ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
+ /* Move to "Unregistering" phase as bulk was not unlinked yet. */
+ ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);
- /* Do not wait for unlink to finish. */
- if (async)
- RETURN(0);
+ /* Do not wait for unlink to finish. */
+ if (async)
+ RETURN(0);
- for (;;) {
-#ifdef __KERNEL__
+ for (;;) {
/* The wq argument is ignored by user-space wait_event macros */
wait_queue_head_t *wq = (req->rq_set != NULL) ?
&req->rq_set->set_waitq :
&req->rq_reply_waitq;
-#endif
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
- if (rc == 0) {
- ptlrpc_rqphase_move(req, req->rq_next_phase);
- RETURN(1);
- }
+ /*
+ * Network access will complete in finite time but the HUGE
+ * timeout lets us CWARN for visibility of sluggish NALs.
+ */
+ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
+ if (rc == 0) {
+ ptlrpc_rqphase_move(req, req->rq_next_phase);
+ RETURN(1);
+ }
- LASSERT(rc == -ETIMEDOUT);
- DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
- desc);
- }
- RETURN(0);
+ LASSERT(rc == -ETIMEDOUT);
+ DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
+ desc);
+ }
+ RETURN(0);
}
-EXPORT_SYMBOL(ptlrpc_unregister_bulk);
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, cfs_time_current_sec() -
+ int service_time = max_t(int, ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec, 1);
if (!(flags & PTLRPC_REPLY_EARLY) &&
}
/* Report actual service time for client latency calc */
lustre_msg_set_service_time(req->rq_repmsg, service_time);
- /* Report service time estimate for future client reqs, but report 0
- * (to be ignored by client) if it's a error reply during recovery.
- * (bz15815) */
- if (req->rq_type == PTL_RPC_MSG_ERR &&
- (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
- lustre_msg_set_timeout(req->rq_repmsg, 0);
- else
- lustre_msg_set_timeout(req->rq_repmsg,
- at_get(&svcpt->scp_at_estimate));
-
- if (req->rq_reqmsg &&
- !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
- CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
- "req_flags=%#x magic=%d:%x/%x len=%d\n",
- flags, lustre_msg_get_flags(req->rq_reqmsg),
- lustre_msg_is_v1(req->rq_reqmsg),
- lustre_msg_get_magic(req->rq_reqmsg),
- lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
- }
+ /* Report service time estimate for future client reqs, but report 0
+ * (to be ignored by client) if it's an error reply during recovery.
+ * b=15815
+ */
+ if (req->rq_type == PTL_RPC_MSG_ERR &&
+ (req->rq_export == NULL ||
+ req->rq_export->exp_obd->obd_recovering)) {
+ lustre_msg_set_timeout(req->rq_repmsg, 0);
+ } else {
+ time64_t timeout;
+
+ if (req->rq_export && req->rq_reqmsg != NULL &&
+ (flags & PTLRPC_REPLY_EARLY) &&
+ lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
+ struct obd_device *exp_obd = req->rq_export->exp_obd;
+
+ timeout = ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec +
+ min_t(time64_t, at_extra,
+ exp_obd->obd_recovery_timeout / 4);
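+
+ /* e.g. with at_extra == 30 and a 300s recovery window, an early
+ * reply sent 10s after arrival advertises a timeout of
+ * 10 + min(30, 75) == 40 seconds (values are illustrative). */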
+ } else {
+ timeout = at_get(&svcpt->scp_at_estimate);
+ }
+ lustre_msg_set_timeout(req->rq_repmsg, timeout);
+ }
+
+ if (req->rq_reqmsg &&
+ !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
+ CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
+ "req_flags=%#x magic=%x/%x len=%d\n",
+ flags, lustre_msg_get_flags(req->rq_reqmsg),
+ lustre_msg_get_magic(req->rq_reqmsg),
+ lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
+ }
}
/**
* Send request reply from request \a req reply buffer.
* \a flags defines reply types
- * Returns 0 on sucess or error code
+ * Returns 0 on success or error code
*/
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- struct ptlrpc_connection *conn;
- int rc;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_connection *conn;
+ int rc;
/* We must already have a reply buffer (only ptlrpc_error() may be
* called without one). The reply generated by sptlrpc layer (e.g.
req->rq_export->exp_obd->obd_minor);
}
- /* In order to keep interoprability with the client (< 2.3) which
- * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
- * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
- * reply buffer on client will be overflow.
- *
- * XXX Remove this whenver we drop the interoprability with such client.
- */
- req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
- sizeof(struct ptlrpc_body_v2), 1);
-
if (req->rq_type != PTL_RPC_MSG_ERR)
req->rq_type = PTL_RPC_MSG_REPLY;
if (unlikely(rc))
goto out;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = ktime_get_real_seconds();
- rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
- (rs->rs_difficult && !rs->rs_no_ack) ?
- LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn,
- ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off);
+ rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
+ (rs->rs_difficult && !rs->rs_no_ack) ?
+ LNET_ACK_REQ : LNET_NOACK_REQ,
+ &rs->rs_cb_id, req->rq_self, req->rq_source,
+ ptlrpc_req2svc(req)->srv_rep_portal,
+ req->rq_xid, req->rq_reply_off, NULL);
out:
if (unlikely(rc != 0))
ptlrpc_req_drop_rs(req);
ptlrpc_connection_put(conn);
return rc;
}
-EXPORT_SYMBOL(ptlrpc_send_reply);
int ptlrpc_reply (struct ptlrpc_request *req)
{
else
return (ptlrpc_send_reply(req, 0));
}
-EXPORT_SYMBOL(ptlrpc_reply);
/**
* For request \a req send an error reply back. Create empty
rc = ptlrpc_send_reply(req, may_be_difficult);
RETURN(rc);
}
-EXPORT_SYMBOL(ptlrpc_send_error);
int ptlrpc_error(struct ptlrpc_request *req)
{
return ptlrpc_send_error(req, 0);
}
-EXPORT_SYMBOL(ptlrpc_error);
/**
* Send request \a request.
*/
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
- int rc;
- int rc2;
- int mpflag = 0;
- struct ptlrpc_connection *connection;
- lnet_handle_me_t reply_me_h;
- lnet_md_t reply_md;
- struct obd_device *obd = request->rq_import->imp_obd;
- ENTRY;
+ int rc;
+ int rc2;
+ int mpflag = 0;
+ struct lnet_handle_md bulk_cookie;
+ struct ptlrpc_connection *connection;
+ struct lnet_handle_me reply_me_h;
+ struct lnet_md reply_md;
+ struct obd_import *imp = request->rq_import;
+ struct obd_device *obd = imp->imp_obd;
+ ENTRY;
+
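+ /* bulk_cookie stays invalid unless a bulk is registered below */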
+ LNetInvalidateMDHandle(&bulk_cookie);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
RETURN(0);
/* If this is a re-transmit, we're required to have disengaged
* cleanly from the previous attempt */
LASSERT(!request->rq_receiving_reply);
+ LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
+ (imp->imp_state == LUSTRE_IMP_FULL)));
- if (request->rq_import->imp_obd &&
- request->rq_import->imp_obd->obd_fail) {
- CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- request->rq_import->imp_obd->obd_name);
- /* this prevents us from waiting in ptlrpc_queue_wait */
+ if (unlikely(obd != NULL && obd->obd_fail)) {
+ CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
+ obd->obd_name);
+ /* this prevents us from waiting in ptlrpc_queue_wait */
spin_lock(&request->rq_lock);
request->rq_err = 1;
spin_unlock(&request->rq_lock);
RETURN(-ENODEV);
}
- connection = request->rq_import->imp_connection;
-
- lustre_msg_set_handle(request->rq_reqmsg,
- &request->rq_import->imp_remote_handle);
- lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
- lustre_msg_set_conn_cnt(request->rq_reqmsg,
- request->rq_import->imp_conn_cnt);
- lustre_msghdr_set_flags(request->rq_reqmsg,
- request->rq_import->imp_msghdr_flags);
-
- if (request->rq_resend)
- lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
+ connection = imp->imp_connection;
+
+ lustre_msg_set_handle(request->rq_reqmsg,
+ &imp->imp_remote_handle);
+ lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
+ lustre_msg_set_conn_cnt(request->rq_reqmsg,
+ imp->imp_conn_cnt);
+ lustre_msghdr_set_flags(request->rq_reqmsg,
+ imp->imp_msghdr_flags);
+
+ /* If this is the first resend of a request that got EINPROGRESS,
+ * we need to allocate a new XID (see after_reply()); this differs
+ * from a resend due to reply timeout. */
+ if (request->rq_nr_resend != 0 &&
+ list_empty(&request->rq_unreplied_list)) {
+ __u64 min_xid = 0;
+ /* resend for EINPROGRESS, allocate new xid to avoid reply
+ * reconstruction */
+ spin_lock(&imp->imp_lock);
+ ptlrpc_assign_next_xid_nolock(request);
+ min_xid = ptlrpc_known_replied_xid(imp);
+ spin_unlock(&imp->imp_lock);
+
+ lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
+ DEBUG_REQ(D_RPCTRACE, request,
+ "Allocating new XID for resend on EINPROGRESS");
+ }
- if (request->rq_memalloc)
- mpflag = cfs_memory_pressure_get_and_set();
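+ /* The bulk match bits travel in the request body, so the server
+ * can post matching bulk buffers (see ptlrpc_set_bulk_mbits()). */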
+ if (request->rq_bulk != NULL) {
+ ptlrpc_set_bulk_mbits(request);
+ lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
+ }
- rc = sptlrpc_cli_wrap_request(request);
- if (rc)
- GOTO(out, rc);
+ if (list_empty(&request->rq_unreplied_list) ||
+ request->rq_xid <= imp->imp_known_replied_xid) {
+ DEBUG_REQ(D_ERROR, request,
+ "xid=%llu, replied=%llu, list_empty=%d",
+ request->rq_xid, imp->imp_known_replied_xid,
+ list_empty(&request->rq_unreplied_list));
+ LBUG();
+ }
- /* bulk register should be done after wrap_request() */
- if (request->rq_bulk != NULL) {
- rc = ptlrpc_register_bulk (request);
- if (rc != 0)
- GOTO(out, rc);
- }
+ /* With AT enabled, all requests should have MSGHDR_AT_SUPPORT in
+ * the FULL import state when OBD_CONNECT_AT is set */
+ LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
+ (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
+ !(imp->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_AT));
+
+ if (request->rq_resend) {
+ lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
+ if (request->rq_resend_cb != NULL)
+ request->rq_resend_cb(request, &request->rq_async_args);
+ }
+ if (request->rq_memalloc)
+ mpflag = cfs_memory_pressure_get_and_set();
+
+ rc = sptlrpc_cli_wrap_request(request);
+ if (rc)
+ GOTO(out, rc);
+
+ /* bulk register should be done after wrap_request() */
+ if (request->rq_bulk != NULL) {
+ rc = ptlrpc_register_bulk(request);
+ if (rc != 0)
+ GOTO(cleanup_bulk, rc);
+ /*
+ * All the MDs in the request will have the same CPT
+ * encoded in the cookie, so we can just take the first
+ * one.
+ */
+ bulk_cookie = request->rq_bulk->bd_mds[0];
+ }
if (!noreply) {
LASSERT (request->rq_replen != 0);
spin_lock(&request->rq_lock);
request->rq_err = 1;
spin_unlock(&request->rq_lock);
- request->rq_status = rc;
- GOTO(cleanup_bulk, rc);
- }
- } else {
- request->rq_repdata = NULL;
- request->rq_repmsg = NULL;
- }
+ request->rq_status = rc;
+ GOTO(cleanup_bulk, rc);
+ }
+ } else {
+ request->rq_repdata = NULL;
+ request->rq_repmsg = NULL;
+ }
rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
connection->c_peer, request->rq_xid, 0,
}
spin_lock(&request->rq_lock);
- /* If the MD attach succeeds, there _will_ be a reply_in callback */
- request->rq_receiving_reply = !noreply;
- /* We are responsible for unlinking the reply buffer */
- request->rq_must_unlink = !noreply;
- /* Clear any flags that may be present from previous sends. */
+ /* We are responsible for unlinking the reply buffer */
+ request->rq_reply_unlinked = noreply;
+ request->rq_receiving_reply = !noreply;
+ /* Clear any flags that may be present from previous sends. */
+ request->rq_req_unlinked = 0;
request->rq_replied = 0;
request->rq_err = 0;
request->rq_timedout = 0;
request->rq_net_err = 0;
request->rq_resend = 0;
request->rq_restart = 0;
- request->rq_reply_truncate = 0;
+ request->rq_reply_truncated = 0;
spin_unlock(&request->rq_lock);
if (!noreply) {
reply_md.user_ptr = &request->rq_reply_cbid;
reply_md.eq_handle = ptlrpc_eq_h;
- /* We must see the unlink callback to unset rq_must_unlink,
- so we can't auto-unlink */
+ /* We must see the unlink callback to set rq_reply_unlinked,
+ * so we can't auto-unlink */
rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
&request->rq_reply_md_h);
if (rc != 0) {
GOTO(cleanup_me, rc = -ENOMEM);
}
- CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
+ CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu"
", portal %u\n",
request->rq_repbuf_len, request->rq_xid,
request->rq_reply_portal);
/* add references on request for request_out_callback */
ptlrpc_request_addref(request);
- if (obd->obd_svc_stats != NULL)
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- atomic_read(&request->rq_import->imp_inflight));
+ if (obd != NULL && obd->obd_svc_stats != NULL)
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
+ atomic_read(&imp->imp_inflight));
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- do_gettimeofday(&request->rq_arrival_time);
- request->rq_sent = cfs_time_current_sec();
+ request->rq_sent_ns = ktime_get_real();
+ request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
ptlrpc_at_get_net_latency(request);
- ptlrpc_pinger_sending_on_import(request->rq_import);
+ ptlrpc_pinger_sending_on_import(imp);
- DEBUG_REQ(D_INFO, request, "send flg=%x",
- lustre_msg_get_flags(request->rq_reqmsg));
- rc = ptl_send_buf(&request->rq_req_md_h,
- request->rq_reqbuf, request->rq_reqdata_len,
- LNET_NOACK_REQ, &request->rq_req_cbid,
- connection,
- request->rq_request_portal,
- request->rq_xid, 0);
- if (rc == 0)
- GOTO(out, rc);
+ DEBUG_REQ(D_INFO, request, "send flags=%x",
+ lustre_msg_get_flags(request->rq_reqmsg));
+ rc = ptl_send_buf(&request->rq_req_md_h,
+ request->rq_reqbuf, request->rq_reqdata_len,
+ LNET_NOACK_REQ, &request->rq_req_cbid,
+ LNET_NID_ANY, connection->c_peer,
+ request->rq_request_portal,
+ request->rq_xid, 0, &bulk_cookie);
+ if (likely(rc == 0))
+ GOTO(out, rc);
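+
+ /* ptl_send_buf() only fails if LNetMDBind() failed, i.e. before
+ * anything went on the wire, so no network event will ever unlink
+ * the request MD; mark the request unlinked ourselves. */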
+ request->rq_req_unlinked = 1;
ptlrpc_req_finished(request);
if (noreply)
GOTO(out, rc);
cleanup_me:
- /* MEUnlink is safe; the PUT didn't even get off the ground, and
- * nobody apart from the PUT's target has the right nid+XID to
- * access the reply buffer. */
- rc2 = LNetMEUnlink(reply_me_h);
- LASSERT (rc2 == 0);
- /* UNLINKED callback called synchronously */
- LASSERT(!request->rq_receiving_reply);
+ /* MEUnlink is safe; the PUT didn't even get off the ground, and
+ * nobody apart from the PUT's target has the right nid+XID to
+ * access the reply buffer. */
+ rc2 = LNetMEUnlink(reply_me_h);
+ LASSERT (rc2 == 0);
+ /* UNLINKED callback called synchronously */
+ LASSERT(!request->rq_receiving_reply);
cleanup_bulk:
- /* We do sync unlink here as there was no real transfer here so
- * the chance to have long unlink to sluggish net is smaller here. */
+ /* We do a sync unlink here as there was no real transfer, so the
+ * chance of a long unlink due to a sluggish net is smaller. */
ptlrpc_unregister_bulk(request, 0);
+ if (request->rq_bulk != NULL)
+ request->rq_bulk->bd_registered = 0;
out:
- if (request->rq_memalloc)
- cfs_memory_pressure_restore(mpflag);
- return rc;
+ if (rc == -ENOMEM) {
+ /* set rq_sent so that this request is treated
+ * as a delayed send in the upper layers */
+ request->rq_sent = ktime_get_real_seconds();
+ }
+
+ if (request->rq_memalloc)
+ cfs_memory_pressure_restore(mpflag);
+
+ return rc;
}
EXPORT_SYMBOL(ptl_send_rpc);
*/
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
- struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
- static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
- int rc;
- lnet_md_t md;
- lnet_handle_me_t me_h;
+ struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
+ static struct lnet_process_id match_id = {
+ .nid = LNET_NID_ANY,
+ .pid = LNET_PID_ANY
+ };
+ int rc;
+ struct lnet_md md;
+ struct lnet_handle_me me_h;
CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
service->srv_req_portal);