-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
+#include <obd_class.h>
#include "ptlrpc_internal.h"
+/**
+ * Helper function. Sends \a len bytes from \a base at offset \a offset
 * over connection \a conn to portal \a portal.
+ * Returns 0 on success or error code.
+ */
static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
struct ptlrpc_connection *conn, int portal, __u64 xid,
RETURN (0);
}
-int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
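+/**
+ * Helper function. Unlinks the first \a count MD handles in \a bd_mds.
+ */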
+static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
{
- struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
- int rc;
- int rc2;
- lnet_md_t md;
- __u64 xid;
- ENTRY;
-
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
- RETURN(0);
-
- /* NB no locking required until desc is on the network */
- LASSERT (!desc->bd_network_rw);
- LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
- desc->bd_type == BULK_GET_SINK);
- desc->bd_success = 0;
+ int i;
- md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq_h;
- md.threshold = 2; /* SENT and ACK/REPLY */
- md.options = PTLRPC_MD_OPTIONS;
- ptlrpc_fill_bulk_md(&md, desc);
+ for (i = 0; i < count; i++)
+ LNetMDUnlink(bd_mds[i]);
+}
- LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
- LASSERT (desc->bd_cbid.cbid_arg == desc);
+#ifdef HAVE_SERVER_SUPPORT
+/**
+ * Prepare bulk descriptor for specified incoming request \a req that
+ * can fit \a npages pages. \a type is bulk type. \a portal is where
+ * the bulk is to be sent. Used on server-side after request was already
+ * received.
+ * Returns pointer to newly allocated initialized bulk descriptor or NULL on
+ * error.
+ */
+struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
+ unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal)
+{
+ struct obd_export *exp = req->rq_export;
+ struct ptlrpc_bulk_desc *desc;
- /* NB total length may be 0 for a read past EOF, so we send a 0
- * length bulk, since the client expects a bulk event. */
+ ENTRY;
+ LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
- rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
- if (rc != 0) {
- CERROR("LNetMDBind failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
- RETURN(-ENOMEM);
- }
+ desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ if (desc == NULL)
+ RETURN(NULL);
- /* Client's bulk and reply matchbits are the same */
- xid = desc->bd_req->rq_xid;
- CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
- "id %s xid "LPX64"\n", desc->bd_iov_count,
- desc->bd_nob, desc->bd_portal,
- libcfs_id2str(conn->c_peer), xid);
+ desc->bd_export = class_export_get(exp);
+ desc->bd_req = req;
- /* Network is about to get at the memory */
- desc->bd_network_rw = 1;
+ desc->bd_cbid.cbid_fn = server_bulk_callback;
+ desc->bd_cbid.cbid_arg = desc;
- if (desc->bd_type == BULK_PUT_SOURCE)
- rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
- conn->c_peer, desc->bd_portal, xid, 0, 0);
- else
- rc = LNetGet (conn->c_self, desc->bd_md_h,
- conn->c_peer, desc->bd_portal, xid, 0);
+ /* NB we don't assign rq_bulk here; server-side requests are
+ * re-used, and the handler frees the bulk desc explicitly. */
- if (rc != 0) {
- /* Can't send, so we unlink the MD bound above. The UNLINK
- * event this creates will signal completion with failure,
- * so we return SUCCESS here! */
- CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
- libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
- rc2 = LNetMDUnlink(desc->bd_md_h);
- LASSERT (rc2 == 0);
- }
+ return desc;
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
- RETURN(0);
+/**
+ * Starts bulk transfer for descriptor \a desc on the server.
+ * Returns 0 on success or error code.
+ */
+int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
+{
+ struct obd_export *exp = desc->bd_export;
+ struct ptlrpc_connection *conn = exp->exp_connection;
+ int rc = 0;
+ __u64 xid;
+ int posted_md;
+ int total_md;
+ lnet_md_t md;
+ ENTRY;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
+ RETURN(0);
+
+ /* NB no locking required until desc is on the network */
+ LASSERT(desc->bd_md_count == 0);
+ LASSERT(desc->bd_type == BULK_PUT_SOURCE ||
+ desc->bd_type == BULK_GET_SINK);
+
+ LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
+ LASSERT(desc->bd_cbid.cbid_arg == desc);
+
+ /* NB total length may be 0 for a read past EOF, so we send 0
+ * length bulks, since the client expects bulk events.
+ *
+ * The client may not need all of the bulk XIDs for the RPC. The RPC
+ * used the XID of the highest bulk XID needed, and the server masks
+ * off high bits to get bulk count for this RPC. LU-1431 */
+ xid = desc->bd_req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+ total_md = desc->bd_req->rq_xid - xid + 1;
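+	/* Illustration with hypothetical values: if bd_md_max_brw == 4 and
+	 * rq_xid == 0x1007, then xid == 0x1004 and total_md == 4, i.e. MDs
+	 * are posted for match bits 0x1004..0x1007. */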
+
+ desc->bd_md_count = total_md;
+ desc->bd_failure = 0;
+
+ md.user_ptr = &desc->bd_cbid;
+ md.eq_handle = ptlrpc_eq_h;
+ md.threshold = 2; /* SENT and ACK/REPLY */
+
+ for (posted_md = 0; posted_md < total_md; xid++) {
+ md.options = PTLRPC_MD_OPTIONS;
+
+ /* NB it's assumed that source and sink buffer frags are
+ * page-aligned. Otherwise we'd have to send client bulk
+ * sizes over and split server buffer accordingly */
+ ptlrpc_fill_bulk_md(&md, desc, posted_md);
+ rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
+ if (rc != 0) {
+ CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
+ exp->exp_obd->obd_name, posted_md, rc);
+ LASSERT(rc == -ENOMEM);
+ if (posted_md == 0) {
+ desc->bd_md_count = 0;
+ RETURN(-ENOMEM);
+ }
+ break;
+ }
+ /* Network is about to get at the memory */
+ if (desc->bd_type == BULK_PUT_SOURCE)
+ rc = LNetPut(conn->c_self, desc->bd_mds[posted_md],
+ LNET_ACK_REQ, conn->c_peer,
+ desc->bd_portal, xid, 0, 0);
+ else
+ rc = LNetGet(conn->c_self, desc->bd_mds[posted_md],
+ conn->c_peer, desc->bd_portal, xid, 0);
+
+ posted_md++;
+ if (rc != 0) {
+ CERROR("%s: failed bulk transfer with %s:%u x"LPU64": "
+ "rc = %d\n", exp->exp_obd->obd_name,
+ libcfs_id2str(conn->c_peer), desc->bd_portal,
+ xid, rc);
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ /* Can't send, so we unlink the MD bound above. The UNLINK
+ * event this creates will signal completion with failure,
+ * so we return SUCCESS here! */
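+		/* Discount the MDs that were never posted; unlink events on
+		 * the posted ones will bring bd_md_count down to zero. */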
+ spin_lock(&desc->bd_lock);
+ desc->bd_md_count -= total_md - posted_md;
+ spin_unlock(&desc->bd_lock);
+ LASSERT(desc->bd_md_count >= 0);
+
+ mdunlink_iterate_helper(desc->bd_mds, posted_md);
+ RETURN(0);
+ }
+
+ CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
+ "id %s xid "LPX64"-"LPX64"\n", desc->bd_iov_count,
+ desc->bd_nob, desc->bd_portal, libcfs_id2str(conn->c_peer),
+ xid - posted_md, xid - 1);
+
+ RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_start_bulk_transfer);
-/* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
- * serialises with completion callback) */
+/**
+ * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
+ * serialises with completion callback)
+ */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
- struct l_wait_info lwi;
- int rc;
-
- LASSERT(!in_interrupt()); /* might sleep */
-
- if (!ptlrpc_server_bulk_active(desc)) /* completed or */
- return; /* never started */
-
- /* Do not send any meaningful data over the wire for evicted clients */
- if (desc->bd_export && desc->bd_export->exp_failed)
- ptl_rpc_wipe_bulk_pages(desc);
-
- /* The unlink ensures the callback happens ASAP and is the last
- * one. If it fails, it must be because completion just happened,
- * but we must still l_wait_event() in this case, to give liblustre
- * a chance to run server_bulk_callback()*/
-
- LNetMDUnlink(desc->bd_md_h);
-
- for (;;) {
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(desc->bd_waitq,
- !ptlrpc_server_bulk_active(desc), &lwi);
- if (rc == 0)
- return;
-
- LASSERT(rc == -ETIMEDOUT);
- CWARN("Unexpectedly long timeout: desc %p\n", desc);
- }
+ struct l_wait_info lwi;
+ int rc;
+
+ LASSERT(!in_interrupt()); /* might sleep */
+
+ if (!ptlrpc_server_bulk_active(desc)) /* completed or */
+ return; /* never started */
+
+	/* We used to poison the pages with 0xab here because we did not want
+	 * to send any meaningful data over the wire for evicted clients
+	 * (bug 9297). However, this is no longer safe now that we use the
+	 * page cache on the OSS (bug 20560). */
+
+ /* The unlink ensures the callback happens ASAP and is the last
+ * one. If it fails, it must be because completion just happened,
+ * but we must still l_wait_event() in this case, to give liblustre
+	 * a chance to run server_bulk_callback() */
+ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_count);
+
+ for (;;) {
+ /* Network access will complete in finite time but the HUGE
+ * timeout lets us CWARN for visibility of sluggish NALs */
+ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(desc->bd_waitq,
+ !ptlrpc_server_bulk_active(desc), &lwi);
+ if (rc == 0)
+ return;
+
+ LASSERT(rc == -ETIMEDOUT);
+ CWARN("Unexpectedly long timeout: desc %p\n", desc);
+ }
}
+EXPORT_SYMBOL(ptlrpc_abort_bulk);
+#endif /* HAVE_SERVER_SUPPORT */
+/**
+ * Register bulk at the sender for later transfer.
+ * Returns 0 on success or error code.
+ */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- lnet_process_id_t peer;
- int rc;
- int rc2;
- lnet_handle_me_t me_h;
- lnet_md_t md;
- ENTRY;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ lnet_process_id_t peer;
+ int rc = 0;
+ int rc2;
+ int posted_md;
+ int total_md;
+ __u64 xid;
+ lnet_handle_me_t me_h;
+ lnet_md_t md;
+ ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
RETURN(0);
- /* NB no locking required until desc is on the network */
- LASSERT (desc->bd_nob > 0);
- LASSERT (!desc->bd_network_rw);
- LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT (desc->bd_req != NULL);
- LASSERT (desc->bd_type == BULK_PUT_SINK ||
- desc->bd_type == BULK_GET_SOURCE);
-
- desc->bd_success = 0;
-
- peer = desc->bd_import->imp_connection->c_peer;
-
- md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq_h;
- md.threshold = 1; /* PUT or GET */
- md.options = PTLRPC_MD_OPTIONS |
- ((desc->bd_type == BULK_GET_SOURCE) ?
- LNET_MD_OP_GET : LNET_MD_OP_PUT);
- ptlrpc_fill_bulk_md(&md, desc);
-
- LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
- LASSERT (desc->bd_cbid.cbid_arg == desc);
-
- /* XXX Registering the same xid on retried bulk makes my head
- * explode trying to understand how the original request's bulk
- * might interfere with the retried request -eeb */
- LASSERTF (!desc->bd_registered || req->rq_xid != desc->bd_last_xid,
- "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
- desc->bd_registered, req->rq_xid, desc->bd_last_xid);
- desc->bd_registered = 1;
- desc->bd_last_xid = req->rq_xid;
-
- rc = LNetMEAttach(desc->bd_portal, peer,
- req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
- RETURN (-ENOMEM);
- }
-
- /* About to let the network at it... */
- desc->bd_network_rw = 1;
- rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
- if (rc != 0) {
- CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
- desc->bd_network_rw = 0;
- rc2 = LNetMEUnlink (me_h);
- LASSERT (rc2 == 0);
- RETURN (-ENOMEM);
- }
-
- CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPX64", "
- "portal %u\n",
- desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
- desc->bd_iov_count, desc->bd_nob,
- req->rq_xid, desc->bd_portal);
- RETURN(0);
+ /* NB no locking required until desc is on the network */
+ LASSERT(desc->bd_nob > 0);
+ LASSERT(desc->bd_md_count == 0);
+ LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
+ LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
+ LASSERT(desc->bd_req != NULL);
+ LASSERT(desc->bd_type == BULK_PUT_SINK ||
+ desc->bd_type == BULK_GET_SOURCE);
+
+	/* Clean up the state of the bulk because it will be reused */
+ if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
+ desc->bd_nob_transferred = 0;
+ else
+ LASSERT(desc->bd_nob_transferred == 0);
+
+ desc->bd_failure = 0;
+
+ peer = desc->bd_import->imp_connection->c_peer;
+
+ LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
+ LASSERT(desc->bd_cbid.cbid_arg == desc);
+
+ /* An XID is only used for a single request from the client.
+	 * For retried bulk transfers, a new XID will be allocated in
+	 * ptlrpc_check_set() if it needs to be resent, so it is not
+ * using the same RDMA match bits after an error.
+ *
+ * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
+ * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
+ xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+ LASSERTF(!(desc->bd_registered &&
+ req->rq_send_state != LUSTRE_IMP_REPLAY) ||
+ xid != desc->bd_last_xid,
+ "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
+ desc->bd_registered, xid, desc->bd_last_xid);
+
+ total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
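+	/* Each MD covers at most LNET_MAX_IOV fragments; e.g. assuming
+	 * LNET_MAX_IOV == 256, a 300-page bulk needs total_md == 2. */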
+ desc->bd_registered = 1;
+ desc->bd_last_xid = xid;
+ desc->bd_md_count = total_md;
+ md.user_ptr = &desc->bd_cbid;
+ md.eq_handle = ptlrpc_eq_h;
+ md.threshold = 1; /* PUT or GET */
+
+ for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+ md.options = PTLRPC_MD_OPTIONS |
+ ((desc->bd_type == BULK_GET_SOURCE) ?
+ LNET_MD_OP_GET : LNET_MD_OP_PUT);
+ ptlrpc_fill_bulk_md(&md, desc, posted_md);
+
+ rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+ LNET_UNLINK, LNET_INS_AFTER, &me_h);
+ if (rc != 0) {
+ CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
+ desc->bd_import->imp_obd->obd_name, xid,
+ posted_md, rc);
+ break;
+ }
+
+ /* About to let the network at it... */
+ rc = LNetMDAttach(me_h, md, LNET_UNLINK,
+ &desc->bd_mds[posted_md]);
+ if (rc != 0) {
+ CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
+ desc->bd_import->imp_obd->obd_name, xid,
+ posted_md, rc);
+ rc2 = LNetMEUnlink(me_h);
+ LASSERT(rc2 == 0);
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ LASSERT(rc == -ENOMEM);
+ spin_lock(&desc->bd_lock);
+ desc->bd_md_count -= total_md - posted_md;
+ spin_unlock(&desc->bd_lock);
+ LASSERT(desc->bd_md_count >= 0);
+ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
+ req->rq_status = -ENOMEM;
+ RETURN(-ENOMEM);
+ }
+
+ /* Set rq_xid to matchbits of the final bulk so that server can
+ * infer the number of bulks that were prepared */
+ req->rq_xid = --xid;
+ LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
+ "bd_last_xid = x"LPU64", rq_xid = x"LPU64"\n",
+ desc->bd_last_xid, req->rq_xid);
+
+ spin_lock(&desc->bd_lock);
+ /* Holler if peer manages to touch buffers before he knows the xid */
+ if (desc->bd_md_count != total_md)
+ CWARN("%s: Peer %s touched %d buffers while I registered\n",
+ desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
+ total_md - desc->bd_md_count);
+ spin_unlock(&desc->bd_lock);
+
+ CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
+ "xid x"LPX64"-"LPX64", portal %u\n", desc->bd_md_count,
+ desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+ desc->bd_iov_count, desc->bd_nob,
+ desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+
+ RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_register_bulk);
-/* Disconnect a bulk desc from the network. Idempotent. Not
- * thread-safe (i.e. only interlocks with completion callback). */
+/**
+ * Disconnect a bulk desc from the network. Idempotent. Not
+ * thread-safe (i.e. only interlocks with completion callback).
+ * Returns 1 on success or 0 if network unregistration failed for whatever
+ * reason.
+ */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
- struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- cfs_waitq_t *wq;
- struct l_wait_info lwi;
- int rc;
- ENTRY;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ struct l_wait_info lwi;
+ int rc;
+ ENTRY;
- LASSERT(!in_interrupt()); /* might sleep */
+ LASSERT(!in_interrupt()); /* might sleep */
- /* Let's setup deadline for reply unlink. */
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0)
- req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+	/* Let's set up the deadline for bulk unlink. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
+ async && req->rq_bulk_deadline == 0)
+ req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
- if (!ptlrpc_client_bulk_active(req)) /* completed or */
- RETURN(1); /* never registered */
+ if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
+ RETURN(1); /* never registered */
- LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
+ LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
- /* the unlink ensures the callback happens ASAP and is the last
- * one. If it fails, it must be because completion just happened,
- * but we must still l_wait_event() in this case to give liblustre
- * a chance to run client_bulk_callback() */
+ /* the unlink ensures the callback happens ASAP and is the last
+ * one. If it fails, it must be because completion just happened,
+ * but we must still l_wait_event() in this case to give liblustre
+ * a chance to run client_bulk_callback() */
+ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
- LNetMDUnlink(desc->bd_md_h);
-
- if (!ptlrpc_client_bulk_active(req)) /* completed or */
- RETURN(1); /* never registered */
+ if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
+ RETURN(1); /* never registered */
/* Move to "Unregistering" phase as bulk was not unlinked yet. */
ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
if (async)
RETURN(0);
- if (req->rq_set != NULL)
- wq = &req->rq_set->set_waitq;
- else
- wq = &req->rq_reply_waitq;
-
for (;;) {
+ /* The wq argument is ignored by user-space wait_event macros */
+ wait_queue_head_t *wq = (req->rq_set != NULL) ?
+ &req->rq_set->set_waitq :
+ &req->rq_reply_waitq;
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
}
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_unregister_bulk);
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
- struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ struct ptlrpc_service *svc = svcpt->scp_service;
int service_time = max_t(int, cfs_time_current_sec() -
req->rq_arrival_time.tv_sec, 1);
(req->rq_type != PTL_RPC_MSG_ERR) &&
(req->rq_reqmsg != NULL) &&
!(lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_RESENT | MSG_REPLAY | MSG_LAST_REPLAY))) {
+ (MSG_RESENT | MSG_REPLAY |
+ MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
/* early replies, errors and recovery requests don't count
* toward our service time estimate */
- int oldse = at_add(&svc->srv_at_estimate, service_time);
- if (oldse != 0)
- DEBUG_REQ(D_ADAPTTO, req,
- "svc %s changed estimate from %d to %d",
- svc->srv_name, oldse,
- at_get(&svc->srv_at_estimate));
+ int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
+
+ if (oldse != 0) {
+ DEBUG_REQ(D_ADAPTTO, req,
+ "svc %s changed estimate from %d to %d",
+ svc->srv_name, oldse,
+ at_get(&svcpt->scp_at_estimate));
+ }
}
/* Report actual service time for client latency calc */
lustre_msg_set_service_time(req->rq_repmsg, service_time);
lustre_msg_set_timeout(req->rq_repmsg, 0);
else
lustre_msg_set_timeout(req->rq_repmsg,
- at_get(&svc->srv_at_estimate));
+ at_get(&svcpt->scp_at_estimate));
if (req->rq_reqmsg &&
!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
}
}
+/**
+ * Send request reply from request \a req reply buffer.
+ * \a flags defines reply types.
+ * Returns 0 on success or error code.
+ */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
- struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct ptlrpc_connection *conn;
int rc;
req->rq_export->exp_obd->obd_minor);
}
+	/* In order to keep interoperability with clients (< 2.3) that
+	 * don't have pb_jobid in ptlrpc_body, we have to shrink the
+	 * ptlrpc_body in the reply buffer to ptlrpc_body_v2; otherwise the
+	 * reply buffer on the client will overflow.
+	 *
+	 * XXX Remove this whenever we drop interoperability with such clients.
+	 */
+ req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
+ sizeof(struct ptlrpc_body_v2), 1);
+
if (req->rq_type != PTL_RPC_MSG_ERR)
req->rq_type = PTL_RPC_MSG_REPLY;
lustre_msg_set_type(req->rq_repmsg, req->rq_type);
- lustre_msg_set_status(req->rq_repmsg, req->rq_status);
+ lustre_msg_set_status(req->rq_repmsg,
+ ptlrpc_status_hton(req->rq_status));
lustre_msg_set_opc(req->rq_repmsg,
req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
CERROR("not replying on NULL connection\n"); /* bug 9635 */
return -ENOTCONN;
}
- atomic_inc (&svc->srv_outstanding_replies);
ptlrpc_rs_addref(rs); /* +1 ref for the network */
rc = sptlrpc_svc_wrap_reply(req);
rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
(rs->rs_difficult && !rs->rs_no_ack) ?
LNET_ACK_REQ : LNET_NOACK_REQ,
- &rs->rs_cb_id, conn, svc->srv_rep_portal,
+ &rs->rs_cb_id, conn,
+ ptlrpc_req2svc(req)->srv_rep_portal,
req->rq_xid, req->rq_reply_off);
out:
- if (unlikely(rc != 0)) {
- atomic_dec (&svc->srv_outstanding_replies);
+ if (unlikely(rc != 0))
ptlrpc_req_drop_rs(req);
- }
ptlrpc_connection_put(conn);
return rc;
}
+EXPORT_SYMBOL(ptlrpc_send_reply);
int ptlrpc_reply (struct ptlrpc_request *req)
{
else
return (ptlrpc_send_reply(req, 0));
}
+EXPORT_SYMBOL(ptlrpc_reply);
+/**
+ * For request \a req send an error reply back. Create empty
+ * reply buffers if necessary.
+ */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
int rc;
RETURN(rc);
}
- req->rq_type = PTL_RPC_MSG_ERR;
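+	/* Expected application-level failures (e.g. -ENOENT, -EDQUOT) keep
+	 * the normal reply type; only unexpected errors are flagged as
+	 * PTL_RPC_MSG_ERR. */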
+ if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
+ req->rq_status != -EPERM && req->rq_status != -ENOENT &&
+ req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
+ req->rq_type = PTL_RPC_MSG_ERR;
rc = ptlrpc_send_reply(req, may_be_difficult);
RETURN(rc);
}
+EXPORT_SYMBOL(ptlrpc_send_error);
int ptlrpc_error(struct ptlrpc_request *req)
{
return ptlrpc_send_error(req, 0);
}
+EXPORT_SYMBOL(ptlrpc_error);
+/**
+ * Send request \a request.
+ * If \a noreply is set, don't expect any reply back and don't set up
+ * reply buffers.
+ * Returns 0 on success or error code.
+ */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
int rc;
int rc2;
+ int mpflag = 0;
struct ptlrpc_connection *connection;
lnet_handle_me_t reply_me_h;
lnet_md_t reply_md;
- struct obd_device *obd = request->rq_import->imp_obd;
+ struct obd_import *imp = request->rq_import;
+ struct obd_device *obd = imp->imp_obd;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
/* If this is a re-transmit, we're required to have disengaged
* cleanly from the previous attempt */
LASSERT(!request->rq_receiving_reply);
-
- if (request->rq_import->imp_obd &&
- request->rq_import->imp_obd->obd_fail) {
- CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- request->rq_import->imp_obd->obd_name);
- /* this prevents us from waiting in ptlrpc_queue_wait */
- request->rq_err = 1;
+ LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
+ (imp->imp_state == LUSTRE_IMP_FULL)));
+
+ if (unlikely(obd != NULL && obd->obd_fail)) {
+ CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
+ obd->obd_name);
+ /* this prevents us from waiting in ptlrpc_queue_wait */
+ spin_lock(&request->rq_lock);
+ request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
request->rq_status = -ENODEV;
RETURN(-ENODEV);
}
- connection = request->rq_import->imp_connection;
-
- lustre_msg_set_handle(request->rq_reqmsg,
- &request->rq_import->imp_remote_handle);
- lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
- lustre_msg_set_conn_cnt(request->rq_reqmsg,
- request->rq_import->imp_conn_cnt);
- lustre_msghdr_set_flags(request->rq_reqmsg,
- request->rq_import->imp_msghdr_flags);
+ connection = imp->imp_connection;
+
+ lustre_msg_set_handle(request->rq_reqmsg,
+ &imp->imp_remote_handle);
+ lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
+ lustre_msg_set_conn_cnt(request->rq_reqmsg,
+ imp->imp_conn_cnt);
+ lustre_msghdr_set_flags(request->rq_reqmsg,
+ imp->imp_msghdr_flags);
+
+	/* When AT is enabled, all requests should have AT_SUPPORT in the
+	 * FULL import state when OBD_CONNECT_AT is set */
+ LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
+ (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
+ !(imp->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_AT));
+
+ if (request->rq_resend) {
+ lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
+ if (request->rq_resend_cb != NULL)
+ request->rq_resend_cb(request, &request->rq_async_args);
+ }
+ if (request->rq_memalloc)
+ mpflag = cfs_memory_pressure_get_and_set();
rc = sptlrpc_cli_wrap_request(request);
if (rc)
- RETURN(rc);
+ GOTO(out, rc);
/* bulk register should be done after wrap_request() */
if (request->rq_bulk != NULL) {
rc = ptlrpc_register_bulk (request);
if (rc != 0)
- RETURN(rc);
+ GOTO(out, rc);
}
- if (request->rq_resend)
- lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
-
if (!noreply) {
LASSERT (request->rq_replen != 0);
if (request->rq_repbuf == NULL) {
if (rc) {
/* this prevents us from looping in
* ptlrpc_queue_wait */
- request->rq_err = 1;
+ spin_lock(&request->rq_lock);
+ request->rq_err = 1;
+ spin_unlock(&request->rq_lock);
request->rq_status = rc;
GOTO(cleanup_bulk, rc);
}
}
}
- spin_lock(&request->rq_lock);
- /* If the MD attach succeeds, there _will_ be a reply_in callback */
- request->rq_receiving_reply = !noreply;
- /* We are responsible for unlinking the reply buffer */
- request->rq_must_unlink = !noreply;
- /* Clear any flags that may be present from previous sends. */
+ spin_lock(&request->rq_lock);
+ /* We are responsible for unlinking the reply buffer */
+ request->rq_reply_unlinked = noreply;
+ request->rq_receiving_reply = !noreply;
+ /* Clear any flags that may be present from previous sends. */
+ request->rq_req_unlinked = 0;
request->rq_replied = 0;
request->rq_err = 0;
request->rq_timedout = 0;
request->rq_net_err = 0;
request->rq_resend = 0;
request->rq_restart = 0;
- spin_unlock(&request->rq_lock);
+ request->rq_reply_truncated = 0;
+ spin_unlock(&request->rq_lock);
if (!noreply) {
reply_md.start = request->rq_repbuf;
reply_md.threshold = LNET_MD_THRESH_INF;
/* Manage remote for early replies */
reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
- LNET_MD_MANAGE_REMOTE;
+ LNET_MD_MANAGE_REMOTE |
+			   LNET_MD_TRUNCATE; /* allow truncation to generate an EOVERFLOW error */
reply_md.user_ptr = &request->rq_reply_cbid;
reply_md.eq_handle = ptlrpc_eq_h;
- /* We must see the unlink callback to unset rq_must_unlink,
- so we can't auto-unlink */
+ /* We must see the unlink callback to set rq_reply_unlinked,
+ * so we can't auto-unlink */
rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
&request->rq_reply_md_h);
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
LASSERT (rc == -ENOMEM);
- spin_lock(&request->rq_lock);
- /* ...but the MD attach didn't succeed... */
- request->rq_receiving_reply = 0;
- spin_unlock(&request->rq_lock);
+ spin_lock(&request->rq_lock);
+ /* ...but the MD attach didn't succeed... */
+ request->rq_receiving_reply = 0;
+ spin_unlock(&request->rq_lock);
GOTO(cleanup_me, rc = -ENOMEM);
}
/* add references on request for request_out_callback */
ptlrpc_request_addref(request);
- if (obd->obd_svc_stats != NULL)
- lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- request->rq_import->imp_inflight.counter);
+ if (obd != NULL && obd->obd_svc_stats != NULL)
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
+ atomic_read(&imp->imp_inflight));
- OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- do_gettimeofday(&request->rq_arrival_time);
- request->rq_sent = cfs_time_current_sec();
- /* We give the server rq_timeout secs to process the req, and
- add the network latency for our local timeout. */
+ do_gettimeofday(&request->rq_sent_tv);
+ request->rq_sent = cfs_time_current_sec();
+ /* We give the server rq_timeout secs to process the req, and
+ add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
ptlrpc_at_get_net_latency(request);
- ptlrpc_pinger_sending_on_import(request->rq_import);
+ ptlrpc_pinger_sending_on_import(imp);
DEBUG_REQ(D_INFO, request, "send flg=%x",
lustre_msg_get_flags(request->rq_reqmsg));
connection,
request->rq_request_portal,
request->rq_xid, 0);
- if (rc == 0) {
- ptlrpc_lprocfs_rpc_sent(request);
- RETURN(rc);
- }
+ if (likely(rc == 0))
+ GOTO(out, rc);
+ request->rq_req_unlinked = 1;
ptlrpc_req_finished(request);
if (noreply)
- RETURN(rc);
+ GOTO(out, rc);
cleanup_me:
/* MEUnlink is safe; the PUT didn't even get off the ground, and
	/* We do a sync unlink here as there was no real transfer, so the
	 * chance of a long unlink on a sluggish net is smaller. */
ptlrpc_unregister_bulk(request, 0);
+ out:
+ if (request->rq_memalloc)
+ cfs_memory_pressure_restore(mpflag);
return rc;
}
+EXPORT_SYMBOL(ptl_send_rpc);
+/**
+ * Register request buffer descriptor for request receiving.
+ */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
- struct ptlrpc_service *service = rqbd->rqbd_service;
- static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
- int rc;
+ struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
+ static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
+ int rc;
lnet_md_t md;
lnet_handle_me_t me_h;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
return (-ENOMEM);
- rc = LNetMEAttach(service->srv_req_portal,
- match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
+ /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
+ * which means buffer can only be attached on local CPT, and LND
+ * threads can find it by grabbing a local lock */
+ rc = LNetMEAttach(service->srv_req_portal,
+ match_id, 0, ~0, LNET_UNLINK,
+ rqbd->rqbd_svcpt->scp_cpt >= 0 ?
+ LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
if (rc != 0) {
CERROR("LNetMEAttach failed: %d\n", rc);
return (-ENOMEM);