* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
+#include <libcfs/linux/linux-mem.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
+#include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */
/**
* Helper function. Sends \a len bytes from \a base at offset \a offset
- * over \a conn connection to portal \a portal.
+ * to portal \a portal on the peer identified by \a peer_id.
* Returns 0 on success or error code.
*/
-static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
- lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
- lnet_nid_t self, lnet_process_id_t peer_id,
+static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
+ enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
+ lnet_nid_t self, struct lnet_process_id peer_id,
int portal, __u64 xid, unsigned int offset,
- lnet_handle_md_t *bulk_cookie)
+ struct lnet_handle_md *bulk_cookie)
{
int rc;
- lnet_md_t md;
+ struct lnet_md md;
ENTRY;
LASSERT (portal != 0);
md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
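+ /* threshold = number of events before auto-unlink: SENT plus the
+ * ACK when one is requested, else SENT alone */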
md.options = PTLRPC_MD_OPTIONS;
md.user_ptr = cbid;
- md.eq_handle = ptlrpc_eq_h;
- LNetInvalidateHandle(&md.bulk_handle);
+ md.handler = ptlrpc_handler;
+ LNetInvalidateMDHandle(&md.bulk_handle);
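+ /* if the caller tied a bulk MD to this PUT, record its handle and
+ * drop the ACK request (bulk completion presumably stands in for it) */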
if (bulk_cookie) {
md.bulk_handle = *bulk_cookie;
ack = LNET_NOACK_REQ;
}
- rc = LNetMDBind (md, LNET_UNLINK, mdh);
+ rc = LNetMDBind(&md, LNET_UNLINK, mdh);
if (unlikely(rc != 0)) {
CERROR ("LNetMDBind failed: %d\n", rc);
LASSERT (rc == -ENOMEM);
CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
len, portal, xid, offset);
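+ /* pin ptlrpc_pending so module shutdown waits for this send; the
+ * event callback is expected to drop the reference once the MD
+ * is unlinked */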
+ percpu_ref_get(&ptlrpc_pending);
+
rc = LNetPut(self, *mdh, ack,
peer_id, portal, xid, offset, 0);
if (unlikely(rc != 0)) {
RETURN (0);
}
-static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
+static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
{
int i;
{
struct obd_export *exp = desc->bd_export;
lnet_nid_t self_nid;
- lnet_process_id_t peer_id;
+ struct lnet_process_id peer_id;
int rc = 0;
__u64 mbits;
int posted_md;
int total_md;
- lnet_md_t md;
+ struct lnet_md md;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
desc->bd_failure = 0;
md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq_h;
+ md.handler = ptlrpc_handler;
md.threshold = 2; /* SENT and ACK/REPLY */
for (posted_md = 0; posted_md < total_md; mbits++) {
* page-aligned. Otherwise we'd have to send client bulk
* sizes over and split server buffer accordingly */
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
+ rc = LNetMDBind(&md, LNET_UNLINK, &desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
exp->exp_obd->obd_name, posted_md, rc);
}
break;
}
+ percpu_ref_get(&ptlrpc_pending);
- /* LU-6441: last md is not sent and desc->bd_md_count == 1 */
- if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
- CFS_FAIL_ONCE) &&
- total_md > 1 && posted_md == total_md - 1) {
- posted_md++;
- continue;
- }
+ /* sanity.sh 224c: let's skip the last md */
+ if (posted_md == desc->bd_md_max_brw - 1)
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
+ CFS_FAIL_PTLRPC_OST_BULK_CB2);
/* Network is about to get at the memory */
if (ptlrpc_is_bulk_put_source(desc->bd_type))
desc->bd_portal, mbits, 0, 0);
else
rc = LNetGet(self_nid, desc->bd_mds[posted_md],
- peer_id, desc->bd_portal, mbits, 0);
+ peer_id, desc->bd_portal, mbits, 0, false);
posted_md++;
if (rc != 0) {
*/
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
- struct l_wait_info lwi;
- int rc;
-
LASSERT(!in_interrupt()); /* might sleep */
if (!ptlrpc_server_bulk_active(desc)) /* completed or */
/* The unlink ensures the callback happens ASAP and is the last
* one. If it fails, it must be because completion just happened,
- * but we must still l_wait_event() in this case, to give liblustre
- * a chance to run server_bulk_callback()*/
+ * but we must still wait_event_idle_timeout() in this case, to give
+ * us a chance to run server_bulk_callback()
+ */
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
for (;;) {
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(desc->bd_waitq,
- !ptlrpc_server_bulk_active(desc), &lwi);
- if (rc == 0)
+ int seconds = PTLRPC_REQ_LONG_UNLINK;
+
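+ /* wait in 1-second slices: wait_event_idle_timeout() returns
+ * non-zero as soon as the bulk goes idle, otherwise count down
+ * the LONG_UNLINK budget */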
+ while (seconds > 0 &&
+ wait_event_idle_timeout(desc->bd_waitq,
+ !ptlrpc_server_bulk_active(desc),
+ cfs_time_seconds(1)) == 0)
+ seconds -= 1;
+ if (seconds > 0)
return;
- LASSERT(rc == -ETIMEDOUT);
CWARN("Unexpectedly long timeout: desc %p\n", desc);
}
}
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- lnet_process_id_t peer;
+ struct lnet_process_id peer;
int rc = 0;
- int rc2;
int posted_md;
int total_md;
__u64 mbits;
- lnet_handle_me_t me_h;
- lnet_md_t md;
+ struct lnet_me *me;
+ struct lnet_md md;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
/* cleanup the state of the bulk for it will be reused */
if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
desc->bd_nob_transferred = 0;
- else
- LASSERT(desc->bd_nob_transferred == 0);
+ else if (desc->bd_nob_transferred != 0)
+ /* If the network failed after the RPC was sent, this can
+ * legitimately happen, so return an EIO error rather than
+ * asserting as this code used to. */
+ RETURN(-EIO);
desc->bd_failure = 0;
desc->bd_last_mbits = mbits;
desc->bd_md_count = total_md;
md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq_h;
+ md.handler = ptlrpc_handler;
md.threshold = 1; /* PUT or GET */
for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
LNET_MD_OP_GET : LNET_MD_OP_PUT);
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
- LNET_UNLINK, LNET_INS_AFTER, &me_h);
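+ /* fault injection: fail attaching the final MD so the cleanup of
+ * the already-posted MDs below gets exercised */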
+ if (posted_md > 0 && posted_md + 1 == total_md &&
+ OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
+ rc = -ENOMEM;
+ } else {
+ me = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
+ LNET_UNLINK, LNET_INS_AFTER);
+ rc = PTR_ERR_OR_ZERO(me);
+ }
if (rc != 0) {
CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
break;
}
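+ /* one ptlrpc_pending reference per MD handed to the network */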
+ percpu_ref_get(&ptlrpc_pending);
/* About to let the network at it... */
- rc = LNetMDAttach(me_h, md, LNET_UNLINK,
+ rc = LNetMDAttach(me, &md, LNET_UNLINK,
&desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
- rc2 = LNetMEUnlink(me_h);
- LASSERT(rc2 == 0);
+ LNetMEUnlink(me);
break;
}
}
LASSERT(desc->bd_md_count >= 0);
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
req->rq_status = -ENOMEM;
+ desc->bd_registered = 0;
RETURN(-ENOMEM);
}
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- struct l_wait_info lwi;
- int rc;
ENTRY;
LASSERT(!in_interrupt()); /* might sleep */
+ if (desc)
+ desc->bd_registered = 0;
+
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
- req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+ req->rq_bulk_deadline = ktime_get_real_seconds() +
+ PTLRPC_REQ_LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
RETURN(1); /* never registered */
/* the unlink ensures the callback happens ASAP and is the last
* one. If it fails, it must be because completion just happened,
- * but we must still l_wait_event() in this case to give liblustre
- * a chance to run client_bulk_callback() */
+ * but we must still wait_event_idle_timeout() in this case to give
+ * us a chance to run client_bulk_callback()
+ */
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
wait_queue_head_t *wq = (req->rq_set != NULL) ?
&req->rq_set->set_waitq :
&req->rq_reply_waitq;
- /* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
- lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
- cfs_time_seconds(1), NULL, NULL);
- rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
- if (rc == 0) {
- ptlrpc_rqphase_move(req, req->rq_next_phase);
- RETURN(1);
- }
-
- LASSERT(rc == -ETIMEDOUT);
- DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
- desc);
- }
- RETURN(0);
+ /*
+ * Network access will complete in finite time but the HUGE
+ * timeout lets us CWARN for visibility of sluggish NALs.
+ */
+ int seconds = PTLRPC_REQ_LONG_UNLINK;
+
+ while (seconds > 0 &&
+ wait_event_idle_timeout(*wq,
+ !ptlrpc_client_bulk_active(req),
+ cfs_time_seconds(1)) == 0)
+ seconds -= 1;
+ if (seconds > 0) {
+ ptlrpc_rqphase_move(req, req->rq_next_phase);
+ RETURN(1);
+ }
+
+ DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
+ desc);
+ }
+ RETURN(0);
}
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, cfs_time_current_sec() -
- req->rq_arrival_time.tv_sec, 1);
+ timeout_t service_timeout;
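+ /* measured service time, clamped to [1, at_max] (or to
+ * 1.5 * obd_timeout when adaptive timeouts are off) */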
+ service_timeout = clamp_t(timeout_t, ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec, 1,
+ (AT_OFF ? obd_timeout * 3 / 2 : at_max));
if (!(flags & PTLRPC_REPLY_EARLY) &&
(req->rq_type != PTL_RPC_MSG_ERR) &&
(req->rq_reqmsg != NULL) &&
(MSG_RESENT | MSG_REPLAY |
MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
/* early replies, errors and recovery requests don't count
- * toward our service time estimate */
- int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
+ * toward our service time estimate
+ */
+ timeout_t oldse = at_measured(&svcpt->scp_at_estimate,
+ service_timeout);
if (oldse != 0) {
DEBUG_REQ(D_ADAPTTO, req,
}
}
/* Report actual service time for client latency calc */
- lustre_msg_set_service_time(req->rq_repmsg, service_time);
- /* Report service time estimate for future client reqs, but report 0
+ lustre_msg_set_service_timeout(req->rq_repmsg, service_timeout);
+ /* Report service time estimate for future client reqs, but report 0
* (to be ignored by client) if it's an error reply during recovery.
- * (bz15815) */
- if (req->rq_type == PTL_RPC_MSG_ERR &&
+ * b=15815
+ */
+ if (req->rq_type == PTL_RPC_MSG_ERR &&
(req->rq_export == NULL ||
req->rq_export->exp_obd->obd_recovering)) {
- lustre_msg_set_timeout(req->rq_repmsg, 0);
+ lustre_msg_set_timeout(req->rq_repmsg, 0);
} else {
- __u32 timeout;
+ timeout_t timeout;
if (req->rq_export && req->rq_reqmsg != NULL &&
(flags & PTLRPC_REPLY_EARLY) &&
lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))
- timeout = cfs_time_current_sec() -
- req->rq_arrival_time.tv_sec +
- min(at_extra,
- req->rq_export->exp_obd->
- obd_recovery_timeout / 4);
- else
+ (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
+ struct obd_device *exp_obd = req->rq_export->exp_obd;
+
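+ /* extend the client's view of our deadline past the time already
+ * spent, by at most min(at_extra, obd_recovery_timeout / 4) */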
+ timeout = ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec +
+ min_t(timeout_t, at_extra,
+ exp_obd->obd_recovery_timeout / 4);
+ } else {
timeout = at_get(&svcpt->scp_at_estimate);
+ }
lustre_msg_set_timeout(req->rq_repmsg, timeout);
}
req->rq_export->exp_obd->obd_minor);
}
- /* In order to keep interoprability with the client (< 2.3) which
- * doesn't have pb_jobid in ptlrpc_body, We have to shrink the
- * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the
- * reply buffer on client will be overflow.
- *
- * XXX Remove this whenver we drop the interoprability with such client.
- */
- req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
- sizeof(struct ptlrpc_body_v2), 1);
-
if (req->rq_type != PTL_RPC_MSG_ERR)
req->rq_type = PTL_RPC_MSG_REPLY;
if (unlikely(rc))
goto out;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = ktime_get_real_seconds();
rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
(rs->rs_difficult && !rs->rs_no_ack) ?
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
int rc;
- int rc2;
int mpflag = 0;
- lnet_handle_md_t bulk_cookie;
+ struct lnet_handle_md bulk_cookie;
struct ptlrpc_connection *connection;
- lnet_handle_me_t reply_me_h;
- lnet_md_t reply_md;
+ struct lnet_me *reply_me = NULL;
+ struct lnet_md reply_md;
struct obd_import *imp = request->rq_import;
struct obd_device *obd = imp->imp_obd;
ENTRY;
- LNetInvalidateHandle(&bulk_cookie);
+ LNetInvalidateMDHandle(&bulk_cookie);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
RETURN(0);
spin_unlock(&imp->imp_lock);
lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
- DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for "
- "resend on EINPROGRESS");
+ DEBUG_REQ(D_RPCTRACE, request,
+ "Allocating new XID for resend on EINPROGRESS");
}
if (request->rq_bulk != NULL) {
if (list_empty(&request->rq_unreplied_list) ||
request->rq_xid <= imp->imp_known_replied_xid) {
- DEBUG_REQ(D_ERROR, request, "xid: %llu, replied: %llu, "
- "list_empty:%d\n", request->rq_xid,
- imp->imp_known_replied_xid,
+ DEBUG_REQ(D_ERROR, request,
+ "xid=%llu, replied=%llu, list_empty=%d",
+ request->rq_xid, imp->imp_known_replied_xid,
list_empty(&request->rq_unreplied_list));
LBUG();
}
if (request->rq_resend_cb != NULL)
request->rq_resend_cb(request, &request->rq_async_args);
}
- if (request->rq_memalloc)
- mpflag = cfs_memory_pressure_get_and_set();
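+ /* requests flagged rq_memalloc are on the writeout path: run the
+ * send under PF_MEMALLOC so allocations dip into reserves instead
+ * of recursing into reclaim */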
+ if (request->rq_memalloc)
+ mpflag = memalloc_noreclaim_save();
rc = sptlrpc_cli_wrap_request(request);
- if (rc == -ENOMEM)
- /* set rq_sent so that this request is treated
- * as a delayed send in the upper layers */
- request->rq_sent = cfs_time_current_sec();
if (rc)
GOTO(out, rc);
if (request->rq_bulk != NULL) {
rc = ptlrpc_register_bulk (request);
if (rc != 0)
- GOTO(out, rc);
+ GOTO(cleanup_bulk, rc);
/*
* All the mds in the request will have the same cpt
* encoded in the cookie. So we can just get the first
spin_lock(&request->rq_lock);
request->rq_err = 1;
spin_unlock(&request->rq_lock);
- request->rq_status = rc;
- GOTO(cleanup_bulk, rc);
- }
- } else {
- request->rq_repdata = NULL;
- request->rq_repmsg = NULL;
- }
-
- rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
- connection->c_peer, request->rq_xid, 0,
- LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
- GOTO(cleanup_bulk, rc = -ENOMEM);
- }
- }
+ request->rq_status = rc;
+ GOTO(cleanup_bulk, rc);
+ }
+ } else {
+ request->rq_repdata = NULL;
+ request->rq_repmsg = NULL;
+ }
+
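+ /* fault injection: pretend the reply ME attach failed, driving the
+ * bulk cleanup path below */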
+ if (request->rq_bulk &&
+ OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_REPLY_ATTACH)) {
+ reply_me = ERR_PTR(-ENOMEM);
+ } else {
+ reply_me = LNetMEAttach(request->rq_reply_portal,
+ connection->c_peer,
+ request->rq_xid, 0,
+ LNET_UNLINK, LNET_INS_AFTER);
+ }
+
+ if (IS_ERR(reply_me)) {
+ rc = PTR_ERR(reply_me);
+ CERROR("LNetMEAttach failed: %d\n", rc);
+ LASSERT(rc == -ENOMEM);
+ GOTO(cleanup_bulk, rc = -ENOMEM);
+ }
+ }
spin_lock(&request->rq_lock);
/* We are responsible for unlinking the reply buffer */
reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
LNET_MD_MANAGE_REMOTE |
LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */;
- reply_md.user_ptr = &request->rq_reply_cbid;
- reply_md.eq_handle = ptlrpc_eq_h;
+ reply_md.user_ptr = &request->rq_reply_cbid;
+ reply_md.handler = ptlrpc_handler;
/* We must see the unlink callback to set rq_reply_unlinked,
* so we can't auto-unlink */
- rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
- &request->rq_reply_md_h);
- if (rc != 0) {
- CERROR("LNetMDAttach failed: %d\n", rc);
- LASSERT (rc == -ENOMEM);
+ rc = LNetMDAttach(reply_me, &reply_md, LNET_RETAIN,
+ &request->rq_reply_md_h);
+ if (rc != 0) {
+ CERROR("LNetMDAttach failed: %d\n", rc);
+ LASSERT(rc == -ENOMEM);
spin_lock(&request->rq_lock);
/* ...but the MD attach didn't succeed... */
request->rq_receiving_reply = 0;
spin_unlock(&request->rq_lock);
- GOTO(cleanup_me, rc = -ENOMEM);
- }
+ GOTO(cleanup_me, rc = -ENOMEM);
+ }
+ percpu_ref_get(&ptlrpc_pending);
- CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu"
- ", portal %u\n",
- request->rq_repbuf_len, request->rq_xid,
- request->rq_reply_portal);
- }
+ CDEBUG(D_NET,
+ "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
+ request->rq_repbuf_len, request->rq_xid,
+ request->rq_reply_portal);
+ }
/* add references on request for request_out_callback */
ptlrpc_request_addref(request);
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- do_gettimeofday(&request->rq_sent_tv);
- request->rq_sent = cfs_time_current_sec();
+ request->rq_sent_ns = ktime_get_real();
+ request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
ptlrpc_pinger_sending_on_import(imp);
- DEBUG_REQ(D_INFO, request, "send flg=%x",
+ DEBUG_REQ(D_INFO, request, "send flags=%x",
lustre_msg_get_flags(request->rq_reqmsg));
rc = ptl_send_buf(&request->rq_req_md_h,
request->rq_reqbuf, request->rq_reqdata_len,
GOTO(out, rc);
cleanup_me:
- /* MEUnlink is safe; the PUT didn't even get off the ground, and
- * nobody apart from the PUT's target has the right nid+XID to
- * access the reply buffer. */
- rc2 = LNetMEUnlink(reply_me_h);
- LASSERT (rc2 == 0);
- /* UNLINKED callback called synchronously */
- LASSERT(!request->rq_receiving_reply);
+ /* MEUnlink is safe; the PUT didn't even get off the ground, and
+ * nobody apart from the PUT's target has the right nid+XID to
+ * access the reply buffer.
+ */
+ LNetMEUnlink(reply_me);
+ /* UNLINKED callback called synchronously */
+ LASSERT(!request->rq_receiving_reply);
cleanup_bulk:
- /* We do sync unlink here as there was no real transfer here so
- * the chance to have long unlink to sluggish net is smaller here. */
- ptlrpc_unregister_bulk(request, 0);
+ /* Do a sync unlink here: no real transfer took place, so the
+ * chance of a long unlink stuck behind a sluggish net is smaller. */
+ ptlrpc_unregister_bulk(request, 0);
out:
- if (request->rq_memalloc)
- cfs_memory_pressure_restore(mpflag);
- return rc;
+ if (rc == -ENOMEM) {
+ /* set rq_sent so that this request is treated
+ * as a delayed send in the upper layers */
+ request->rq_sent = ktime_get_real_seconds();
+ }
+
+ if (request->rq_memalloc)
+ memalloc_noreclaim_restore(mpflag);
+
+ return rc;
}
EXPORT_SYMBOL(ptl_send_rpc);
*/
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
- struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
- static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
- int rc;
- lnet_md_t md;
- lnet_handle_me_t me_h;
+ struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
+ static struct lnet_process_id match_id = {
+ .nid = LNET_NID_ANY,
+ .pid = LNET_PID_ANY
+ };
+ int rc;
+ struct lnet_md md;
+ struct lnet_me *me;
CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
service->srv_req_portal);
/* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
* which means buffer can only be attached on local CPT, and LND
* threads can find it by grabbing a local lock */
- rc = LNetMEAttach(service->srv_req_portal,
+ me = LNetMEAttach(service->srv_req_portal,
match_id, 0, ~0, LNET_UNLINK,
rqbd->rqbd_svcpt->scp_cpt >= 0 ?
- LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
- if (rc != 0) {
- CERROR("LNetMEAttach failed: %d\n", rc);
- return (-ENOMEM);
- }
-
- LASSERT(rqbd->rqbd_refcount == 0);
- rqbd->rqbd_refcount = 1;
-
- md.start = rqbd->rqbd_buffer;
- md.length = service->srv_buf_size;
- md.max_size = service->srv_max_req_size;
- md.threshold = LNET_MD_THRESH_INF;
- md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
- md.user_ptr = &rqbd->rqbd_cbid;
- md.eq_handle = ptlrpc_eq_h;
+ LNET_INS_LOCAL : LNET_INS_AFTER);
+ if (IS_ERR(me)) {
+ CERROR("LNetMEAttach failed: %ld\n", PTR_ERR(me));
+ return -ENOMEM;
+ }
- rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
- if (rc == 0)
- return (0);
+ LASSERT(rqbd->rqbd_refcount == 0);
+ rqbd->rqbd_refcount = 1;
+
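+ /* infinite threshold plus LNET_MD_MAX_SIZE: incoming PUTs pack into
+ * this buffer until less than srv_max_req_size bytes remain free,
+ * then the MD auto-unlinks */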
+ md.start = rqbd->rqbd_buffer;
+ md.length = service->srv_buf_size;
+ md.max_size = service->srv_max_req_size;
+ md.threshold = LNET_MD_THRESH_INF;
+ md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
+ md.user_ptr = &rqbd->rqbd_cbid;
+ md.handler = ptlrpc_handler;
+
+ rc = LNetMDAttach(me, &md, LNET_UNLINK, &rqbd->rqbd_md_h);
+ if (rc == 0) {
+ percpu_ref_get(&ptlrpc_pending);
+ return 0;
+ }
- CERROR("LNetMDAttach failed: %d; \n", rc);
- LASSERT (rc == -ENOMEM);
- rc = LNetMEUnlink (me_h);
- LASSERT (rc == 0);
- rqbd->rqbd_refcount = 0;
+ CERROR("ptlrpc: LNetMDAttach failed: rc = %d\n", rc);
+ LASSERT(rc == -ENOMEM);
+ LNetMEUnlink(me);
+ rqbd->rqbd_refcount = 0;
- return (-ENOMEM);
+ return -ENOMEM;
}