*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_RPC
+#include <libcfs/linux/linux-mem.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
md.options = PTLRPC_MD_OPTIONS;
md.user_ptr = cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
LNetInvalidateMDHandle(&md.bulk_handle);
if (bulk_cookie) {
ack = LNET_NOACK_REQ;
}
- rc = LNetMDBind (md, LNET_UNLINK, mdh);
+ rc = LNetMDBind(&md, LNET_UNLINK, mdh);
if (unlikely(rc != 0)) {
CERROR ("LNetMDBind failed: %d\n", rc);
LASSERT (rc == -ENOMEM);
RETURN (0);
}
-static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
+/*
+ * Unlink every LNet MD handle in bd_mds[0..count-1].
+ *
+ * The original mdunlink_iterate_helper() keeps its name via a macro wrapper
+ * so existing callers are unchanged; the new "discard" flag is forwarded to
+ * __LNetMDUnlink() and is only set true by callers that are aborting a bulk
+ * transfer (see the bd_md_max_brw teardown path) — presumably it tells LNet
+ * to drop any in-flight data instead of delivering it; confirm against the
+ * __LNetMDUnlink() definition.
+ */
+#define mdunlink_iterate_helper(mds, count) \
+	__mdunlink_iterate_helper(mds, count, false)
+static void __mdunlink_iterate_helper(struct lnet_handle_md *bd_mds,
+				      int count, bool discard)
{
int i;
for (i = 0; i < count; i++)
-	LNetMDUnlink(bd_mds[i]);
+	__LNetMDUnlink(bd_mds[i], discard);
}
#ifdef HAVE_SERVER_SUPPORT
RETURN(0);
/* NB no locking required until desc is on the network */
- LASSERT(desc->bd_md_count == 0);
LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));
LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
* off high bits to get bulk count for this RPC. LU-1431 */
mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
total_md = desc->bd_req->rq_mbits - mbits + 1;
-
- desc->bd_md_count = total_md;
+ desc->bd_refs = total_md;
desc->bd_failure = 0;
md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
md.threshold = 2; /* SENT and ACK/REPLY */
for (posted_md = 0; posted_md < total_md; mbits++) {
* page-aligned. Otherwise we'd have to send client bulk
* sizes over and split server buffer accordingly */
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
+ rc = LNetMDBind(&md, LNET_UNLINK, &desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
exp->exp_obd->obd_name, posted_md, rc);
* event this creates will signal completion with failure,
* so we return SUCCESS here! */
spin_lock(&desc->bd_lock);
- desc->bd_md_count -= total_md - posted_md;
+ desc->bd_refs -= total_md - posted_md;
spin_unlock(&desc->bd_lock);
- LASSERT(desc->bd_md_count >= 0);
+ LASSERT(desc->bd_refs >= 0);
mdunlink_iterate_helper(desc->bd_mds, posted_md);
RETURN(0);
* but we must still wait_event_idle_timeout() in this case, to give
* us a chance to run server_bulk_callback()
*/
- mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
+ __mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw, true);
for (;;) {
/* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs */
- int seconds = LONG_UNLINK;
+ int seconds = PTLRPC_REQ_LONG_UNLINK;
while (seconds > 0 &&
wait_event_idle_timeout(desc->bd_waitq,
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
- struct lnet_process_id peer;
+ struct lnet_processid peer;
int rc = 0;
int posted_md;
int total_md;
/* NB no locking required until desc is on the network */
LASSERT(desc->bd_nob > 0);
- LASSERT(desc->bd_md_count == 0);
LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
LASSERT(desc->bd_req != NULL);
desc->bd_failure = 0;
- peer = desc->bd_import->imp_connection->c_peer;
+ peer.pid = desc->bd_import->imp_connection->c_peer.pid;
+ lnet_nid4_to_nid(desc->bd_import->imp_connection->c_peer.nid,
+ &peer.nid);
LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
LASSERT(desc->bd_cbid.cbid_arg == desc);
- total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+ total_md = desc->bd_md_count;
/* rq_mbits is matchbits of the final bulk */
- mbits = req->rq_mbits - total_md + 1;
+ mbits = req->rq_mbits - desc->bd_md_count + 1;
LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
"first mbits = x%llu, last mbits = x%llu\n",
desc->bd_registered = 1;
desc->bd_last_mbits = mbits;
- desc->bd_md_count = total_md;
+ desc->bd_refs = total_md;
md.user_ptr = &desc->bd_cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
md.threshold = 1; /* PUT or GET */
- for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
+ for (posted_md = 0; posted_md < desc->bd_md_count;
+ posted_md++, mbits++) {
md.options = PTLRPC_MD_OPTIONS |
(ptlrpc_is_bulk_op_get(desc->bd_type) ?
LNET_MD_OP_GET : LNET_MD_OP_PUT);
ptlrpc_fill_bulk_md(&md, desc, posted_md);
- if (posted_md > 0 && posted_md + 1 == total_md &&
+ if (posted_md > 0 && posted_md + 1 == desc->bd_md_count &&
OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
rc = -ENOMEM;
} else {
- me = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
+ me = LNetMEAttach(desc->bd_portal, &peer, mbits, 0,
LNET_UNLINK, LNET_INS_AFTER);
rc = PTR_ERR_OR_ZERO(me);
}
percpu_ref_get(&ptlrpc_pending);
/* About to let the network at it... */
- rc = LNetMDAttach(me, md, LNET_UNLINK,
+ rc = LNetMDAttach(me, &md, LNET_UNLINK,
&desc->bd_mds[posted_md]);
if (rc != 0) {
CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
desc->bd_import->imp_obd->obd_name, mbits,
posted_md, rc);
- LNetMEUnlink(me);
break;
}
}
if (rc != 0) {
LASSERT(rc == -ENOMEM);
spin_lock(&desc->bd_lock);
- desc->bd_md_count -= total_md - posted_md;
+ desc->bd_refs -= total_md - posted_md;
spin_unlock(&desc->bd_lock);
- LASSERT(desc->bd_md_count >= 0);
+ LASSERT(desc->bd_refs >= 0);
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
req->rq_status = -ENOMEM;
desc->bd_registered = 0;
spin_lock(&desc->bd_lock);
/* Holler if peer manages to touch buffers before he knows the mbits */
- if (desc->bd_md_count != total_md)
+ if (desc->bd_refs != total_md)
CWARN("%s: Peer %s touched %d buffers while I registered\n",
- desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
- total_md - desc->bd_md_count);
+ desc->bd_import->imp_obd->obd_name, libcfs_idstr(&peer),
+ total_md - desc->bd_refs);
spin_unlock(&desc->bd_lock);
- CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
- "mbits x%#llx-%#llx, portal %u\n", desc->bd_md_count,
+ CDEBUG(D_NET,
+ "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
+ desc->bd_refs,
ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
desc->bd_iov_count, desc->bd_nob,
desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
LASSERT(!in_interrupt()); /* might sleep */
+ if (desc)
+ desc->bd_registered = 0;
+
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
- req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
+ req->rq_bulk_deadline = ktime_get_real_seconds() +
+ PTLRPC_REQ_LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
RETURN(1); /* never registered */
* Network access will complete in finite time but the HUGE
* timeout lets us CWARN for visibility of sluggish NALs.
*/
- int seconds = LONG_UNLINK;
+ int seconds = PTLRPC_REQ_LONG_UNLINK;
while (seconds > 0 &&
wait_event_idle_timeout(*wq,
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, ktime_get_real_seconds() -
- req->rq_arrival_time.tv_sec, 1);
+ timeout_t service_timeout;
+ service_timeout = clamp_t(timeout_t, ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec, 1,
+ (AT_OFF ? obd_timeout * 3 / 2 : at_max));
if (!(flags & PTLRPC_REPLY_EARLY) &&
(req->rq_type != PTL_RPC_MSG_ERR) &&
(req->rq_reqmsg != NULL) &&
(MSG_RESENT | MSG_REPLAY |
MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
/* early replies, errors and recovery requests don't count
- * toward our service time estimate */
- int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
+ * toward our service time estimate
+ */
+ timeout_t oldse = at_measured(&svcpt->scp_at_estimate,
+ service_timeout);
if (oldse != 0) {
DEBUG_REQ(D_ADAPTTO, req,
}
}
/* Report actual service time for client latency calc */
- lustre_msg_set_service_time(req->rq_repmsg, service_time);
+ lustre_msg_set_service_timeout(req->rq_repmsg, service_timeout);
/* Report service time estimate for future client reqs, but report 0
* (to be ignored by client) if it's an error reply during recovery.
* b=15815
req->rq_export->exp_obd->obd_recovering)) {
lustre_msg_set_timeout(req->rq_repmsg, 0);
} else {
- time64_t timeout;
+ timeout_t timeout;
if (req->rq_export && req->rq_reqmsg != NULL &&
(flags & PTLRPC_REPLY_EARLY) &&
timeout = ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec +
- min_t(time64_t, at_extra,
+ min_t(timeout_t, at_extra,
exp_obd->obd_recovery_timeout / 4);
} else {
timeout = at_get(&svcpt->scp_at_estimate);
LNET_ACK_REQ : LNET_NOACK_REQ,
&rs->rs_cb_id, req->rq_self, req->rq_source,
ptlrpc_req2svc(req)->srv_rep_portal,
- req->rq_xid, req->rq_reply_off, NULL);
+ req->rq_rep_mbits ? req->rq_rep_mbits : req->rq_xid,
+ req->rq_reply_off, NULL);
out:
if (unlikely(rc != 0))
ptlrpc_req_drop_rs(req);
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
int rc;
+ __u32 opc;
int mpflag = 0;
+ bool rep_mbits = false;
struct lnet_handle_md bulk_cookie;
+ struct lnet_processid peer;
struct ptlrpc_connection *connection;
struct lnet_me *reply_me = NULL;
struct lnet_md reply_md;
LNetInvalidateMDHandle(&bulk_cookie);
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
- RETURN(0);
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
+ RETURN(0);
- LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
- LASSERT(request->rq_wait_ctx == 0);
+ if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DELAY_RECOV) &&
+ lustre_msg_get_opc(request->rq_reqmsg) == MDS_CONNECT &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) == 0)) {
+ RETURN(0);
+ }
+
+ LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
+ LASSERT(request->rq_wait_ctx == 0);
- /* If this is a re-transmit, we're required to have disengaged
- * cleanly from the previous attempt */
- LASSERT(!request->rq_receiving_reply);
+ /* If this is a re-transmit, we're required to have disengaged
+ * cleanly from the previous attempt */
+ LASSERT(!request->rq_receiving_reply);
LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
- (imp->imp_state == LUSTRE_IMP_FULL)));
+ (imp->imp_state == LUSTRE_IMP_FULL)));
if (unlikely(obd != NULL && obd->obd_fail)) {
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- obd->obd_name);
+ obd->obd_name);
/* this prevents us from waiting in ptlrpc_queue_wait */
spin_lock(&request->rq_lock);
request->rq_err = 1;
spin_unlock(&request->rq_lock);
- request->rq_status = -ENODEV;
- RETURN(-ENODEV);
- }
+ request->rq_status = -ENODEV;
+ RETURN(-ENODEV);
+ }
connection = imp->imp_connection;
"Allocating new XID for resend on EINPROGRESS");
}
- if (request->rq_bulk != NULL) {
- ptlrpc_set_bulk_mbits(request);
+ opc = lustre_msg_get_opc(request->rq_reqmsg);
+ if (opc != OST_CONNECT && opc != MDS_CONNECT &&
+ opc != MGS_CONNECT && OCD_HAS_FLAG(&imp->imp_connect_data, FLAGS2))
+ rep_mbits = imp->imp_connect_data.ocd_connect_flags2 &
+ OBD_CONNECT2_REP_MBITS;
+
+ if ((request->rq_bulk != NULL) || rep_mbits) {
+ ptlrpc_set_mbits(request);
lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
}
LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
(imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
!(imp->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_AT));
+ OBD_CONNECT_AT));
if (request->rq_resend) {
lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
request->rq_resend_cb(request, &request->rq_async_args);
}
if (request->rq_memalloc)
- mpflag = cfs_memory_pressure_get_and_set();
+ mpflag = memalloc_noreclaim_save();
rc = sptlrpc_cli_wrap_request(request);
if (rc)
bulk_cookie = request->rq_bulk->bd_mds[0];
}
- if (!noreply) {
- LASSERT (request->rq_replen != 0);
- if (request->rq_repbuf == NULL) {
- LASSERT(request->rq_repdata == NULL);
- LASSERT(request->rq_repmsg == NULL);
- rc = sptlrpc_cli_alloc_repbuf(request,
- request->rq_replen);
- if (rc) {
- /* this prevents us from looping in
- * ptlrpc_queue_wait */
+ if (!noreply) {
+ LASSERT (request->rq_replen != 0);
+ if (request->rq_repbuf == NULL) {
+ LASSERT(request->rq_repdata == NULL);
+ LASSERT(request->rq_repmsg == NULL);
+ rc = sptlrpc_cli_alloc_repbuf(request,
+ request->rq_replen);
+ if (rc) {
+ /* this prevents us from looping in
+ * ptlrpc_queue_wait */
spin_lock(&request->rq_lock);
request->rq_err = 1;
spin_unlock(&request->rq_lock);
request->rq_repmsg = NULL;
}
- reply_me = LNetMEAttach(request->rq_reply_portal,
- connection->c_peer, request->rq_xid, 0,
- LNET_UNLINK, LNET_INS_AFTER);
+ peer.pid = connection->c_peer.pid;
+ lnet_nid4_to_nid(connection->c_peer.nid, &peer.nid);
+ if (request->rq_bulk &&
+ OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_REPLY_ATTACH)) {
+ reply_me = ERR_PTR(-ENOMEM);
+ } else {
+ reply_me = LNetMEAttach(request->rq_reply_portal,
+ &peer,
+ rep_mbits ? request->rq_mbits :
+ request->rq_xid,
+ 0, LNET_UNLINK, LNET_INS_AFTER);
+ }
+
if (IS_ERR(reply_me)) {
rc = PTR_ERR(reply_me);
CERROR("LNetMEAttach failed: %d\n", rc);
request->rq_receiving_reply = !noreply;
/* Clear any flags that may be present from previous sends. */
request->rq_req_unlinked = 0;
- request->rq_replied = 0;
- request->rq_err = 0;
- request->rq_timedout = 0;
- request->rq_net_err = 0;
- request->rq_resend = 0;
- request->rq_restart = 0;
+ request->rq_replied = 0;
+ request->rq_err = 0;
+ request->rq_timedout = 0;
+ request->rq_net_err = 0;
+ request->rq_resend = 0;
+ request->rq_restart = 0;
request->rq_reply_truncated = 0;
spin_unlock(&request->rq_lock);
- if (!noreply) {
- reply_md.start = request->rq_repbuf;
- reply_md.length = request->rq_repbuf_len;
- /* Allow multiple early replies */
- reply_md.threshold = LNET_MD_THRESH_INF;
- /* Manage remote for early replies */
- reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
- LNET_MD_MANAGE_REMOTE |
- LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */;
+ if (!noreply) {
+ reply_md.start = request->rq_repbuf;
+ reply_md.length = request->rq_repbuf_len;
+ /* Allow multiple early replies */
+ reply_md.threshold = LNET_MD_THRESH_INF;
+ /* Manage remote for early replies */
+ reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
+ LNET_MD_MANAGE_REMOTE |
+ LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */;
reply_md.user_ptr = &request->rq_reply_cbid;
- reply_md.eq_handle = ptlrpc_eq;
+ reply_md.handler = ptlrpc_handler;
/* We must see the unlink callback to set rq_reply_unlinked,
* so we can't auto-unlink */
- rc = LNetMDAttach(reply_me, reply_md, LNET_RETAIN,
+ rc = LNetMDAttach(reply_me, &reply_md, LNET_RETAIN,
&request->rq_reply_md_h);
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
/* ...but the MD attach didn't succeed... */
request->rq_receiving_reply = 0;
spin_unlock(&request->rq_lock);
- GOTO(cleanup_me, rc = -ENOMEM);
+ GOTO(cleanup_bulk, rc = -ENOMEM);
}
percpu_ref_get(&ptlrpc_pending);
request->rq_reply_portal);
}
- /* add references on request for request_out_callback */
- ptlrpc_request_addref(request);
+ /* add references on request for request_out_callback */
+ ptlrpc_request_addref(request);
if (obd != NULL && obd->obd_svc_stats != NULL)
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
- atomic_read(&imp->imp_inflight));
+ atomic_read(&imp->imp_inflight));
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
request->rq_sent_ns = ktime_get_real();
request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
- add the network latency for our local timeout. */
- request->rq_deadline = request->rq_sent + request->rq_timeout +
- ptlrpc_at_get_net_latency(request);
-
- ptlrpc_pinger_sending_on_import(imp);
+ * add the network latency for our local timeout.
+ */
+ request->rq_deadline = request->rq_sent + request->rq_timeout +
+ ptlrpc_at_get_net_latency(request);
DEBUG_REQ(D_INFO, request, "send flags=%x",
lustre_msg_get_flags(request->rq_reqmsg));
GOTO(out, rc);
request->rq_req_unlinked = 1;
- ptlrpc_req_finished(request);
- if (noreply)
- GOTO(out, rc);
-
- cleanup_me:
- /* MEUnlink is safe; the PUT didn't even get off the ground, and
- * nobody apart from the PUT's target has the right nid+XID to
- * access the reply buffer.
- */
- LNetMEUnlink(reply_me);
+ ptlrpc_req_finished(request);
+ if (noreply)
+ GOTO(out, rc);
+
+ LNetMDUnlink(request->rq_reply_md_h);
+
/* UNLINKED callback called synchronously */
LASSERT(!request->rq_receiving_reply);
cleanup_bulk:
/* We do sync unlink here as there was no real transfer here so
* the chance to have long unlink to sluggish net is smaller here. */
- ptlrpc_unregister_bulk(request, 0);
- if (request->rq_bulk != NULL)
- request->rq_bulk->bd_registered = 0;
+ ptlrpc_unregister_bulk(request, 0);
out:
if (rc == -ENOMEM) {
/* set rq_sent so that this request is treated
}
if (request->rq_memalloc)
- cfs_memory_pressure_restore(mpflag);
+ memalloc_noreclaim_restore(mpflag);
return rc;
}
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
- static struct lnet_process_id match_id = {
- .nid = LNET_NID_ANY,
+ static struct lnet_processid match_id = {
+ .nid = LNET_ANY_NID,
.pid = LNET_PID_ANY
};
int rc;
* which means buffer can only be attached on local CPT, and LND
* threads can find it by grabbing a local lock */
me = LNetMEAttach(service->srv_req_portal,
- match_id, 0, ~0, LNET_UNLINK,
+ &match_id, 0, ~0, LNET_UNLINK,
rqbd->rqbd_svcpt->scp_cpt >= 0 ?
LNET_INS_LOCAL : LNET_INS_AFTER);
if (IS_ERR(me)) {
md.threshold = LNET_MD_THRESH_INF;
md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
md.user_ptr = &rqbd->rqbd_cbid;
- md.eq_handle = ptlrpc_eq;
+ md.handler = ptlrpc_handler;
- rc = LNetMDAttach(me, md, LNET_UNLINK, &rqbd->rqbd_md_h);
+ rc = LNetMDAttach(me, &md, LNET_UNLINK, &rqbd->rqbd_md_h);
if (rc == 0) {
percpu_ref_get(&ptlrpc_pending);
return 0;
CERROR("ptlrpc: LNetMDAttach failed: rc = %d\n", rc);
LASSERT(rc == -ENOMEM);
- LNetMEUnlink(me);
LASSERT(rc == 0);
rqbd->rqbd_refcount = 0;