/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
#include <liblustre.h>
#endif
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include "ptlrpc_internal.h"
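
/* Bind a memory descriptor over [base, base + len) and PUT it to the peer's
 * portal, matched by 'xid' at 'offset'.  Note the asymmetric error handling:
 * a failed LNetMDBind() is returned to the caller, but once the MD is bound,
 * a failed LNetPut() is reported via the UNLINK event through the normal
 * callback path, and this function still returns 0. */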
static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
                         lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
                         struct ptlrpc_connection *conn, int portal, __u64 xid,
                         unsigned int offset)
{
        int              rc;
        lnet_md_t        md;
        ENTRY;

        LASSERT (portal != 0);
        LASSERT (conn != NULL);
        CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
        md.start     = base;
        md.length    = len;
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq_h;

        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
        }

        rc = LNetMDBind (md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR ("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
               len, portal, xid, offset);

        rc = LNetPut (conn->c_self, *mdh, ack,
                      conn->c_peer, portal, xid, offset, 0);
        if (unlikely(rc != 0)) {
                int rc2;
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN (0);
}
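
/* Server-side start of a bulk transfer: bind an MD over the bulk pages and
 * actively move the data, PUTting it to the client for BULK_PUT_SOURCE or
 * GETting it from the client for BULK_GET_SINK.  The client's request xid
 * doubles as the bulk matchbits, matching the buffer the client posted in
 * ptlrpc_register_bulk(). */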
int ptlrpc_start_bulk_transfer (struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
        int                       rc;
        int                       rc2;
        lnet_md_t                 md;
        __u64                     xid;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
                 desc->bd_type == BULK_GET_SINK);
        desc->bd_success = 0;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 2;                       /* SENT and ACK/REPLY */
        md.options = PTLRPC_MD_OPTIONS;
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* NB total length may be 0 for a read past EOF, so we send a 0
         * length bulk, since the client expects a bulk event. */

        rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        /* Client's bulk and reply matchbits are the same */
        xid = desc->bd_req->rq_xid;
        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s xid "LPX64"\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal,
               libcfs_id2str(conn->c_peer), xid);

        /* Network is about to get at the memory */
        desc->bd_network_rw = 1;

        if (desc->bd_type == BULK_PUT_SOURCE)
                rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
                              conn->c_peer, desc->bd_portal, xid, 0, 0);
        else
                rc = LNetGet (conn->c_self, desc->bd_md_h,
                              conn->c_peer, desc->bd_portal, xid, 0);

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
                rc2 = LNetMDUnlink(desc->bd_md_h);
                LASSERT (rc2 == 0);
        }

        RETURN(0);
}

void ptlrpc_abort_bulk (struct ptlrpc_bulk_desc *desc)
{
        /* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
         * serialises with completion callback) */
        struct l_wait_info lwi;
        int                rc;

        LASSERT (!in_interrupt ());             /* might sleep */

        if (!ptlrpc_bulk_active(desc))          /* completed or */
                return;                         /* never started */

        /* Do not send any meaningful data over the wire for evicted clients */
        if (desc->bd_export && desc->bd_export->exp_failed)
                ptl_rpc_wipe_bulk_pages(desc);

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback() */
        LNetMDUnlink (desc->bd_md_h);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT (cfs_time_seconds(300), NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
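
/* Client-side bulk registration: attach an ME/MD pair on the bulk portal,
 * matched by the request xid, for the server to PUT into (BULK_PUT_SINK)
 * or GET from (BULK_GET_SOURCE).  The transfer itself is driven entirely
 * by the server in ptlrpc_start_bulk_transfer(). */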
int ptlrpc_register_bulk (struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        lnet_process_id_t        peer;
        int                      rc;
        int                      rc2;
        lnet_handle_me_t         me_h;
        lnet_md_t                md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (desc->bd_nob > 0);
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT (desc->bd_req != NULL);
        LASSERT (desc->bd_type == BULK_PUT_SINK ||
                 desc->bd_type == BULK_GET_SOURCE);

        desc->bd_success = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 1;                       /* PUT or GET */
        md.options = PTLRPC_MD_OPTIONS |
                     ((desc->bd_type == BULK_GET_SOURCE) ?
                      LNET_MD_OP_GET : LNET_MD_OP_PUT);
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* XXX Registering the same xid on retried bulk makes my head
         * explode trying to understand how the original request's bulk
         * might interfere with the retried request -eeb */
        LASSERTF (!desc->bd_registered || req->rq_xid != desc->bd_last_xid,
                  "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
                  desc->bd_registered, req->rq_xid, desc->bd_last_xid);
        desc->bd_registered = 1;
        desc->bd_last_xid = req->rq_xid;

        rc = LNetMEAttach(desc->bd_portal, peer,
                          req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        /* About to let the network at it... */
        desc->bd_network_rw = 1;
        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                desc->bd_network_rw = 0;
                rc2 = LNetMEUnlink (me_h);
                LASSERT (rc2 == 0);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPX64", "
               "portal %u\n",
               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               req->rq_xid, desc->bd_portal);

        RETURN(0);
}

void ptlrpc_unregister_bulk (struct ptlrpc_request *req)
{
        /* Disconnect a bulk desc from the network. Idempotent. Not
         * thread-safe (i.e. only interlocks with completion callback). */
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        cfs_waitq_t             *wq;
        struct l_wait_info       lwi;
        int                      rc;

        LASSERT (!in_interrupt ());             /* might sleep */

        if (!ptlrpc_bulk_active(desc))          /* completed or */
                return;                         /* never registered */

        LASSERT (desc->bd_req == req);          /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback() */
        LNetMDUnlink (desc->bd_md_h);

        if (req->rq_set != NULL)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT (cfs_time_seconds(300), NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT (rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
}
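
/* Fill in the adaptive-timeout fields of a reply: record the measured
 * service time for the client's latency calculation, and publish the
 * service's current service time estimate for the client to use in
 * future requests. */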
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
        int service_time = max_t(int, cfs_time_current_sec() -
                                 req->rq_arrival_time.tv_sec, 1);

        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY | MSG_LAST_REPLAY))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate */
                int oldse = at_add(&svc->srv_at_estimate, service_time);
                if (oldse != 0)
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svc->srv_at_estimate));
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_time(req->rq_repmsg, service_time);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery. */
        if (req->rq_type == PTL_RPC_MSG_ERR &&
            (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        else
                lustre_msg_set_timeout(req->rq_repmsg,
                                       at_get(&svc->srv_at_estimate));

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
                       "req_flags=%#x magic=%d:%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_is_v1(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}
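
/* Pack and send a reply for 'req' on the service's reply portal.  An ACK
 * is requested only for "difficult" replies (rs_difficult && !rs_no_ack);
 * all other replies are fire-and-forget as far as LNet is concerned. */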
int ptlrpc_send_reply (struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service     *svc = req->rq_rqbd->rqbd_service;
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one).  The reply generated by sptlrpc layer (e.g.
         * error notify, etc.) might have NULL rq->reqmsg; otherwise we must
         * have a request buffer which is either the actual (swabbed) incoming
         * request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT (req->rq_no_reply == 0);
        LASSERT (req->rq_reqbuf != NULL);
        LASSERT (rs != NULL);
        LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT (req->rq_repmsg != NULL);
        LASSERT (req->rq_repmsg == rs->rs_msg);
        LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT (rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg, req->rq_status);
        lustre_msg_set_opc(req->rq_repmsg,
                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        atomic_inc (&svc->srv_outstanding_replies);
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = cfs_time_current_sec();

        rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                           (rs->rs_difficult && !rs->rs_no_ack) ?
                           LNET_ACK_REQ : LNET_NOACK_REQ,
                           &rs->rs_cb_id, conn, svc->srv_rep_portal,
                           req->rq_xid, req->rq_reply_off);
out:
        if (unlikely(rc != 0)) {
                atomic_dec (&svc->srv_outstanding_replies);
                ptlrpc_req_drop_rs(req);
        }
        ptlrpc_connection_put(conn);

        return rc;
}
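
/* Convenience wrappers: ptlrpc_reply() sends a normal reply, while
 * ptlrpc_send_error() / ptlrpc_error() pack a reply if necessary and send
 * it as PTL_RPC_MSG_ERR, carrying rq_status back to the client. */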
int ptlrpc_reply (struct ptlrpc_request *req)
{
        if (req->rq_no_reply)
                return 0;
        else
                return (ptlrpc_send_reply(req, 0));
}

int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
        int rc;
        ENTRY;

        if (req->rq_no_reply)
                RETURN(0);

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, may_be_difficult);
        RETURN(rc);
}

int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}
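
/* Client-side send of a prepared request: register the bulk (if any), post
 * a reply buffer matched by this xid (unless 'noreply'), then PUT the
 * request to the server's request portal.  The reply MD uses an infinite
 * threshold and LNET_MD_MANAGE_REMOTE so the server can land multiple
 * early replies plus the final reply in the same buffer. */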
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t   reply_me_h;
        lnet_md_t          reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT (!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                RETURN(rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk (request);
                if (rc != 0)
                        RETURN(rc);
        }

        if (!noreply) {
                LASSERT (request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                request->rq_err = 1;
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* We are responsible for unlinking the reply buffer */
        request->rq_must_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                                     LNET_MD_MANAGE_REMOTE;
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_must_unlink,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                                    request->rq_import->imp_inflight.counter);

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        do_gettimeofday(&request->rq_arrival_time);
        request->rq_sent = cfs_time_current_sec();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                               ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0) {
                ptlrpc_lprocfs_rpc_sent(request);
                RETURN(rc);
        }

        ptlrpc_req_finished(request);
        if (noreply)
                RETURN(rc);

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT (rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT (!request->rq_receiving_reply);

 cleanup_bulk:
        if (request->rq_bulk != NULL)
                ptlrpc_unregister_bulk(request);

        RETURN(rc);
}
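
/* Post a request buffer on the service's request portal: a wildcard ME
 * (any NID, any PID, ignore all matchbits) fronts an MD with max_size
 * semantics and an infinite threshold, so many incoming requests can be
 * received back-to-back into a single buffer. */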
int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service    *service = rqbd->rqbd_service;
        static lnet_process_id_t  match_id = {LNET_NID_ANY, LNET_PID_ANY};
        int                       rc;
        lnet_md_t                 md;
        lnet_handle_me_t          me_h;

        CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                return (-ENOMEM);

        rc = LNetMEAttach(service->srv_req_portal,
                          match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                return (-ENOMEM);
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq_h;

        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0)
                return (0);

        CERROR("LNetMDAttach failed: %d\n", rc);
        LASSERT (rc == -ENOMEM);
        rc = LNetMEUnlink (me_h);
        LASSERT (rc == 0);
        rqbd->rqbd_refcount = 0;

        return (-ENOMEM);
}