/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
#include <liblustre.h>
#endif
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
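/*
 * This file implements the sending side of PTLRPC: pushing request and
 * reply buffers onto the LNet wire (ptl_send_buf, ptl_send_rpc,
 * ptlrpc_send_reply), registering and unregistering bulk transfer
 * descriptors, and posting request buffers for incoming RPCs.
 */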
/**
 * Helper function.  Sends \a len bytes from \a base at offset \a offset
 * over \a conn connection to portal \a portal.
 * Returns 0 on success or error code.
 */
static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
                         lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
                         struct ptlrpc_connection *conn, int portal, __u64 xid,
                         unsigned int offset)
{
        int              rc;
        int              rc2;
        lnet_md_t        md;
        ENTRY;

        LASSERT (portal != 0);
        LASSERT (conn != NULL);
        CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
        md.start     = base;
        md.length    = len;
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq_h;
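        /* NB with LNET_ACK_REQ the MD must stay live for two events (SEND
         * and ACK); without it only the SEND event is expected, hence the
         * threshold of 2 vs 1 above. */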
        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
        }

        rc = LNetMDBind (md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR ("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
               len, portal, xid, offset);

        rc = LNetPut (conn->c_self, *mdh, ack,
                      conn->c_peer, portal, xid, offset, 0);
        if (unlikely(rc != 0)) {
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN (0);
}
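/*
 * NB ptl_send_buf() is used by both send paths in this file, the reply
 * path in ptlrpc_send_reply() and the request path in ptl_send_rpc();
 * in both cases the final fate of the buffer is reported through the
 * \a cbid event callback, not through the return value here.
 */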
#ifdef HAVE_SERVER_SUPPORT
/**
 * Prepare bulk descriptor for specified incoming request \a req that
 * can fit \a npages pages.  \a type is bulk type.  \a portal is where
 * the bulk is to be sent.  Used on server-side after request was already
 * received.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL
 * on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              int npages, int type, int portal)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);

        desc = new_bulk(npages, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = server_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        RETURN(desc);
}
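/*
 * A typical server-side read handler uses the above roughly as follows
 * (sketch only; the exact portal and page setup depend on the service):
 *
 *      desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
 *                                  OST_BULK_PORTAL);
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 *      rc = ptlrpc_start_bulk_transfer(desc);
 */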
/**
 * Starts bulk transfer for descriptor \a desc.
 * Returns 0 on success or error code.
 */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
        int                       rc;
        int                       rc2;
        __u64                     xid;
        lnet_md_t                 md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
                 desc->bd_type == BULK_GET_SINK);
        desc->bd_success = 0;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 2; /* SENT and ACK/REPLY */
        md.options = PTLRPC_MD_OPTIONS;
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* NB total length may be 0 for a read past EOF, so we send a 0
         * length bulk, since the client expects a bulk event. */

        rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        /* Client's bulk and reply matchbits are the same */
        xid = desc->bd_req->rq_xid;
        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s xid "LPX64"\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal,
               libcfs_id2str(conn->c_peer), xid);

        /* Network is about to get at the memory */
        desc->bd_network_rw = 1;
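        /* BULK_PUT_SOURCE: the server is the source of a PUT, i.e. it
         * pushes read data to the client.  BULK_GET_SINK: the server GETs
         * the data from the client's buffer, i.e. a client write. */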
        if (desc->bd_type == BULK_PUT_SOURCE)
                rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
                              conn->c_peer, desc->bd_portal, xid, 0, 0);
        else
                rc = LNetGet (conn->c_self, desc->bd_md_h,
                              conn->c_peer, desc->bd_portal, xid, 0);

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
                rc2 = LNetMDUnlink(desc->bd_md_h);
                LASSERT (rc2 == 0);
        }

        RETURN(0);
}
/**
 * Server side bulk abort.  Idempotent.  Not thread-safe (i.e. only
 * serialises with completion callback).
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct l_wait_info       lwi;
        int                      rc;

        LASSERT(!cfs_in_interrupt());           /* might sleep */

        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                return;                         /* never started */

        /* We used to poison the pages with 0xab here because we did not want
         * to send any meaningful data over the wire for evicted clients (bug
         * 9297).  However, this is no longer safe now that we use the page
         * cache on the OSS (bug 20560). */

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback() */
        LNetMDUnlink(desc->bd_md_h);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_server_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
#endif /* HAVE_SERVER_SUPPORT */
/**
 * Register bulk for later transfer.
 * Returns 0 on success or error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        lnet_process_id_t peer;
        int rc;
        int rc2;
        lnet_handle_me_t  me_h;
        lnet_md_t         md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (desc->bd_nob > 0);
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT (desc->bd_req != NULL);
        LASSERT (desc->bd_type == BULK_PUT_SINK ||
                 desc->bd_type == BULK_GET_SOURCE);

        desc->bd_success = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 1;                       /* PUT or GET */
        md.options = PTLRPC_MD_OPTIONS |
                     ((desc->bd_type == BULK_GET_SOURCE) ?
                      LNET_MD_OP_GET : LNET_MD_OP_PUT);
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* XXX Registering the same xid on retried bulk makes my head
         * explode trying to understand how the original request's bulk
         * might interfere with the retried request -eeb
         * On the other hand replaying with the same xid is fine, since
         * we are guaranteed old requests have completed. -green */
        LASSERTF(!(desc->bd_registered &&
                   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
                 req->rq_xid != desc->bd_last_xid,
                 "registered: %d  rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
                 desc->bd_registered, req->rq_xid, desc->bd_last_xid);
        desc->bd_registered = 1;
        desc->bd_last_xid = req->rq_xid;
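        /* Post a passive ME/MD pair on bd_portal: the ME matches the peer's
         * bulk PUT or GET by rq_xid match bits, and the MD (filled from the
         * descriptor pages above) is where the data lands or leaves from. */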
        rc = LNetMEAttach(desc->bd_portal, peer,
                          req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        /* About to let the network at it... */
        desc->bd_network_rw = 1;
        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                desc->bd_network_rw = 0;
                rc2 = LNetMEUnlink (me_h);
                LASSERT (rc2 == 0);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPU64", "
               "portal %u\n",
               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               req->rq_xid, desc->bd_portal);

        RETURN(0);
}
/**
 * Disconnect a bulk desc from the network.  Idempotent.  Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        cfs_waitq_t             *wq;
        struct l_wait_info       lwi;
        int                      rc;
        ENTRY;

        LASSERT(!cfs_in_interrupt());     /* might sleep */

        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0)
                req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
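        /* NB the OBD_FAIL check above is a fault-injection point: tests set
         * it to make the async unlink appear to take LONG_UNLINK seconds,
         * exercising the slow-unlink path below. */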
        if (!ptlrpc_client_bulk_active(req))  /* completed or */
                RETURN(1);                    /* never registered */

        LASSERT(desc->bd_req == req);  /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback() */
        LNetMDUnlink(desc->bd_md_h);

        if (!ptlrpc_client_bulk_active(req))  /* completed or */
                RETURN(1);                    /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);

        /* Do not wait for unlink to finish. */
        if (async)
                RETURN(0);

        if (req->rq_set != NULL)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
                if (rc == 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        RETURN(1);
                }

                LASSERT(rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
}
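/*
 * Fill in the adaptive-timeout fields of the reply: the measured service
 * time for this request, and the service's current processing-time
 * estimate that clients use to size future RPC timeouts.
 */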
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
        int service_time = max_t(int, cfs_time_current_sec() -
                                 req->rq_arrival_time.tv_sec, 1);

        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) &&
            (req->rq_reqmsg != NULL) &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY |
               MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate */
                int oldse = at_measured(&svc->srv_at_estimate, service_time);

                if (oldse != 0)
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svc->srv_at_estimate));
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_time(req->rq_repmsg, service_time);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery.
         */
        if (req->rq_type == PTL_RPC_MSG_ERR &&
            (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        else
                lustre_msg_set_timeout(req->rq_repmsg,
                                       at_get(&svc->srv_at_estimate));

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
                       "req_flags=%#x magic=%d:%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_is_v1(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}
/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types.
 * Returns 0 on success or error code.
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service     *svc = req->rq_rqbd->rqbd_service;
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one).  The reply generated by sptlrpc layer (e.g.
         * error notify, etc.) might have NULL rq_reqmsg; otherwise we must
         * have a request buffer which is either the actual (swabbed) incoming
         * request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT (req->rq_no_reply == 0);
        LASSERT (req->rq_reqbuf != NULL);
        LASSERT (rs != NULL);
        LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT (req->rq_repmsg != NULL);
        LASSERT (req->rq_repmsg == rs->rs_msg);
        LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT (rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg, req->rq_status);
        lustre_msg_set_opc(req->rq_repmsg,
                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = cfs_time_current_sec();
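        /* A "difficult" reply must stay around until the client ACKs it,
         * so ask LNet for an ACK (which makes ptl_send_buf() use an event
         * threshold of 2); otherwise fire and forget with no ACK. */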
        rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                           (rs->rs_difficult && !rs->rs_no_ack) ?
                           LNET_ACK_REQ : LNET_NOACK_REQ,
                           &rs->rs_cb_id, conn, svc->srv_rep_portal,
                           req->rq_xid, req->rq_reply_off);
out:
        if (unlikely(rc != 0))
                ptlrpc_req_drop_rs(req);
        ptlrpc_connection_put(conn);
        return rc;
}
int ptlrpc_reply (struct ptlrpc_request *req)
{
        if (req->rq_no_reply)
                return 0;
        else
                return (ptlrpc_send_reply(req, 0));
}
/**
 * For request \a req send an error reply back.  Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
        int rc;
        ENTRY;

        if (req->rq_no_reply)
                RETURN(0);

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, may_be_difficult);
        RETURN(rc);
}
int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}
/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        int mpflag = 0;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t  reply_me_h;
        lnet_md_t         reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        if (request->rq_resend)
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

        if (request->rq_memalloc)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                GOTO(out, rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk (request);
                if (rc != 0)
                        GOTO(out, rc);
        }

        if (!noreply) {
                LASSERT (request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                request->rq_err = 1;
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        cfs_spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* We are responsible for unlinking the reply buffer */
        request->rq_must_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncate = 0;
        cfs_spin_unlock(&request->rq_lock);
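        /* When a reply is expected, post the reply MD before sending the
         * request: MANAGE_REMOTE plus an infinite threshold let the server
         * PUT several early replies into this buffer before the final one. */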
        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                                     LNET_MD_MANAGE_REMOTE |
                                     LNET_MD_TRUNCATE; /* allow truncation (EOVERFLOW) */
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_must_unlink,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        cfs_spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        cfs_spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                        cfs_atomic_read(&request->rq_import->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        cfs_gettimeofday(&request->rq_arrival_time);
        request->rq_sent = cfs_time_current_sec();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                               ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0)
                GOTO(out, rc);

        ptlrpc_req_finished(request);
        if (noreply)
                GOTO(out, rc);

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT(rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
        /* We do sync unlink here as there was no real transfer here so
         * the chance to have long unlink to sluggish net is smaller here. */
        ptlrpc_unregister_bulk(request, 0);
 out:
        if (request->rq_memalloc)
                cfs_memory_pressure_restore(mpflag);
        RETURN(rc);
}
/**
 * Register request buffer descriptor for request receiving.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service   *service = rqbd->rqbd_service;
        static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
        int                      rc;
        lnet_md_t                md;
        lnet_handle_me_t         me_h;

        CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                return (-ENOMEM);

        rc = LNetMEAttach(service->srv_req_portal,
                          match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                return (-ENOMEM);
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq_h;
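        /* NB with LNET_MD_MAX_SIZE set, one buffer absorbs incoming requests
         * back to back (each at most srv_max_req_size bytes) and is unlinked
         * automatically once less than max_size space remains, at which
         * point the service posts a fresh rqbd. */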
        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0)
                return (0);

        CERROR("LNetMDAttach failed: %d\n", rc);
        LASSERT (rc == -ENOMEM);
        rc = LNetMEUnlink (me_h);
        LASSERT (rc == 0);
        rqbd->rqbd_refcount = 0;

        return (-ENOMEM);
}