/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_RPC
#ifndef __KERNEL__
#include <liblustre.h>
#endif
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"

/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * over \a conn connection to portal \a portal.
 * Returns 0 on success or error code.
 */
static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
                        lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
                        struct ptlrpc_connection *conn, int portal, __u64 xid,
                        unsigned int offset)
{
        int       rc;
        lnet_md_t md;
        ENTRY;

        LASSERT(portal != 0);
        LASSERT(conn != NULL);
        CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
        md.start     = base;
        md.length    = len;
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1; /* SENT + ACK events */
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq_h;

        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
        }

        rc = LNetMDBind(md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
               len, portal, xid, offset);

        rc = LNetPut(conn->c_self, *mdh, ack,
                     conn->c_peer, portal, xid, offset, 0);
        if (unlikely(rc != 0)) {
                int rc2;
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN(0);
}
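
/* Unlink the first \a count MD handles of a bulk descriptor; used by the
 * bulk abort and error cleanup paths below. */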
static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
{
        int i;

        for (i = 0; i < count; i++)
                LNetMDUnlink(bd_mds[i]);
}

#ifdef HAVE_SERVER_SUPPORT
/**
 * Prepare bulk descriptor for specified incoming request \a req that
 * can fit \a npages * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on server-side after request was already
 * received.
 *
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              unsigned npages, unsigned max_brw,
                                              unsigned type, unsigned portal)
{
        struct obd_export       *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;
        ENTRY;

        LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);

        desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = server_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        RETURN(desc);
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
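
/*
 * A minimal server-side usage sketch (illustrative only; the page-attach
 * helper name and the error handling are assumptions, not taken from this
 * file):
 *
 *      desc = ptlrpc_prep_bulk_exp(req, npages, 1, BULK_PUT_SOURCE,
 *                                  OST_BULK_PORTAL);
 *      if (desc == NULL)
 *              RETURN(-ENOMEM);
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page_nopin(desc, pages[i], 0, PAGE_CACHE_SIZE);
 *      rc = ptlrpc_start_bulk_transfer(desc);
 */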

/**
 * Starts bulk transfer for descriptor \a desc on the server.
 * Returns 0 on success or error code.
 */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
        struct obd_export        *exp = desc->bd_export;
        struct ptlrpc_connection *conn = exp->exp_connection;
        int                       posted_md;
        int                       total_md;
        __u64                     xid;
        lnet_md_t                 md;
        int                       rc;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT(desc->bd_md_count == 0);
        LASSERT(desc->bd_type == BULK_PUT_SOURCE ||
                desc->bd_type == BULK_GET_SINK);

        LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT(desc->bd_cbid.cbid_arg == desc);

        /* NB total length may be 0 for a read past EOF, so we send 0
         * length bulks, since the client expects bulk events.
         *
         * The client may not need all of the bulk XIDs for the RPC. The RPC
         * used the XID of the highest bulk XID needed, and the server masks
         * off high bits to get bulk count for this RPC. LU-1431 */
        xid = desc->bd_req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
        total_md = desc->bd_req->rq_xid - xid + 1;
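        /* Worked example (illustration only): with bd_md_max_brw = 4 and
         * rq_xid = 0x4a6, the mask clears the low two bits, so xid = 0x4a4
         * and total_md = 3, i.e. MDs for match bits 0x4a4..0x4a6. */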

        desc->bd_md_count = total_md;
        desc->bd_failure = 0;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 2; /* SENT and ACK/REPLY */

        for (posted_md = 0; posted_md < total_md; xid++) {
                md.options = PTLRPC_MD_OPTIONS;

                /* NB it's assumed that source and sink buffer frags are
                 * page-aligned. Otherwise we'd have to send client bulk
                 * sizes over and split server buffer accordingly */
                ptlrpc_fill_bulk_md(&md, desc, posted_md);
                rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
                if (rc != 0) {
                        CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
                               exp->exp_obd->obd_name, posted_md, rc);
                        LASSERT(rc == -ENOMEM);
                        if (posted_md == 0) {
                                desc->bd_md_count = 0;
                                RETURN(-ENOMEM);
                        }
                        break;
                }

                /* Network is about to get at the memory */
                if (desc->bd_type == BULK_PUT_SOURCE)
                        rc = LNetPut(conn->c_self, desc->bd_mds[posted_md],
                                     LNET_ACK_REQ, conn->c_peer,
                                     desc->bd_portal, xid, 0, 0);
                else
                        rc = LNetGet(conn->c_self, desc->bd_mds[posted_md],
                                     conn->c_peer, desc->bd_portal, xid, 0);

                posted_md++;
                if (rc != 0) {
                        CERROR("%s: failed bulk transfer with %s:%u x"LPU64": "
                               "rc = %d\n", exp->exp_obd->obd_name,
                               libcfs_id2str(conn->c_peer), desc->bd_portal,
                               xid, rc);
                        break;
                }
        }

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above. The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                spin_lock(&desc->bd_lock);
                desc->bd_md_count -= total_md - posted_md;
                spin_unlock(&desc->bd_lock);
                LASSERT(desc->bd_md_count >= 0);

                mdunlink_iterate_helper(desc->bd_mds, posted_md);
                RETURN(0);
        }
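
        /* NB xid was post-incremented on every loop iteration, so the match
         * bits actually used were xid - posted_md .. xid - 1. */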
        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s xid "LPX64"-"LPX64"\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal, libcfs_id2str(conn->c_peer),
               xid - posted_md, xid - 1);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_start_bulk_transfer);

/**
 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback).
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct l_wait_info lwi;
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());               /* might sleep */

        if (!ptlrpc_server_bulk_active(desc))   /* completed or */
                return;                         /* never started */

        /* We used to poison the pages with 0xab here because we did not want
         * to send any meaningful data over the wire for evicted clients (bug
         * 9297). However, this is no longer safe now that we use the page
         * cache on the OSS. */

        /* The unlink ensures the callback happens ASAP and is the last
         * one. If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback() */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_count);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_server_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}
EXPORT_SYMBOL(ptlrpc_abort_bulk);
#endif /* HAVE_SERVER_SUPPORT */

/**
 * Register bulk at the sender for later transfer.
 * Returns 0 on success or error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        lnet_process_id_t peer;
        int rc = 0;
        int rc2;
        int posted_md;
        int total_md;
        __u64 xid;
        lnet_handle_me_t me_h;
        lnet_md_t md;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT(desc->bd_nob > 0);
        LASSERT(desc->bd_md_count == 0);
        LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
        LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT(desc->bd_req != NULL);
        LASSERT(desc->bd_type == BULK_PUT_SINK ||
                desc->bd_type == BULK_GET_SOURCE);

        /* clean up the bulk state, since the descriptor will be reused */
        if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
                desc->bd_nob_transferred = 0;
        else
                LASSERT(desc->bd_nob_transferred == 0);

        desc->bd_failure = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT(desc->bd_cbid.cbid_arg == desc);

        /* An XID is only used for a single request from the client.
         * For retried bulk transfers, a new XID will be allocated in
         * ptlrpc_check_set() if it needs to be resent, so it is not
         * using the same RDMA match bits after an error.
         *
         * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
         * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
        xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
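        /* Illustration only: bd_md_max_brw = 4 and rq_xid = 0x4a7 give a
         * first bulk XID of 0x4a4; the server recovers the bulk count by
         * masking rq_xid the same way. */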
        LASSERTF(!(desc->bd_registered &&
                   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
                 xid != desc->bd_last_xid,
                 "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
                 desc->bd_registered, xid, desc->bd_last_xid);
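
        /* One MD is needed for every LNET_MAX_IOV pages, rounding up. */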
        total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
        desc->bd_registered = 1;
        desc->bd_last_xid = xid;
        desc->bd_md_count = total_md;
        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 1; /* PUT or GET */

        for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
                md.options = PTLRPC_MD_OPTIONS |
                             ((desc->bd_type == BULK_GET_SOURCE) ?
                              LNET_MD_OP_GET : LNET_MD_OP_PUT);
                ptlrpc_fill_bulk_md(&md, desc, posted_md);

                rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &me_h);
                if (rc != 0) {
                        CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, xid,
                               posted_md, rc);
                        break;
                }

                /* About to let the network at it... */
                rc = LNetMDAttach(me_h, md, LNET_UNLINK,
                                  &desc->bd_mds[posted_md]);
                if (rc != 0) {
                        CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
                               desc->bd_import->imp_obd->obd_name, xid,
                               posted_md, rc);
                        rc2 = LNetMEUnlink(me_h);
                        LASSERT(rc2 == 0);
                        break;
                }
        }

        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                spin_lock(&desc->bd_lock);
                desc->bd_md_count -= total_md - posted_md;
                spin_unlock(&desc->bd_lock);
                LASSERT(desc->bd_md_count >= 0);
                mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
                req->rq_status = -ENOMEM;
                RETURN(-ENOMEM);
        }

        /* Set rq_xid to matchbits of the final bulk so that server can
         * infer the number of bulks that were prepared */
        req->rq_xid = --xid;
        LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
                 "bd_last_xid = x"LPU64", rq_xid = x"LPU64"\n",
                 desc->bd_last_xid, req->rq_xid);

        spin_lock(&desc->bd_lock);
        /* Holler if peer manages to touch buffers before he knows the xid */
        if (desc->bd_md_count != total_md)
                CWARN("%s: Peer %s touched %d buffers while I registered\n",
                      desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
                      total_md - desc->bd_md_count);
        spin_unlock(&desc->bd_lock);

        CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
               "xid x"LPX64"-"LPX64", portal %u\n", desc->bd_md_count,
               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               desc->bd_last_xid, req->rq_xid, desc->bd_portal);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_register_bulk);

/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        struct l_wait_info lwi;
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());       /* might sleep */

        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0)
                req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */

        LASSERT(desc->bd_req == req);   /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one. If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback() */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                RETURN(1);                              /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);

        /* Do not wait for unlink to finish. */
        if (async)
                RETURN(0);

        for (;;) {
                /* The wq argument is ignored by user-space wait_event macros */
                wait_queue_head_t *wq = (req->rq_set != NULL) ?
                                        &req->rq_set->set_waitq :
                                        &req->rq_reply_waitq;

                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
                                           cfs_time_seconds(1), NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
                if (rc == 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        RETURN(1);
                }

                LASSERT(rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_unregister_bulk);

static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_service *svc = svcpt->scp_service;
        /* clamp to at least one second of measured service time */
        int service_time = max_t(int, cfs_time_current_sec() -
                                 req->rq_arrival_time.tv_sec, 1);

        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) &&
            (req->rq_reqmsg != NULL) &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY |
               MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate */
                int oldse = at_measured(&svcpt->scp_at_estimate, service_time);

                if (oldse != 0)
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svcpt->scp_at_estimate));
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_time(req->rq_repmsg, service_time);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery. */
        if (req->rq_type == PTL_RPC_MSG_ERR &&
            (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        else
                lustre_msg_set_timeout(req->rq_repmsg,
                                       at_get(&svcpt->scp_at_estimate));

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
                       "req_flags=%#x magic=%d:%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_is_v1(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}

/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types.
 * Returns 0 on success or error code.
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one). The reply generated by sptlrpc layer (e.g.
         * error notify, etc.) might have NULL rq_reqmsg; otherwise we must
         * have a request buffer which is either the actual (swabbed) incoming
         * request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT(req->rq_no_reply == 0);
        LASSERT(req->rq_reqbuf != NULL);
        LASSERT(rs != NULL);
        LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT(req->rq_repmsg != NULL);
        LASSERT(req->rq_repmsg == rs->rs_msg);
        LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT(rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        /* In order to keep interoperability with clients (< 2.3) which don't
         * have pb_jobid in ptlrpc_body, we have to shrink the ptlrpc_body in
         * the reply buffer to ptlrpc_body_v2; otherwise the reply buffer on
         * the client would overflow.
         *
         * XXX Remove this whenever we drop interoperability with such clients.
         */
        req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
                                           sizeof(struct ptlrpc_body_v2), 1);

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg,
                              ptlrpc_status_hton(req->rq_status));
        lustre_msg_set_opc(req->rq_repmsg,
                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = cfs_time_current_sec();

        rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                          (rs->rs_difficult && !rs->rs_no_ack) ?
                          LNET_ACK_REQ : LNET_NOACK_REQ,
                          &rs->rs_cb_id, conn,
                          ptlrpc_req2svc(req)->srv_rep_portal,
                          req->rq_xid, req->rq_reply_off);
out:
        if (unlikely(rc != 0))
                ptlrpc_req_drop_rs(req);
        ptlrpc_connection_put(conn);
        return rc;
}
EXPORT_SYMBOL(ptlrpc_send_reply);

int ptlrpc_reply(struct ptlrpc_request *req)
{
        if (req->rq_no_reply)
                return 0;
        else
                return ptlrpc_send_reply(req, 0);
}
EXPORT_SYMBOL(ptlrpc_reply);

/**
 * For request \a req send an error reply back. Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
        int rc;
        ENTRY;

        if (req->rq_no_reply)
                RETURN(0);

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
            req->rq_status != -EPERM && req->rq_status != -ENOENT &&
            req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
                req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, may_be_difficult);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_send_error);

int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}
EXPORT_SYMBOL(ptlrpc_error);

/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        int mpflag = 0;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t reply_me_h;
        lnet_md_t reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        if (request->rq_resend)
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

        if (request->rq_memalloc)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                GOTO(out, rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk(request);
                if (rc != 0)
                        GOTO(out, rc);
        }

        if (!noreply) {
                LASSERT(request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                request->rq_err = 1;
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* We are responsible for unlinking the reply buffer */
        request->rq_must_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncate = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                                     LNET_MD_MANAGE_REMOTE |
                                     LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_must_unlink,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                        cfs_atomic_read(&request->rq_import->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        do_gettimeofday(&request->rq_arrival_time);
        request->rq_sent = cfs_time_current_sec();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                               ptlrpc_at_get_net_latency(request);
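        /* e.g. a 30 second rq_timeout plus 5 seconds of measured network
         * latency gives a deadline 35 seconds after rq_sent (numbers are
         * illustrative only). */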

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0)
                GOTO(out, rc);

        ptlrpc_req_finished(request);
        if (noreply)
                GOTO(out, rc);

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT(rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
        /* We do sync unlink here as there was no real transfer here so
         * the chance to have long unlink to sluggish net is smaller here. */
        ptlrpc_unregister_bulk(request, 0);
 out:
        if (request->rq_memalloc)
                cfs_memory_pressure_restore(mpflag);
        RETURN(rc);
}
EXPORT_SYMBOL(ptl_send_rpc);

/**
 * Register request buffer descriptor for request receiving.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
        static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
        int rc;
        lnet_md_t md;
        lnet_handle_me_t me_h;

        CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
                return -ENOMEM;

        /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
         * which means buffer can only be attached on local CPT, and LND
         * threads can find it by grabbing a local lock */
        rc = LNetMEAttach(service->srv_req_portal,
                          match_id, 0, ~0, LNET_UNLINK,
                          rqbd->rqbd_svcpt->scp_cpt >= 0 ?
                          LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                return -ENOMEM;
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;
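
        /* NB with LNET_MD_MAX_SIZE set below, the MD auto-unlinks once less
         * than srv_max_req_size bytes remain in the buffer, so a replacement
         * rqbd must then be posted (behaviour per LNet MD semantics). */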
        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq_h;

        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0)
                return 0;

        CERROR("LNetMDAttach failed: %d\n", rc);
        LASSERT(rc == -ENOMEM);
        rc = LNetMEUnlink(me_h);
        LASSERT(rc == 0);
        rqbd->rqbd_refcount = 0;

        return -ENOMEM;
}