4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #define DEBUG_SUBSYSTEM S_RPC
33 #include <libcfs/linux/linux-mem.h>
34 #include <obd_support.h>
35 #include <lustre_net.h>
36 #include <lustre_lib.h>
38 #include <obd_class.h>
39 #include "ptlrpc_internal.h"
40 #include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */
43 * Helper function. Sends \a len bytes from \a base at offset \a offset
44 * to portal \a portal on the peer identified by \a self and \a peer_id.
45 * Returns 0 on success or a negative error code.
47 static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
48 enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
49 struct lnet_nid *self, struct lnet_processid *peer_id,
50 int portal, __u64 xid, unsigned int offset,
51 struct lnet_handle_md *bulk_cookie)
59 CDEBUG(D_INFO, "peer_id %s\n", libcfs_idstr(peer_id));
62 md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
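/* Explanatory note: the threshold is the number of events the MD must
 * survive.  With LNET_ACK_REQ it sees both the SEND event and the peer's
 * ACK (2); without an ack a single SEND event completes it (1). */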
63 md.options = PTLRPC_MD_OPTIONS;
65 md.handler = ptlrpc_handler;
66 LNetInvalidateMDHandle(&md.bulk_handle);
69 md.bulk_handle = *bulk_cookie;
70 md.options |= LNET_MD_BULK_HANDLE;
73 if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, CFS_FAIL_ONCE) &&
74 ack == LNET_ACK_REQ) {
75 /* don't ask for the ack to simulate failing client */
79 rc = LNetMDBind(&md, LNET_UNLINK, mdh);
80 if (unlikely(rc != 0)) {
81 CERROR("LNetMDBind failed: %d\n", rc);
82 LASSERT(rc == -ENOMEM);
86 CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
87 len, portal, xid, offset);
89 percpu_ref_get(&ptlrpc_pending);
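/* Note (assumption): the ptlrpc_pending reference taken here is balanced by
 * the event handler once the network is finished with the MD, so shutdown
 * can wait for in-flight LNet operations to drain. */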
91 rc = LNetPut(self, *mdh, ack,
92 peer_id, portal, xid, offset, 0);
93 if (unlikely(rc != 0)) {
95 /* We will get an UNLINK event when we unlink below, which will complete
96 * like any other failed send, so fall through and return success
98 CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
99 libcfs_idstr(peer_id), portal, xid, rc);
100 rc2 = LNetMDUnlink(*mdh);
101 LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
107 #define mdunlink_iterate_helper(mds, count) \
108 __mdunlink_iterate_helper(mds, count, false)
109 static void __mdunlink_iterate_helper(struct lnet_handle_md *bd_mds,
110 int count, bool discard)
114 for (i = 0; i < count; i++)
115 __LNetMDUnlink(bd_mds[i], discard);
118 #ifdef HAVE_SERVER_SUPPORT
120 * Prepare bulk descriptor for specified incoming request \a req that
121 * can fit \a nfrags pages. \a type is the bulk type. \a portal is where
122 * the bulk is to be sent. Used on the server side after the request was already
124 * Returns a pointer to the newly allocated, initialized bulk descriptor or NULL on
127 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
128 unsigned int nfrags, unsigned int max_brw,
129 unsigned int type, unsigned int portal,
130 const struct ptlrpc_bulk_frag_ops *ops)
132 struct obd_export *exp = req->rq_export;
133 struct ptlrpc_bulk_desc *desc;
136 LASSERT(ptlrpc_is_bulk_op_active(type));
138 desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
142 desc->bd_export = class_export_get(exp);
145 desc->bd_cbid.cbid_fn = server_bulk_callback;
146 desc->bd_cbid.cbid_arg = desc;
148 /* NB we don't assign rq_bulk here; server-side requests are
149 * re-used, and the handler frees the bulk desc explicitly.
154 EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
157 * Starts bulk transfer for descriptor \a desc on the server.
158 * Returns 0 on success or error code.
160 int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
162 struct obd_export *exp = desc->bd_export;
163 struct lnet_nid self_nid;
164 struct lnet_processid peer_id;
173 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
176 /* NB no locking required until desc is on the network */
177 LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));
179 LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
180 LASSERT(desc->bd_cbid.cbid_arg == desc);
182 /* Multi-Rail: get the preferred self and peer NIDs from the
183 * request, so they are based on the route taken by the message.
185 self_nid = desc->bd_req->rq_self;
186 peer_id = desc->bd_req->rq_source;
188 /* NB total length may be 0 for a read past EOF, so we send 0
189 * length bulks, since the client expects bulk events.
191 * The client may not need all of the bulk mbits for the RPC. The RPC
192 * carries the mbits of the highest bulk MD needed, and the server masks
193 * off the high bits to get the bulk count for this RPC. LU-1431
195 mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
196 total_md = desc->bd_req->rq_mbits - mbits + 1;
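/* Worked example (illustrative values): with bd_md_max_brw == 8 and
 * rq_mbits == 0x1007, mbits is masked down to 0x1000 and total_md becomes 8,
 * i.e. one MD is posted for each of the match bits 0x1000..0x1007. */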
197 desc->bd_refs = total_md;
198 desc->bd_failure = 0;
200 md.user_ptr = &desc->bd_cbid;
201 md.handler = ptlrpc_handler;
202 md.threshold = 2; /* SENT and ACK/REPLY */
204 for (posted_md = 0; posted_md < total_md; mbits++) {
205 md.options = PTLRPC_MD_OPTIONS;
207 /* NB: source and sink buffer frags are assumed page-aligned; otherwise we
208 * would have to send client bulk sizes over and split the server buffer accordingly
210 ptlrpc_fill_bulk_md(&md, desc, posted_md);
211 rc = LNetMDBind(&md, LNET_UNLINK, &desc->bd_mds[posted_md]);
213 CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
214 exp->exp_obd->obd_name, posted_md, rc);
215 LASSERT(rc == -ENOMEM);
216 if (posted_md == 0) {
217 desc->bd_md_count = 0;
222 percpu_ref_get(&ptlrpc_pending);
224 /* sanity.sh 224c: let's skip the last md */
225 if (posted_md == desc->bd_md_max_brw - 1)
226 CFS_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
227 CFS_FAIL_PTLRPC_OST_BULK_CB2);
229 /* Network is about to get at the memory */
230 if (ptlrpc_is_bulk_put_source(desc->bd_type))
231 rc = LNetPut(&self_nid, desc->bd_mds[posted_md],
232 LNET_ACK_REQ, &peer_id,
233 desc->bd_portal, mbits, 0, 0);
235 rc = LNetGet(&self_nid, desc->bd_mds[posted_md],
236 &peer_id, desc->bd_portal,
241 CERROR("%s: failed bulk transfer with %s:%u x%llu: rc = %d\n",
242 exp->exp_obd->obd_name,
243 libcfs_idstr(&peer_id), desc->bd_portal,
250 /* Can't send, so we unlink the MD bound above. The UNLINK
251 * event this creates will signal completion with failure,
252 * so we return SUCCESS here!
254 spin_lock(&desc->bd_lock);
255 desc->bd_refs -= total_md - posted_md;
256 spin_unlock(&desc->bd_lock);
257 LASSERT(desc->bd_refs >= 0);
259 mdunlink_iterate_helper(desc->bd_mds, posted_md);
263 CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d id %s mbits %#llx-%#llx\n",
265 desc->bd_nob, desc->bd_portal, libcfs_idstr(&peer_id),
266 mbits - posted_md, mbits - 1);
272 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
273 * serialises with completion callback)
275 void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
277 LASSERT(!in_interrupt()); /* might sleep */
279 if (!ptlrpc_server_bulk_active(desc)) /* completed or */
280 return; /* never started */
282 /* We used to poison the pages with 0xab here because we did not want to
283 * send any meaningful data over the wire for evicted clients (bug 9297).
284 * However, this is no longer safe now that we use the page cache on the
287 /* The unlink ensures the callback happens ASAP and is the last
288 * one. If it fails, it must be because completion just happened,
289 * but we must still wait_event_idle_timeout() in this case, to give
290 * us a chance to run server_bulk_callback()
292 __mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw, true);
295 /* Network access will complete in finite time but the HUGE
296 * timeout lets us CWARN for visibility of sluggish NALs
298 int seconds = PTLRPC_REQ_LONG_UNLINK;
300 while (seconds > 0 &&
301 wait_event_idle_timeout(desc->bd_waitq,
302 !ptlrpc_server_bulk_active(desc),
303 cfs_time_seconds(1)) == 0)
308 CWARN("Unexpectedly long timeout: desc %p\n", desc);
311 #endif /* HAVE_SERVER_SUPPORT */
314 * Register bulk at the sender for later transfer.
315 * Returns 0 on success or error code.
317 int ptlrpc_register_bulk(struct ptlrpc_request *req)
319 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
320 struct lnet_processid peer;
330 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
333 /* NB no locking required until desc is on the network */
334 LASSERT(desc->bd_nob > 0);
335 LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
336 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
337 LASSERT(desc->bd_req != NULL);
338 LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
340 /* clean up the state of the bulk, as it will be reused */
341 if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
342 desc->bd_nob_transferred = 0;
343 else if (desc->bd_nob_transferred != 0)
344 /* If the network failed after the RPC was sent, this condition can
345 * happen. Rather than assert (as was done here before), return an EIO error
349 desc->bd_failure = 0;
351 peer = desc->bd_import->imp_connection->c_peer;
353 LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
354 LASSERT(desc->bd_cbid.cbid_arg == desc);
356 total_md = desc->bd_md_count;
357 /* rq_mbits is matchbits of the final bulk */
358 mbits = req->rq_mbits - desc->bd_md_count + 1;
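/* Worked example (illustrative values): with bd_md_count == 4 and
 * rq_mbits == 0x2003, the first MD uses mbits 0x2000 and MEs are attached
 * for match bits 0x2000..0x2003, the same range the server-side code above
 * reconstructs from rq_mbits. */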
360 LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
361 "first mbits = x%llu, last mbits = x%llu\n",
362 mbits, req->rq_mbits);
363 LASSERTF(!(desc->bd_registered &&
364 req->rq_send_state != LUSTRE_IMP_REPLAY) ||
365 mbits != desc->bd_last_mbits,
366 "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
367 desc->bd_registered, mbits, desc->bd_last_mbits);
369 desc->bd_registered = 1;
370 desc->bd_last_mbits = mbits;
371 desc->bd_refs = total_md;
372 md.user_ptr = &desc->bd_cbid;
373 md.handler = ptlrpc_handler;
374 md.threshold = 1; /* PUT or GET */
376 for (posted_md = 0; posted_md < desc->bd_md_count;
377 posted_md++, mbits++) {
378 md.options = PTLRPC_MD_OPTIONS |
379 (ptlrpc_is_bulk_op_get(desc->bd_type) ?
380 LNET_MD_OP_GET : LNET_MD_OP_PUT);
381 ptlrpc_fill_bulk_md(&md, desc, posted_md);
383 if (posted_md > 0 && posted_md + 1 == desc->bd_md_count &&
384 CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
387 me = LNetMEAttach(desc->bd_portal, &peer, mbits, 0,
388 LNET_UNLINK, LNET_INS_AFTER);
389 rc = PTR_ERR_OR_ZERO(me);
392 CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
393 desc->bd_import->imp_obd->obd_name, mbits,
397 percpu_ref_get(&ptlrpc_pending);
399 /* About to let the network at it... */
400 rc = LNetMDAttach(me, &md, LNET_UNLINK,
401 &desc->bd_mds[posted_md]);
403 CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
404 desc->bd_import->imp_obd->obd_name, mbits,
411 LASSERT(rc == -ENOMEM);
412 spin_lock(&desc->bd_lock);
413 desc->bd_refs -= total_md - posted_md;
414 spin_unlock(&desc->bd_lock);
415 LASSERT(desc->bd_refs >= 0);
416 mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
417 req->rq_status = -ENOMEM;
418 desc->bd_registered = 0;
422 spin_lock(&desc->bd_lock);
423 /* Holler if the peer manages to touch buffers before it knows the mbits */
424 if (desc->bd_refs != total_md)
425 CWARN("%s: Peer %s touched %d buffers while I registered\n",
426 desc->bd_import->imp_obd->obd_name, libcfs_idstr(&peer),
427 total_md - desc->bd_refs);
428 spin_unlock(&desc->bd_lock);
431 "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
433 ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
434 desc->bd_iov_count, desc->bd_nob,
435 desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
441 * Disconnect a bulk desc from the network. Idempotent. Not
442 * thread-safe (i.e. only interlocks with completion callback).
443 * Returns 1 on success or 0 if network unregistration failed for whatever reason.
446 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
448 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
452 LASSERT(!in_interrupt()); /* might sleep */
455 desc->bd_registered = 0;
457 /* Let's setup deadline for reply unlink. */
458 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
459 async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
460 req->rq_bulk_deadline = ktime_get_real_seconds() +
461 PTLRPC_REQ_LONG_UNLINK;
463 if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
464 RETURN(1); /* never registered */
466 LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
468 /* the unlink ensures the callback happens ASAP and is the last
469 * one. If it fails, it must be because completion just happened,
470 * but we must still wait_event_idle_timeout() in this case to give
471 * us a chance to run client_bulk_callback()
473 mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
475 if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
476 RETURN(1); /* never registered */
478 /* Move to "Unregistering" phase as bulk was not unlinked yet. */
479 ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);
481 /* Do not wait for unlink to finish. */
486 /* The wq argument is ignored by user-space wait_event macros */
487 wait_queue_head_t *wq = (req->rq_set != NULL) ?
488 &req->rq_set->set_waitq :
489 &req->rq_reply_waitq;
491 * Network access will complete in finite time but the HUGE
492 * timeout lets us CWARN for visibility of sluggish NALs.
494 int seconds = PTLRPC_REQ_LONG_UNLINK;
496 while (seconds > 0 &&
497 wait_event_idle_timeout(*wq,
498 !ptlrpc_client_bulk_active(req),
499 cfs_time_seconds(1)) == 0)
502 ptlrpc_rqphase_move(req, req->rq_next_phase);
506 DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
512 static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
514 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
515 struct ptlrpc_service *svc = svcpt->scp_service;
516 timeout_t service_timeout;
517 struct obd_device *obd = NULL;
520 obd = req->rq_export->exp_obd;
522 service_timeout = obd_at_off(obd) ?
523 obd_timeout * 3 / 2 : obd_get_at_max(obd);
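/* Explanatory note: with adaptive timeouts disabled the reported service time
 * is capped at 1.5 * obd_timeout, otherwise at at_max; the clamp below
 * appears to bound the elapsed time to at least 1 second and at most that cap. */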
524 service_timeout = clamp_t(timeout_t, ktime_get_real_seconds() -
525 req->rq_arrival_time.tv_sec, 1,
527 if (!(flags & PTLRPC_REPLY_EARLY) &&
528 (req->rq_type != PTL_RPC_MSG_ERR) &&
529 (req->rq_reqmsg != NULL) &&
530 !(lustre_msg_get_flags(req->rq_reqmsg) &
531 (MSG_RESENT | MSG_REPLAY |
532 MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
533 /* early replies, errors and recovery requests don't count
534 * toward our service time estimate
536 timeout_t oldse = obd_at_measure(obd, &svcpt->scp_at_estimate,
540 DEBUG_REQ(D_ADAPTTO, req,
541 "svc %s changed estimate from %d to %d",
542 svc->srv_name, oldse,
543 obd_at_get(obd, &svcpt->scp_at_estimate));
546 /* Report actual service time for client latency calc */
547 lustre_msg_set_service_timeout(req->rq_repmsg, service_timeout);
548 /* Report service time estimate for future client reqs, but report 0
549 * (to be ignored by client) if it's an error reply during recovery.
552 if (req->rq_type == PTL_RPC_MSG_ERR &&
553 (req->rq_export == NULL || obd->obd_recovering)) {
554 lustre_msg_set_timeout(req->rq_repmsg, 0);
558 if (req->rq_export && req->rq_reqmsg != NULL &&
559 (flags & PTLRPC_REPLY_EARLY) &&
560 lustre_msg_get_flags(req->rq_reqmsg) &
561 (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
562 timeout = ktime_get_real_seconds() -
563 req->rq_arrival_time.tv_sec +
564 min_t(timeout_t, at_extra,
565 obd->obd_recovery_timeout / 4);
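/* Explanatory note: for replayed requests during recovery the early reply
 * advertises the time already spent plus the smaller of at_extra and a
 * quarter of the recovery window, instead of the normal AT estimate below. */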
567 timeout = obd_at_get(obd, &svcpt->scp_at_estimate);
569 lustre_msg_set_timeout(req->rq_repmsg, timeout);
572 if (req->rq_reqmsg &&
573 !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
574 CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%x/%x len=%d\n",
575 flags, lustre_msg_get_flags(req->rq_reqmsg),
576 lustre_msg_get_magic(req->rq_reqmsg),
577 lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
582 * Send request reply from request \a req reply buffer.
583 * \a flags defines the reply type.
584 * Returns 0 on success or a negative error code.
586 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
588 struct ptlrpc_reply_state *rs = req->rq_reply_state;
589 struct ptlrpc_connection *conn;
592 /* We must already have a reply buffer (only ptlrpc_error() may be
593 * called without one). The reply generated by sptlrpc layer (e.g.
594 * error notify, etc.) might have a NULL rq_reqmsg; otherwise we must
595 * have a request buffer which is either the actual (swabbed) incoming
596 * request, or a saved copy if this is a req saved in
597 * target_queue_final_reply().
599 LASSERT(req->rq_no_reply == 0);
600 LASSERT(req->rq_reqbuf != NULL);
602 LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
603 LASSERT(req->rq_repmsg != NULL);
604 LASSERT(req->rq_repmsg == rs->rs_msg);
605 LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
606 LASSERT(rs->rs_cb_id.cbid_arg == rs);
608 /* There may be no rq_export during failover */
610 if (unlikely(req->rq_export && req->rq_export->exp_obd &&
611 req->rq_export->exp_obd->obd_fail)) {
612 /* Failed obd's only send ENODEV */
613 req->rq_type = PTL_RPC_MSG_ERR;
614 req->rq_status = -ENODEV;
615 CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
616 req->rq_export->exp_obd->obd_minor);
619 if (req->rq_type != PTL_RPC_MSG_ERR)
620 req->rq_type = PTL_RPC_MSG_REPLY;
622 lustre_msg_set_type(req->rq_repmsg, req->rq_type);
623 lustre_msg_set_status(req->rq_repmsg,
624 ptlrpc_status_hton(req->rq_status));
625 lustre_msg_set_opc(req->rq_repmsg,
626 req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
628 target_pack_pool_reply(req);
630 ptlrpc_at_set_reply(req, flags);
632 if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
633 conn = ptlrpc_connection_get(&req->rq_peer, &req->rq_self,
636 conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
638 if (unlikely(conn == NULL)) {
639 CERROR("not replying on NULL connection\n"); /* bug 9635 */
642 ptlrpc_rs_addref(rs); /* +1 ref for the network */
644 rc = sptlrpc_svc_wrap_reply(req);
648 req->rq_sent = ktime_get_real_seconds();
650 rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
651 (rs->rs_difficult && !rs->rs_no_ack) ?
652 LNET_ACK_REQ : LNET_NOACK_REQ,
653 &rs->rs_cb_id, &req->rq_self,
655 ptlrpc_req2svc(req)->srv_rep_portal,
656 req->rq_rep_mbits ? req->rq_rep_mbits : req->rq_xid,
657 req->rq_reply_off, NULL);
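/* Note (assumption): an LNET ACK is requested only for "difficult" replies
 * that must be preserved until the client has seen them; ordinary replies use
 * LNET_NOACK_REQ so a single SEND event releases the reply state. */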
659 if (unlikely(rc != 0))
660 ptlrpc_req_drop_rs(req);
661 ptlrpc_connection_put(conn);
665 int ptlrpc_reply(struct ptlrpc_request *req)
667 if (req->rq_no_reply)
670 return (ptlrpc_send_reply(req, 0));
674 * For request \a req send an error reply back. Create empty
675 * reply buffers if necessary.
677 int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
683 if (req->rq_no_reply)
686 if (!req->rq_repmsg) {
687 rc = lustre_pack_reply(req, 1, NULL, NULL);
692 if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
693 req->rq_status != -EPERM && req->rq_status != -ENOENT &&
694 req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT &&
695 req->rq_status != -EROFS)
696 req->rq_type = PTL_RPC_MSG_ERR;
698 rc = ptlrpc_send_reply(req, may_be_difficult);
702 int ptlrpc_error(struct ptlrpc_request *req)
704 return ptlrpc_send_error(req, 0);
708 * Send request \a request.
709 * If \a noreply is set, don't expect any reply back and don't set up
711 * Returns 0 on success or error code.
713 int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
718 bool rep_mbits = false;
719 struct lnet_handle_md bulk_cookie;
720 struct lnet_processid peer;
721 struct ptlrpc_connection *connection;
722 struct lnet_me *reply_me = NULL;
723 struct lnet_md reply_md;
724 struct obd_import *imp = request->rq_import;
725 struct obd_device *obd = imp->imp_obd;
729 LNetInvalidateMDHandle(&bulk_cookie);
731 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
734 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_DELAY_RECOV) &&
735 lustre_msg_get_opc(request->rq_reqmsg) == MDS_CONNECT &&
736 strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) == 0)) {
740 if (unlikely(CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DROP_MGS, cfs_fail_val) &&
741 lustre_msg_get_opc(request->rq_reqmsg) == MGS_CONNECT)) {
742 DEBUG_REQ(D_INFO, request, "Simulate MGS connect failure");
747 LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
748 LASSERT(request->rq_wait_ctx == 0);
750 /* If this is a re-transmit, we must have disengaged cleanly from the previous attempt */
751 LASSERT(!request->rq_receiving_reply);
752 LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
753 (imp->imp_state == LUSTRE_IMP_FULL)));
755 if (unlikely(obd != NULL && obd->obd_fail)) {
756 CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
758 /* this prevents us from waiting in ptlrpc_queue_wait */
759 spin_lock(&request->rq_lock);
761 spin_unlock(&request->rq_lock);
762 request->rq_status = -ENODEV;
766 connection = imp->imp_connection;
768 lustre_msg_set_handle(request->rq_reqmsg,
769 &imp->imp_remote_handle);
770 lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
771 lustre_msg_set_conn_cnt(request->rq_reqmsg,
773 lustre_msghdr_set_flags(request->rq_reqmsg,
774 imp->imp_msghdr_flags);
776 /* First resend of a request for EINPROGRESS: we need to allocate a new
777 * XID (see after_reply()); this differs from a resend due to reply timeout
779 if (request->rq_nr_resend != 0 &&
780 list_empty(&request->rq_unreplied_list)) {
782 /* resend for EINPROGRESS, allocate new xid to avoid reply
785 spin_lock(&imp->imp_lock);
786 ptlrpc_assign_next_xid_nolock(request);
787 min_xid = ptlrpc_known_replied_xid(imp);
788 spin_unlock(&imp->imp_lock);
790 lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
791 DEBUG_REQ(D_RPCTRACE, request,
792 "Allocating new XID for resend on EINPROGRESS");
795 opc = lustre_msg_get_opc(request->rq_reqmsg);
796 if (opc != OST_CONNECT && opc != MDS_CONNECT &&
797 opc != MGS_CONNECT && OCD_HAS_FLAG(&imp->imp_connect_data, FLAGS2))
798 rep_mbits = imp->imp_connect_data.ocd_connect_flags2 &
799 OBD_CONNECT2_REP_MBITS;
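/* Explanatory note: when OBD_CONNECT2_REP_MBITS is negotiated, the reply is
 * matched on rq_mbits rather than rq_xid (compare the reply ME attach below
 * and the rq_rep_mbits fallback in ptlrpc_send_reply() above). */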
801 if ((request->rq_bulk != NULL) || rep_mbits) {
802 ptlrpc_set_mbits(request);
803 lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
806 if (list_empty(&request->rq_unreplied_list) ||
807 request->rq_xid <= imp->imp_known_replied_xid) {
808 DEBUG_REQ(D_ERROR, request,
809 "xid=%llu, replied=%llu, list_empty=%d",
810 request->rq_xid, imp->imp_known_replied_xid,
811 list_empty(&request->rq_unreplied_list));
816 * With AT enabled, all requests should have AT_SUPPORT in the
817 * FULL import state when OBD_CONNECT_AT is set.
818 * This check races with ptlrpc_connect_import_locked()
819 * with low probability; don't panic, only report.
821 if (!(obd_at_off(obd) || imp->imp_state != LUSTRE_IMP_FULL ||
822 (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
823 !(imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_AT))) {
824 DEBUG_REQ(D_HA, request, "Wrong state of import detected, AT=%d, imp=%d, msghdr=%d, conn=%d",
825 obd_at_off(obd), imp->imp_state != LUSTRE_IMP_FULL,
826 (imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT),
827 !(imp->imp_connect_data.ocd_connect_flags &
830 if (request->rq_resend) {
831 lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
832 if (request->rq_resend_cb != NULL)
833 request->rq_resend_cb(request, &request->rq_async_args);
835 if (request->rq_memalloc)
836 mpflag = memalloc_noreclaim_save();
838 rc = sptlrpc_cli_wrap_request(request);
842 /* bulk register should be done after wrap_request() */
843 if (request->rq_bulk != NULL) {
844 rc = ptlrpc_register_bulk(request);
846 GOTO(cleanup_bulk, rc);
848 * All the MDs in the request will have the same cpt
849 * encoded in the cookie, so we can just use the first
852 bulk_cookie = request->rq_bulk->bd_mds[0];
856 LASSERT(request->rq_replen != 0);
857 if (request->rq_repbuf == NULL) {
858 LASSERT(request->rq_repdata == NULL);
859 LASSERT(request->rq_repmsg == NULL);
860 rc = sptlrpc_cli_alloc_repbuf(request,
863 /* prevent looping in ptlrpc_queue_wait */
864 spin_lock(&request->rq_lock);
866 spin_unlock(&request->rq_lock);
867 request->rq_status = rc;
868 GOTO(cleanup_bulk, rc);
871 request->rq_repdata = NULL;
872 request->rq_repmsg = NULL;
875 peer = connection->c_peer;
876 if (request->rq_bulk &&
877 CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_REPLY_ATTACH)) {
878 reply_me = ERR_PTR(-ENOMEM);
880 reply_me = LNetMEAttach(request->rq_reply_portal,
882 rep_mbits ? request->rq_mbits :
884 0, LNET_UNLINK, LNET_INS_AFTER);
887 if (IS_ERR(reply_me)) {
888 rc = PTR_ERR(reply_me);
889 CERROR("LNetMEAttach failed: %d\n", rc);
890 LASSERT(rc == -ENOMEM);
891 GOTO(cleanup_bulk, rc = -ENOMEM);
895 spin_lock(&request->rq_lock);
896 /* We are responsible for unlinking the reply buffer */
897 request->rq_reply_unlinked = noreply;
898 request->rq_receiving_reply = !noreply;
899 /* Clear any flags that may be present from previous sends. */
900 request->rq_req_unlinked = 0;
901 request->rq_replied = 0;
903 request->rq_timedout = 0;
904 request->rq_net_err = 0;
905 request->rq_resend = 0;
906 request->rq_restart = 0;
907 request->rq_reply_truncated = 0;
908 spin_unlock(&request->rq_lock);
911 reply_md.start = request->rq_repbuf;
912 reply_md.length = request->rq_repbuf_len;
913 /* Allow multiple early replies */
914 reply_md.threshold = LNET_MD_THRESH_INF;
915 /* Manage remote for early replies */
916 reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
917 LNET_MD_MANAGE_REMOTE |
918 LNET_MD_TRUNCATE; /* allow EOVERFLOW to be generated */
919 reply_md.user_ptr = &request->rq_reply_cbid;
920 reply_md.handler = ptlrpc_handler;
922 /* We must see the unlink callback to set rq_reply_unlinked,
923 * so we can't auto-unlink
925 rc = LNetMDAttach(reply_me, &reply_md, LNET_RETAIN,
926 &request->rq_reply_md_h);
928 CERROR("LNetMDAttach failed: %d\n", rc);
929 LASSERT(rc == -ENOMEM);
930 spin_lock(&request->rq_lock);
931 /* ...but the MD attach didn't succeed... */
932 request->rq_receiving_reply = 0;
933 spin_unlock(&request->rq_lock);
934 GOTO(cleanup_bulk, rc = -ENOMEM);
936 percpu_ref_get(&ptlrpc_pending);
939 "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
940 request->rq_repbuf_len, request->rq_xid,
941 request->rq_reply_portal);
944 /* add references on request for request_out_callback */
945 ptlrpc_request_addref(request);
946 if (obd != NULL && obd->obd_svc_stats != NULL)
947 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
948 atomic_read(&imp->imp_inflight));
950 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
952 request->rq_sent_ns = ktime_get_real();
953 request->rq_sent = ktime_get_real_seconds();
954 /* We give the server rq_timeout secs to process the req, and
955 * add the network latency for our local timeout.
957 request->rq_deadline = request->rq_sent + request->rq_timeout +
958 ptlrpc_at_get_net_latency(request);
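/* Worked example (illustrative values): with rq_timeout == 30s and a measured
 * network latency of 2s, rq_deadline lands 32 seconds after rq_sent. */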
960 DEBUG_REQ(D_INFO, request, "send flags=%x",
961 lustre_msg_get_flags(request->rq_reqmsg));
963 if (unlikely(opc == OBD_PING &&
964 CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND_FAIL, cfs_fail_val))) {
965 DEBUG_REQ(D_INFO, request, "Simulate delay send failure");
969 rc = ptl_send_buf(&request->rq_req_md_h,
970 request->rq_reqbuf, request->rq_reqdata_len,
971 LNET_NOACK_REQ, &request->rq_req_cbid,
974 request->rq_request_portal,
975 request->rq_xid, 0, &bulk_cookie);
980 request->rq_req_unlinked = 1;
981 ptlrpc_req_finished(request);
985 LNetMDUnlink(request->rq_reply_md_h);
987 /* UNLINKED callback called synchronously */
988 LASSERT(!request->rq_receiving_reply);
991 /* We do a sync unlink here as there was no real transfer, so
992 * the chance of a long unlink due to a sluggish net is smaller.
994 ptlrpc_unregister_bulk(request, 0);
997 /* set rq_sent so that this request is treated
998 * as a delayed send in the upper layers
1000 request->rq_sent = ktime_get_real_seconds();
1003 if (request->rq_memalloc)
1004 memalloc_noreclaim_restore(mpflag);
1008 EXPORT_SYMBOL(ptl_send_rpc);
1010 /* Register request buffer descriptor for request receiving. */
1011 int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
1013 struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
1014 static struct lnet_processid match_id = {
1015 .nid = LNET_ANY_NID,
1022 CDEBUG(D_NET, "%s: registering portal %d\n", service->srv_name,
1023 service->srv_req_portal);
1025 if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
1028 /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
1029 * which means buffer can only be attached on local CPT, and LND
1030 * threads can find it by grabbing a local lock
1032 me = LNetMEAttach(service->srv_req_portal,
1033 &match_id, 0, ~0, LNET_UNLINK,
1034 rqbd->rqbd_svcpt->scp_cpt >= 0 ?
1035 LNET_INS_LOCAL : LNET_INS_AFTER);
1037 CERROR("%s: LNetMEAttach failed: rc = %ld\n",
1038 service->srv_name, PTR_ERR(me));
1042 LASSERT(rqbd->rqbd_refcount == 0);
1043 rqbd->rqbd_refcount = 1;
1045 md.start = rqbd->rqbd_buffer;
1046 md.length = service->srv_buf_size;
1047 md.max_size = service->srv_max_req_size;
1048 md.threshold = LNET_MD_THRESH_INF;
1049 md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
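/* Note (assumption): with LNET_MD_OP_PUT | LNET_MD_MAX_SIZE this buffer
 * accepts incoming request PUTs of up to srv_max_req_size each, and LNet
 * retires the MD once less than that much room remains, after which the
 * service posts a fresh rqbd. */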
1050 md.user_ptr = &rqbd->rqbd_cbid;
1051 md.handler = ptlrpc_handler;
1053 rc = LNetMDAttach(me, &md, LNET_UNLINK, &rqbd->rqbd_md_h);
1055 percpu_ref_get(&ptlrpc_pending);
1059 CERROR("%s: LNetMDAttach failed: rc = %d\n", service->srv_name, rc);
1060 LASSERT(rc == -ENOMEM);
1061 rqbd->rqbd_refcount = 0;