/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
#include <lnet/lib-lnet.h> /* for CFS_FAIL_PTLRPC_OST_BULK_CB2 */
/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * to portal \a portal on peer \a peer_id.
 * Returns 0 on success or an error code.
 */
static int ptl_send_buf(struct lnet_handle_md *mdh, void *base, int len,
			enum lnet_ack_req ack, struct ptlrpc_cb_id *cbid,
			lnet_nid_t self, struct lnet_process_id peer_id,
			int portal, __u64 xid, unsigned int offset,
			struct lnet_handle_md *bulk_cookie)
{
	struct lnet_md md;
	int rc;

	ENTRY;

	LASSERT(portal != 0);
	CDEBUG(D_INFO, "peer_id %s\n", libcfs_id2str(peer_id));
	md.start     = base;
	md.length    = len;
	md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
	md.options   = PTLRPC_MD_OPTIONS;
	md.user_ptr  = cbid;
	md.eq_handle = ptlrpc_eq;
	LNetInvalidateMDHandle(&md.bulk_handle);

	if (bulk_cookie) {
		md.bulk_handle = *bulk_cookie;
		md.options |= LNET_MD_BULK_HANDLE;
	}

	if (unlikely(ack == LNET_ACK_REQ &&
		     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
		/* don't ask for the ack to simulate failing client */
		ack = LNET_NOACK_REQ;
	}

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (unlikely(rc != 0)) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		RETURN(-ENOMEM);
	}

	CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
	       len, portal, xid, offset);

	percpu_ref_get(&ptlrpc_pending);

	rc = LNetPut(self, *mdh, ack,
		     peer_id, portal, xid, offset, 0);
	if (unlikely(rc != 0)) {
		int rc2;
		/* We're going to get an UNLINK event when I unlink below,
		 * which will complete just like any other failed send, so
		 * I fall through and return success here! */
		CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
		       libcfs_id2str(peer_id), portal, xid, rc);
		rc2 = LNetMDUnlink(*mdh);
		LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
	}

	RETURN(0);
}
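
/*
 * Unlink each MD in \a bd_mds. NB: this relies on LNetMDUnlink() being
 * safe to call on handles that were never bound or have already been
 * unlinked (an editorial assumption, consistent with how callers below
 * pass the whole array); failures are deliberately ignored.
 */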
static void mdunlink_iterate_helper(struct lnet_handle_md *bd_mds, int count)
{
	int i;

	for (i = 0; i < count; i++)
		LNetMDUnlink(bd_mds[i]);
}

#ifdef HAVE_SERVER_SUPPORT
/**
 * Prepare bulk descriptor for specified incoming request \a req that
 * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on server side after the request was
 * already received.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL
 * on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
					      unsigned nfrags, unsigned max_brw,
					      unsigned int type,
					      unsigned portal,
					      const struct ptlrpc_bulk_frag_ops
						*ops)
{
	struct obd_export *exp = req->rq_export;
	struct ptlrpc_bulk_desc *desc;

	ENTRY;

	LASSERT(ptlrpc_is_bulk_op_active(type));

	desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
	if (desc == NULL)
		RETURN(NULL);

	desc->bd_export = class_export_get(exp);
	desc->bd_req = req;

	desc->bd_cbid.cbid_fn = server_bulk_callback;
	desc->bd_cbid.cbid_arg = desc;

	/* NB we don't assign rq_bulk here; server-side requests are
	 * re-used, and the handler frees the bulk desc explicitly. */

	return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
/**
 * Starts bulk transfer for descriptor \a desc on the server.
 * Returns 0 on success or error code.
 */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
	struct obd_export *exp = desc->bd_export;
	lnet_nid_t self_nid;
	struct lnet_process_id peer_id;
	int rc = 0;
	__u64 mbits;
	int posted_md;
	int total_md;
	struct lnet_md md;

	ENTRY;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
		RETURN(0);

	/* NB no locking required until desc is on the network */
	LASSERT(desc->bd_md_count == 0);
	LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));

	LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
	LASSERT(desc->bd_cbid.cbid_arg == desc);

	/*
	 * Multi-Rail: get the preferred self and peer NIDs from the
	 * request, so they are based on the route taken by the
	 * message.
	 */
	self_nid = desc->bd_req->rq_self;
	peer_id = desc->bd_req->rq_source;

	/* NB total length may be 0 for a read past EOF, so we send 0
	 * length bulks, since the client expects bulk events.
	 *
	 * The client may not need all of the bulk mbits for the RPC. The RPC
	 * uses the mbits of the highest bulk mbits needed, and the server
	 * masks off the low bits of rq_mbits to recover the first mbits and
	 * the bulk count for this RPC. LU-1431 */
	mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
	total_md = desc->bd_req->rq_mbits - mbits + 1;
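	/*
	 * Worked example (illustrative values, not from the original code):
	 * with bd_md_max_brw == 4 the low two bits of the match-bit space
	 * index the MDs of one RPC, so for rq_mbits == 0x1236 covering a
	 * 3-MD bulk:
	 *
	 *	mbits    = 0x1236 & ~0x3ULL    = 0x1234  (first bulk XID)
	 *	total_md = 0x1236 - 0x1234 + 1 = 3       (MDs to post)
	 *
	 * i.e. the transfer uses match bits 0x1234..0x1236.
	 */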
	desc->bd_md_count = total_md;
	desc->bd_failure = 0;

	md.user_ptr = &desc->bd_cbid;
	md.eq_handle = ptlrpc_eq;
	md.threshold = 2; /* SENT and ACK/REPLY */

	for (posted_md = 0; posted_md < total_md; mbits++) {
		md.options = PTLRPC_MD_OPTIONS;

		/* NB it's assumed that source and sink buffer frags are
		 * page-aligned. Otherwise we'd have to send client bulk
		 * sizes over and split server buffer accordingly */
		ptlrpc_fill_bulk_md(&md, desc, posted_md);
		rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
		if (rc != 0) {
			CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
			       exp->exp_obd->obd_name, posted_md, rc);
			LASSERT(rc == -ENOMEM);
			if (posted_md == 0) {
				desc->bd_md_count = 0;
				RETURN(-ENOMEM);
			}
			break;
		}

		percpu_ref_get(&ptlrpc_pending);

		/* sanity.sh 224c: let's skip last md */
		if (posted_md == desc->bd_md_max_brw - 1)
			OBD_FAIL_CHECK_RESET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
					     CFS_FAIL_PTLRPC_OST_BULK_CB2);

		/* Network is about to get at the memory */
		if (ptlrpc_is_bulk_put_source(desc->bd_type))
			rc = LNetPut(self_nid, desc->bd_mds[posted_md],
				     LNET_ACK_REQ, peer_id,
				     desc->bd_portal, mbits, 0, 0);
		else
			rc = LNetGet(self_nid, desc->bd_mds[posted_md],
				     peer_id, desc->bd_portal, mbits, 0, false);

		posted_md++;
		if (rc != 0) {
			CERROR("%s: failed bulk transfer with %s:%u x%llu: "
			       "rc = %d\n", exp->exp_obd->obd_name,
			       libcfs_id2str(peer_id), desc->bd_portal,
			       mbits, rc);
			break;
		}
	}

	if (rc != 0) {
		/* Can't send, so we unlink the MD bound above. The UNLINK
		 * event this creates will signal completion with failure,
		 * so we return SUCCESS here! */
		spin_lock(&desc->bd_lock);
		desc->bd_md_count -= total_md - posted_md;
		spin_unlock(&desc->bd_lock);
		LASSERT(desc->bd_md_count >= 0);

		mdunlink_iterate_helper(desc->bd_mds, posted_md);
		RETURN(0);
	}

	CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
	       "id %s mbits %#llx-%#llx\n", desc->bd_iov_count,
	       desc->bd_nob, desc->bd_portal, libcfs_id2str(peer_id),
	       mbits - posted_md, mbits - 1);

	RETURN(0);
}
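
/*
 * Illustrative server-side caller (a sketch, not code from this file; the
 * handler and its page array are hypothetical, though the ptlrpc/LNet calls
 * are the ones implemented above): a read handler would prep the descriptor,
 * attach its pages, kick the transfer, then wait for server_bulk_callback()
 * to drain bd_md_count:
 *
 *	desc = ptlrpc_prep_bulk_exp(req, npages, 1,
 *				    PTLRPC_BULK_PUT_SOURCE |
 *				    PTLRPC_BULK_BUF_KIOV,
 *				    OST_BULK_PORTAL,
 *				    &ptlrpc_bulk_kiov_pin_ops);
 *	if (desc == NULL)
 *		return -ENOMEM;
 *	for (i = 0; i < npages; i++)
 *		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
 *	rc = ptlrpc_start_bulk_transfer(desc);
 *	if (rc == 0)
 *		wait_event_idle(desc->bd_waitq,
 *				!ptlrpc_server_bulk_active(desc));
 */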
/**
 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback).
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
	LASSERT(!in_interrupt());		/* might sleep */

	if (!ptlrpc_server_bulk_active(desc))	/* completed or */
		return;				/* never started */

	/* We used to poison the pages with 0xab here because we did not want
	 * to send any meaningful data over the wire for evicted clients (bug
	 * 9297). However, this is no longer safe now that we use the page
	 * cache on the server. */

	/* The unlink ensures the callback happens ASAP and is the last
	 * one. If it fails, it must be because completion just happened,
	 * but we must still wait_event_idle_timeout() in this case, to give
	 * us a chance to run server_bulk_callback()
	 */
	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

	for (;;) {
		/* Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs */
		int seconds = LONG_UNLINK;

		while (seconds > 0 &&
		       wait_event_idle_timeout(desc->bd_waitq,
					       !ptlrpc_server_bulk_active(desc),
					       cfs_time_seconds(1)) == 0)
			seconds -= 1;
		if (seconds > 0)
			return;

		CWARN("Unexpectedly long timeout: desc %p\n", desc);
	}
}
#endif /* HAVE_SERVER_SUPPORT */
/**
 * Register bulk at the sender for later transfer.
 * Returns 0 on success or error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
	struct ptlrpc_bulk_desc *desc = req->rq_bulk;
	struct lnet_process_id peer;
	int rc = 0;
	int posted_md;
	int total_md;
	__u64 mbits;
	struct lnet_me *me;
	struct lnet_md md;

	ENTRY;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
		RETURN(0);

	/* NB no locking required until desc is on the network */
	LASSERT(desc->bd_nob > 0);
	LASSERT(desc->bd_md_count == 0);
	LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(desc->bd_req != NULL);
	LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));

	/* cleanup the state of the bulk for it will be reused */
	if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
		desc->bd_nob_transferred = 0;
	else if (desc->bd_nob_transferred != 0)
		/* If the network failed after an RPC was sent, this condition
		 * could happen. Rather than assert (was here before), return
		 * EINVAL to the caller. */
		RETURN(-EINVAL);

	desc->bd_failure = 0;

	peer = desc->bd_import->imp_connection->c_peer;

	LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
	LASSERT(desc->bd_cbid.cbid_arg == desc);

	total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	/* rq_mbits is matchbits of the final bulk */
	mbits = req->rq_mbits - total_md + 1;

	LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
		 "first mbits = x%llu, last mbits = x%llu\n",
		 mbits, req->rq_mbits);
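	/*
	 * Worked example (illustrative values): with bd_iov_count == 300
	 * pages and LNET_MAX_IOV == 256, total_md = (300 + 255) / 256 = 2,
	 * so for rq_mbits == 0x1235 the MDs match mbits 0x1234 and 0x1235,
	 * which satisfies the LASSERTF above for PTLRPC_BULK_OPS_COUNT == 4.
	 */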
	LASSERTF(!(desc->bd_registered &&
		   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
		 mbits != desc->bd_last_mbits,
		 "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
		 desc->bd_registered, mbits, desc->bd_last_mbits);

	desc->bd_registered = 1;
	desc->bd_last_mbits = mbits;
	desc->bd_md_count = total_md;
	md.user_ptr = &desc->bd_cbid;
	md.eq_handle = ptlrpc_eq;
	md.threshold = 1;			/* PUT or GET */

	for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
		md.options = PTLRPC_MD_OPTIONS |
			     (ptlrpc_is_bulk_op_get(desc->bd_type) ?
			      LNET_MD_OP_GET : LNET_MD_OP_PUT);
		ptlrpc_fill_bulk_md(&md, desc, posted_md);

		if (posted_md > 0 && posted_md + 1 == total_md &&
		    OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_ATTACH)) {
			rc = -ENOMEM;
		} else {
			me = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
					  LNET_UNLINK, LNET_INS_AFTER);
			rc = PTR_ERR_OR_ZERO(me);
		}
		if (rc != 0) {
			CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
			       desc->bd_import->imp_obd->obd_name, mbits,
			       posted_md, rc);
			break;
		}
		percpu_ref_get(&ptlrpc_pending);

		/* About to let the network at it... */
		rc = LNetMDAttach(me, md, LNET_UNLINK,
				  &desc->bd_mds[posted_md]);
		if (rc != 0) {
			CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
			       desc->bd_import->imp_obd->obd_name, mbits,
			       posted_md, rc);
			break;
		}
	}

	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		spin_lock(&desc->bd_lock);
		desc->bd_md_count -= total_md - posted_md;
		spin_unlock(&desc->bd_lock);
		LASSERT(desc->bd_md_count >= 0);
		mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
		req->rq_status = -ENOMEM;
		desc->bd_registered = 0;
		RETURN(-ENOMEM);
	}

	spin_lock(&desc->bd_lock);
	/* Holler if peer manages to touch buffers before it knows the mbits */
	if (desc->bd_md_count != total_md)
		CWARN("%s: Peer %s touched %d buffers while I registered\n",
		      desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
		      total_md - desc->bd_md_count);
	spin_unlock(&desc->bd_lock);

	CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
	       "mbits x%#llx-%#llx, portal %u\n", desc->bd_md_count,
	       ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
	       desc->bd_iov_count, desc->bd_nob,
	       desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);

	RETURN(0);
}
/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
	struct ptlrpc_bulk_desc *desc = req->rq_bulk;

	ENTRY;

	LASSERT(!in_interrupt());		/* might sleep */

	/* Let's setup deadline for reply unlink. */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
	    async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
		req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;

	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
		RETURN(1);				/* never registered */

	LASSERT(desc->bd_req == req);	/* bd_req NULL until registered */

	/* the unlink ensures the callback happens ASAP and is the last
	 * one. If it fails, it must be because completion just happened,
	 * but we must still wait_event_idle_timeout() in this case to give
	 * us a chance to run client_bulk_callback()
	 */
	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
		RETURN(1);				/* never registered */

	/* Move to "Unregistering" phase as bulk was not unlinked yet. */
	ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);

	/* Do not wait for unlink to finish. */
	if (async)
		RETURN(0);

	for (;;) {
		/* The wq argument is ignored by user-space wait_event macros */
		wait_queue_head_t *wq = (req->rq_set != NULL) ?
					&req->rq_set->set_waitq :
					&req->rq_reply_waitq;
		/*
		 * Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs.
		 */
		int seconds = LONG_UNLINK;

		while (seconds > 0 &&
		       wait_event_idle_timeout(*wq,
					       !ptlrpc_client_bulk_active(req),
					       cfs_time_seconds(1)) == 0)
			seconds -= 1;
		if (seconds > 0) {
			ptlrpc_rqphase_move(req, req->rq_next_phase);
			RETURN(1);
		}

		DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
			  desc);
	}
}
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_service *svc = svcpt->scp_service;
	int service_time = max_t(int, ktime_get_real_seconds() -
				      req->rq_arrival_time.tv_sec, 1);

	if (!(flags & PTLRPC_REPLY_EARLY) &&
	    (req->rq_type != PTL_RPC_MSG_ERR) &&
	    (req->rq_reqmsg != NULL) &&
	    !(lustre_msg_get_flags(req->rq_reqmsg) &
	      (MSG_RESENT | MSG_REPLAY |
	       MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
		/* early replies, errors and recovery requests don't count
		 * toward our service time estimate */
		int oldse = at_measured(&svcpt->scp_at_estimate, service_time);

		if (oldse != 0) {
			DEBUG_REQ(D_ADAPTTO, req,
				  "svc %s changed estimate from %d to %d",
				  svc->srv_name, oldse,
				  at_get(&svcpt->scp_at_estimate));
		}
	}
	/* Report actual service time for client latency calc */
	lustre_msg_set_service_time(req->rq_repmsg, service_time);
	/* Report service time estimate for future client reqs, but report 0
	 * (to be ignored by client) if it's an error reply during recovery.
	 */
	if (req->rq_type == PTL_RPC_MSG_ERR &&
	    (req->rq_export == NULL ||
	     req->rq_export->exp_obd->obd_recovering)) {
		lustre_msg_set_timeout(req->rq_repmsg, 0);
	} else {
		time64_t timeout;

		if (req->rq_export && req->rq_reqmsg != NULL &&
		    (flags & PTLRPC_REPLY_EARLY) &&
		    lustre_msg_get_flags(req->rq_reqmsg) &
		    (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
			struct obd_device *exp_obd = req->rq_export->exp_obd;

			timeout = ktime_get_real_seconds() -
				  req->rq_arrival_time.tv_sec +
				  min_t(time64_t, at_extra,
					exp_obd->obd_recovery_timeout / 4);
		} else {
			timeout = at_get(&svcpt->scp_at_estimate);
		}
		lustre_msg_set_timeout(req->rq_repmsg, timeout);
	}

	if (req->rq_reqmsg &&
	    !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
		       "req_flags=%#x magic=%x/%x len=%d\n",
		       flags, lustre_msg_get_flags(req->rq_reqmsg),
		       lustre_msg_get_magic(req->rq_reqmsg),
		       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
	}
}
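
/*
 * Worked example for the early-reply timeout above (illustrative numbers):
 * for a replay request that arrived 5s ago, with at_extra == 30 and
 * obd_recovery_timeout == 80, the reply carries
 * timeout = 5 + min(30, 80 / 4) = 25 seconds, i.e. how much longer the
 * client should wait before resending.
 */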
/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types.
 * Returns 0 on success or error code.
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_connection *conn;
	int rc;

	/* We must already have a reply buffer (only ptlrpc_error() may be
	 * called without one). The reply generated by sptlrpc layer (e.g.
	 * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
	 * have a request buffer which is either the actual (swabbed) incoming
	 * request, or a saved copy if this is a req saved in
	 * target_queue_final_reply().
	 */
	LASSERT(req->rq_no_reply == 0);
	LASSERT(req->rq_reqbuf != NULL);
	LASSERT(rs != NULL);
	LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
	LASSERT(req->rq_repmsg != NULL);
	LASSERT(req->rq_repmsg == rs->rs_msg);
	LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
	LASSERT(rs->rs_cb_id.cbid_arg == rs);

	/* There may be no rq_export during failover */

	if (unlikely(req->rq_export && req->rq_export->exp_obd &&
		     req->rq_export->exp_obd->obd_fail)) {
		/* Failed obd's only send ENODEV */
		req->rq_type = PTL_RPC_MSG_ERR;
		req->rq_status = -ENODEV;
		CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
		       req->rq_export->exp_obd->obd_minor);
	}

	if (req->rq_type != PTL_RPC_MSG_ERR)
		req->rq_type = PTL_RPC_MSG_REPLY;

	lustre_msg_set_type(req->rq_repmsg, req->rq_type);
	lustre_msg_set_status(req->rq_repmsg,
			      ptlrpc_status_hton(req->rq_status));
	lustre_msg_set_opc(req->rq_repmsg,
		req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

	target_pack_pool_reply(req);

	ptlrpc_at_set_reply(req, flags);

	if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
		conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
	else
		conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

	if (unlikely(conn == NULL)) {
		CERROR("not replying on NULL connection\n"); /* bug 9635 */
		return -ENOTCONN;
	}
	ptlrpc_rs_addref(rs);			/* +1 ref for the network */

	rc = sptlrpc_svc_wrap_reply(req);
	if (unlikely(rc))
		goto out;

	req->rq_sent = ktime_get_real_seconds();

	rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
			  (rs->rs_difficult && !rs->rs_no_ack) ?
			  LNET_ACK_REQ : LNET_NOACK_REQ,
			  &rs->rs_cb_id, req->rq_self, req->rq_source,
			  ptlrpc_req2svc(req)->srv_rep_portal,
			  req->rq_xid, req->rq_reply_off, NULL);
out:
	if (unlikely(rc != 0))
		ptlrpc_req_drop_rs(req);
	ptlrpc_connection_put(conn);

	return rc;
}
int ptlrpc_reply(struct ptlrpc_request *req)
{
	if (req->rq_no_reply)
		return 0;
	else
		return ptlrpc_send_reply(req, 0);
}
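
/*
 * Typical server-handler usage (an illustrative sketch, not code from this
 * file): pack a reply buffer, record the handler status, and let
 * ptlrpc_reply() put either a PTL_RPC_MSG_REPLY or a PTL_RPC_MSG_ERR on
 * the wire:
 *
 *	rc = req_capsule_server_pack(&req->rq_pill);
 *	if (rc == 0) {
 *		req->rq_status = handler_rc;
 *		rc = ptlrpc_reply(req);
 *	}
 */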
/**
 * For request \a req send an error reply back. Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
	int rc;

	ENTRY;

	if (req->rq_no_reply)
		RETURN(0);

	if (!req->rq_repmsg) {
		rc = lustre_pack_reply(req, 1, NULL, NULL);
		if (rc)
			RETURN(rc);
	}

	if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
	    req->rq_status != -EPERM && req->rq_status != -ENOENT &&
	    req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
		req->rq_type = PTL_RPC_MSG_ERR;

	rc = ptlrpc_send_reply(req, may_be_difficult);
	RETURN(rc);
}

int ptlrpc_error(struct ptlrpc_request *req)
{
	return ptlrpc_send_error(req, 0);
}
/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
	int rc;
	int mpflag = 0;
	struct lnet_handle_md bulk_cookie;
	struct ptlrpc_connection *connection;
	struct lnet_me *reply_me = NULL;
	struct lnet_md reply_md;
	struct obd_import *imp = request->rq_import;
	struct obd_device *obd = imp->imp_obd;

	ENTRY;

	LNetInvalidateMDHandle(&bulk_cookie);

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
		RETURN(0);

	LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
	LASSERT(request->rq_wait_ctx == 0);

	/* If this is a re-transmit, we're required to have disengaged
	 * cleanly from the previous attempt */
	LASSERT(!request->rq_receiving_reply);
	LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
		  (imp->imp_state == LUSTRE_IMP_FULL)));

	if (unlikely(obd != NULL && obd->obd_fail)) {
		CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
		       obd->obd_name);
		/* this prevents us from waiting in ptlrpc_queue_wait */
		spin_lock(&request->rq_lock);
		request->rq_err = 1;
		spin_unlock(&request->rq_lock);
		request->rq_status = -ENODEV;
		RETURN(-ENODEV);
	}

	connection = imp->imp_connection;

	lustre_msg_set_handle(request->rq_reqmsg,
			      &imp->imp_remote_handle);
	lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
	lustre_msg_set_conn_cnt(request->rq_reqmsg,
				imp->imp_conn_cnt);
	lustre_msghdr_set_flags(request->rq_reqmsg,
				imp->imp_msghdr_flags);

	/* If it's the first time to resend the request for EINPROGRESS,
	 * we need to allocate a new XID (see after_reply()); it's different
	 * from the resend for reply timeout. */
	if (request->rq_nr_resend != 0 &&
	    list_empty(&request->rq_unreplied_list)) {
		__u64 min_xid = 0;
		/* resend for EINPROGRESS, allocate new xid to avoid reply
		 * reconstruction */
		spin_lock(&imp->imp_lock);
		ptlrpc_assign_next_xid_nolock(request);
		min_xid = ptlrpc_known_replied_xid(imp);
		spin_unlock(&imp->imp_lock);

		lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
		DEBUG_REQ(D_RPCTRACE, request,
			  "Allocating new XID for resend on EINPROGRESS");
	}

	if (request->rq_bulk != NULL) {
		ptlrpc_set_bulk_mbits(request);
		lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
	}

	if (list_empty(&request->rq_unreplied_list) ||
	    request->rq_xid <= imp->imp_known_replied_xid) {
		DEBUG_REQ(D_ERROR, request,
			  "xid=%llu, replied=%llu, list_empty=%d",
			  request->rq_xid, imp->imp_known_replied_xid,
			  list_empty(&request->rq_unreplied_list));
		LBUG();
	}

	/** For enabled AT all requests should have AT_SUPPORT in the
	 * FULL import state when OBD_CONNECT_AT is set */
	LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
		(imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
		!(imp->imp_connect_data.ocd_connect_flags &
		  OBD_CONNECT_AT));

	if (request->rq_resend) {
		lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
		if (request->rq_resend_cb != NULL)
			request->rq_resend_cb(request, &request->rq_async_args);
	}
	if (request->rq_memalloc)
		mpflag = cfs_memory_pressure_get_and_set();

	rc = sptlrpc_cli_wrap_request(request);
	if (rc)
		GOTO(out, rc);

	/* bulk register should be done after wrap_request() */
	if (request->rq_bulk != NULL) {
		rc = ptlrpc_register_bulk(request);
		if (rc != 0)
			GOTO(cleanup_bulk, rc);
		/*
		 * All the mds in the request will have the same cpt
		 * encoded in the cookie. So we can just get the first
		 * one.
		 */
		bulk_cookie = request->rq_bulk->bd_mds[0];
	}

	if (!noreply) {
		LASSERT(request->rq_replen != 0);
		if (request->rq_repbuf == NULL) {
			LASSERT(request->rq_repdata == NULL);
			LASSERT(request->rq_repmsg == NULL);
			rc = sptlrpc_cli_alloc_repbuf(request,
						      request->rq_replen);
			if (rc) {
				/* this prevents us from looping in
				 * ptlrpc_queue_wait */
				spin_lock(&request->rq_lock);
				request->rq_err = 1;
				spin_unlock(&request->rq_lock);
				request->rq_status = rc;
				GOTO(cleanup_bulk, rc);
			}
		} else {
			request->rq_repdata = NULL;
			request->rq_repmsg = NULL;
		}

		reply_me = LNetMEAttach(request->rq_reply_portal,
					connection->c_peer, request->rq_xid, 0,
					LNET_UNLINK, LNET_INS_AFTER);
		if (IS_ERR(reply_me)) {
			rc = PTR_ERR(reply_me);
			CERROR("LNetMEAttach failed: %d\n", rc);
			LASSERT(rc == -ENOMEM);
			GOTO(cleanup_bulk, rc = -ENOMEM);
		}
	}

	spin_lock(&request->rq_lock);
	/* We are responsible for unlinking the reply buffer */
	request->rq_reply_unlinked = noreply;
	request->rq_receiving_reply = !noreply;
	/* Clear any flags that may be present from previous sends. */
	request->rq_req_unlinked = 0;
	request->rq_replied = 0;
	request->rq_err = 0;
	request->rq_timedout = 0;
	request->rq_net_err = 0;
	request->rq_resend = 0;
	request->rq_restart = 0;
	request->rq_reply_truncated = 0;
	spin_unlock(&request->rq_lock);

	if (!noreply) {
		reply_md.start = request->rq_repbuf;
		reply_md.length = request->rq_repbuf_len;
		/* Allow multiple early replies */
		reply_md.threshold = LNET_MD_THRESH_INF;
		/* Manage remote for early replies */
		reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
				   LNET_MD_MANAGE_REMOTE |
				   LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
		reply_md.user_ptr = &request->rq_reply_cbid;
		reply_md.eq_handle = ptlrpc_eq;

		/* We must see the unlink callback to set rq_reply_unlinked,
		 * so we can't auto-unlink */
		rc = LNetMDAttach(reply_me, reply_md, LNET_RETAIN,
				  &request->rq_reply_md_h);
		if (rc != 0) {
			CERROR("LNetMDAttach failed: %d\n", rc);
			LASSERT(rc == -ENOMEM);
			spin_lock(&request->rq_lock);
			/* ...but the MD attach didn't succeed... */
			request->rq_receiving_reply = 0;
			spin_unlock(&request->rq_lock);
			GOTO(cleanup_me, rc = -ENOMEM);
		}
		percpu_ref_get(&ptlrpc_pending);

		CDEBUG(D_NET,
		       "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
		       request->rq_repbuf_len, request->rq_xid,
		       request->rq_reply_portal);
	}

	/* add references on request for request_out_callback */
	ptlrpc_request_addref(request);
	if (obd != NULL && obd->obd_svc_stats != NULL)
		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
				    atomic_read(&imp->imp_inflight));

	OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

	request->rq_sent_ns = ktime_get_real();
	request->rq_sent = ktime_get_real_seconds();
	/* We give the server rq_timeout secs to process the req, and
	 * add the network latency for our local timeout. */
	request->rq_deadline = request->rq_sent + request->rq_timeout +
			       ptlrpc_at_get_net_latency(request);

	ptlrpc_pinger_sending_on_import(imp);

	DEBUG_REQ(D_INFO, request, "send flags=%x",
		  lustre_msg_get_flags(request->rq_reqmsg));
	rc = ptl_send_buf(&request->rq_req_md_h,
			  request->rq_reqbuf, request->rq_reqdata_len,
			  LNET_NOACK_REQ, &request->rq_req_cbid,
			  LNET_NID_ANY, connection->c_peer,
			  request->rq_request_portal,
			  request->rq_xid, 0, &bulk_cookie);
	if (likely(rc == 0))
		GOTO(out, rc);

	request->rq_req_unlinked = 1;
	ptlrpc_req_finished(request);
	if (noreply)
		GOTO(out, rc);

 cleanup_me:
	/* MEUnlink is safe; the PUT didn't even get off the ground, and
	 * nobody apart from the PUT's target has the right nid+XID to
	 * access the reply buffer.
	 */
	LNetMEUnlink(reply_me);
	/* UNLINKED callback called synchronously */
	LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
	/* We do sync unlink here as there was no real transfer here so
	 * the chance to have long unlink to sluggish net is smaller here. */
	ptlrpc_unregister_bulk(request, 0);
	if (request->rq_bulk != NULL)
		request->rq_bulk->bd_registered = 0;

 out:
	if (rc == -ENOMEM) {
		/* set rq_sent so that this request is treated
		 * as a delayed send in the upper layers */
		request->rq_sent = ktime_get_real_seconds();
	}

	if (request->rq_memalloc)
		cfs_memory_pressure_restore(mpflag);

	RETURN(rc);
}
EXPORT_SYMBOL(ptl_send_rpc);
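
/*
 * Illustrative client-side path (a sketch of how ptl_send_rpc() is normally
 * reached, not a verbatim call sequence from this file): requests are built
 * against an import and queued, and the RPC engine calls ptl_send_rpc() to
 * do the actual LNetPut:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);	// ends up in ptl_send_rpc()
 *	ptlrpc_req_finished(req);
 */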
/**
 * Register request buffer descriptor for request receiving.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
	struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
	static struct lnet_process_id match_id = {
		.nid = LNET_NID_ANY,
		.pid = LNET_PID_ANY
	};
	struct lnet_md md;
	struct lnet_me *me;
	int rc;

	CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
	       service->srv_req_portal);

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
		return -ENOMEM;

	/* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
	 * which means buffer can only be attached on local CPT, and LND
	 * threads can find it by grabbing a local lock */
	me = LNetMEAttach(service->srv_req_portal,
			  match_id, 0, ~0, LNET_UNLINK,
			  rqbd->rqbd_svcpt->scp_cpt >= 0 ?
			  LNET_INS_LOCAL : LNET_INS_AFTER);
	if (IS_ERR(me)) {
		CERROR("LNetMEAttach failed: %ld\n", PTR_ERR(me));
		return -ENOMEM;
	}

	LASSERT(rqbd->rqbd_refcount == 0);
	rqbd->rqbd_refcount = 1;

	md.start     = rqbd->rqbd_buffer;
	md.length    = service->srv_buf_size;
	md.max_size  = service->srv_max_req_size;
	md.threshold = LNET_MD_THRESH_INF;
	md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
	md.user_ptr  = &rqbd->rqbd_cbid;
	md.eq_handle = ptlrpc_eq;

	rc = LNetMDAttach(me, md, LNET_UNLINK, &rqbd->rqbd_md_h);
	if (rc == 0) {
		percpu_ref_get(&ptlrpc_pending);
		return 0;
	}

	CERROR("ptlrpc: LNetMDAttach failed: rc = %d\n", rc);
	LASSERT(rc == -ENOMEM);
	LNetMEUnlink(me);
	rqbd->rqbd_refcount = 0;

	return -ENOMEM;
}