/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <obd.h>
#include <obd_class.h>
#include "ptlrpc_internal.h"
/**
 * Helper function. Sends \a len bytes from \a base at offset \a offset
 * over connection \a conn to portal \a portal.
 * Returns 0 on success or an error code.
 */
static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
			lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
			struct ptlrpc_connection *conn, int portal, __u64 xid,
			unsigned int offset)
{
	int		rc;
	int		rc2;
	lnet_md_t	md;
	ENTRY;

	LASSERT(portal != 0);
	LASSERT(conn != NULL);
	CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
	md.start     = base;
	md.length    = len;
	md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
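	/* NB the threshold is the number of events the MD will accept
	 * before becoming inactive; a PUT with an ACK requested yields
	 * both a SEND and an ACK event, hence 2 above instead of 1 */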
	md.options   = PTLRPC_MD_OPTIONS;
	md.user_ptr  = cbid;
	md.eq_handle = ptlrpc_eq_h;

	if (unlikely(ack == LNET_ACK_REQ &&
		     OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))) {
		/* don't ask for the ack to simulate a failing client */
		ack = LNET_NOACK_REQ;
	}

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (unlikely(rc != 0)) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		RETURN(-ENOMEM);
	}
	CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %lld, offset %u\n",
	       len, portal, xid, offset);

	rc = LNetPut(conn->c_self, *mdh, ack,
		     conn->c_peer, portal, xid, offset, 0);
	if (unlikely(rc != 0)) {
		/* We're going to get an UNLINK event when I unlink below,
		 * which will complete just like any other failed send, so
		 * I fall through and return success here! */
		CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
		       libcfs_id2str(conn->c_peer), portal, xid, rc);
		rc2 = LNetMDUnlink(*mdh);
		LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
	}

	RETURN(0);
}
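/* Unlink every MD in \a bd_mds. Calling LNetMDUnlink() on a handle that
 * was never bound, or that has already been unlinked, is expected to be
 * harmless, so this is safe even for a partially-posted array. */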
static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
{
	int i;

	for (i = 0; i < count; i++)
		LNetMDUnlink(bd_mds[i]);
}
#ifdef HAVE_SERVER_SUPPORT
/**
 * Prepare bulk descriptor for specified incoming request \a req that
 * can fit \a nfrags * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on server-side after request was already
 * received.
 *
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
					      unsigned nfrags, unsigned max_brw,
					      unsigned int type,
					      unsigned portal,
					      const struct ptlrpc_bulk_frag_ops
						*ops)
{
	struct obd_export *exp = req->rq_export;
	struct ptlrpc_bulk_desc *desc;

	ENTRY;
	LASSERT(ptlrpc_is_bulk_op_active(type));

	desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
	if (desc == NULL)
		RETURN(NULL);

	desc->bd_export = class_export_get(exp);
	desc->bd_req = req;

	desc->bd_cbid.cbid_fn = server_bulk_callback;
	desc->bd_cbid.cbid_arg = desc;

	/* NB we don't assign rq_bulk here; server-side requests are
	 * re-used, and the handler frees the bulk desc explicitly. */

	return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
/**
 * Starts bulk transfer for descriptor \a desc on the server.
 * Returns 0 on success or an error code.
 */
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc)
{
	struct obd_export	 *exp = desc->bd_export;
	struct ptlrpc_connection *conn = exp->exp_connection;
	int			  posted_md;
	int			  total_md;
	__u64			  mbits;
	lnet_md_t		  md;
	int			  rc;
	ENTRY;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_PUT_NET))
		RETURN(0);

	/* NB no locking required until desc is on the network */
	LASSERT(desc->bd_md_count == 0);
	LASSERT(ptlrpc_is_bulk_op_active(desc->bd_type));

	LASSERT(desc->bd_cbid.cbid_fn == server_bulk_callback);
	LASSERT(desc->bd_cbid.cbid_arg == desc);

	/* NB total length may be 0 for a read past EOF, so we send 0
	 * length bulks, since the client expects bulk events.
	 *
	 * The client may not need all of the bulk mbits for the RPC. The RPC
	 * used the mbits of the highest bulk mbits needed, and the server
	 * masks off high bits to get the bulk count for this RPC. LU-1431 */
	mbits = desc->bd_req->rq_mbits & ~((__u64)desc->bd_md_max_brw - 1);
	total_md = desc->bd_req->rq_mbits - mbits + 1;
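	/* e.g. with bd_md_max_brw = 4 (bulk mbits 4-aligned per RPC) and
	 * rq_mbits = 0x1042: mbits = 0x1040 and total_md = 3 */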
	desc->bd_md_count = total_md;
	desc->bd_failure = 0;

	md.user_ptr = &desc->bd_cbid;
	md.eq_handle = ptlrpc_eq_h;
	md.threshold = 2; /* SENT and ACK/REPLY */
	for (posted_md = 0; posted_md < total_md; mbits++) {
		md.options = PTLRPC_MD_OPTIONS;

		/* NB it's assumed that source and sink buffer frags are
		 * page-aligned. Otherwise we'd have to send client bulk
		 * sizes over and split server buffer accordingly */
		ptlrpc_fill_bulk_md(&md, desc, posted_md);
		rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_mds[posted_md]);
		if (rc != 0) {
			CERROR("%s: LNetMDBind failed for MD %u: rc = %d\n",
			       exp->exp_obd->obd_name, posted_md, rc);
			LASSERT(rc == -ENOMEM);
			if (posted_md == 0) {
				desc->bd_md_count = 0;
				RETURN(-ENOMEM);
			}
			break;
		}

		/* LU-6441: last md is not sent and desc->bd_md_count == 1 */
		if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB3,
					 CFS_FAIL_ONCE) &&
		    total_md > 1 && posted_md == total_md - 1) {
			posted_md++;
			continue;
		}

		/* Network is about to get at the memory */
		if (ptlrpc_is_bulk_put_source(desc->bd_type))
			rc = LNetPut(conn->c_self, desc->bd_mds[posted_md],
				     LNET_ACK_REQ, conn->c_peer,
				     desc->bd_portal, mbits, 0, 0);
		else
			rc = LNetGet(conn->c_self, desc->bd_mds[posted_md],
				     conn->c_peer, desc->bd_portal, mbits, 0);

		posted_md++;
		if (rc != 0) {
			CERROR("%s: failed bulk transfer with %s:%u x%llu: "
			       "rc = %d\n", exp->exp_obd->obd_name,
			       libcfs_id2str(conn->c_peer), desc->bd_portal,
			       mbits, rc);
			break;
		}
	}

	if (rc != 0) {
		/* Can't send, so we unlink the MD bound above. The UNLINK
		 * event this creates will signal completion with failure,
		 * so we return SUCCESS here! */
		spin_lock(&desc->bd_lock);
		desc->bd_md_count -= total_md - posted_md;
		spin_unlock(&desc->bd_lock);
		LASSERT(desc->bd_md_count >= 0);

		mdunlink_iterate_helper(desc->bd_mds, posted_md);
		RETURN(0);
	}

	CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
	       "id %s mbits %#llx-%#llx\n", desc->bd_iov_count,
	       desc->bd_nob, desc->bd_portal, libcfs_id2str(conn->c_peer),
	       mbits - posted_md, mbits - 1);

	RETURN(0);
}
/**
 * Server side bulk abort. Idempotent. Not thread-safe (i.e. only
 * serialises with completion callback).
 */
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
	struct l_wait_info lwi;
	int rc;
	ENTRY;

	LASSERT(!in_interrupt());		/* might sleep */

	if (!ptlrpc_server_bulk_active(desc))	/* completed or */
		return;				/* never started */

	/* We used to poison the pages with 0xab here because we did not want
	 * to send any meaningful data over the wire for evicted clients
	 * (bug 9297). However, this is no longer safe now that we use the
	 * page cache on the OSS. */

	/* The unlink ensures the callback happens ASAP and is the last
	 * one. If it fails, it must be because completion just happened,
	 * but we must still l_wait_event() in this case, to give liblustre
	 * a chance to run server_bulk_callback() */
	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

	for (;;) {
		/* Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(desc->bd_waitq,
				  !ptlrpc_server_bulk_active(desc), &lwi);
		if (rc == 0)
			return;

		LASSERT(rc == -ETIMEDOUT);
		CWARN("Unexpectedly long timeout: desc %p\n", desc);
	}
}
#endif /* HAVE_SERVER_SUPPORT */
/**
 * Register bulk at the sender for later transfer.
 * Returns 0 on success or an error code.
 */
int ptlrpc_register_bulk(struct ptlrpc_request *req)
{
	struct ptlrpc_bulk_desc *desc = req->rq_bulk;
	lnet_process_id_t peer;
	int rc = 0;
	int rc2;
	int posted_md;
	int total_md;
	__u64 mbits;
	lnet_handle_me_t me_h;
	lnet_md_t md;
	ENTRY;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
		RETURN(0);

	/* NB no locking required until desc is on the network */
	LASSERT(desc->bd_nob > 0);
	LASSERT(desc->bd_md_count == 0);
	LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(desc->bd_req != NULL);
	LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));

	/* cleanup the state of the bulk for it will be reused */
	if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
		desc->bd_nob_transferred = 0;
	else
		LASSERT(desc->bd_nob_transferred == 0);

	desc->bd_failure = 0;

	peer = desc->bd_import->imp_connection->c_peer;

	LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
	LASSERT(desc->bd_cbid.cbid_arg == desc);

	total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	/* rq_mbits is matchbits of the final bulk */
	mbits = req->rq_mbits - total_md + 1;
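	/* e.g. a 300-page bulk with LNET_MAX_IOV pages (256) per MD needs
	 * total_md = 2; with rq_mbits = 0x1041 the MDs use 0x1040-0x1041 */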
	LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
		 "first mbits = x%llu, last mbits = x%llu\n",
		 mbits, req->rq_mbits);
	LASSERTF(!(desc->bd_registered &&
		   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
		 mbits != desc->bd_last_mbits,
		 "registered: %d rq_mbits: %llu bd_last_mbits: %llu\n",
		 desc->bd_registered, mbits, desc->bd_last_mbits);

	desc->bd_registered = 1;
	desc->bd_last_mbits = mbits;
	desc->bd_md_count = total_md;
	md.user_ptr = &desc->bd_cbid;
	md.eq_handle = ptlrpc_eq_h;
	md.threshold = 1;			/* PUT or GET */
	for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
		md.options = PTLRPC_MD_OPTIONS |
			     (ptlrpc_is_bulk_op_get(desc->bd_type) ?
			      LNET_MD_OP_GET : LNET_MD_OP_PUT);
		ptlrpc_fill_bulk_md(&md, desc, posted_md);

		rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
				  LNET_UNLINK, LNET_INS_AFTER, &me_h);
		if (rc != 0) {
			CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
			       desc->bd_import->imp_obd->obd_name, mbits,
			       posted_md, rc);
			break;
		}

		/* About to let the network at it... */
		rc = LNetMDAttach(me_h, md, LNET_UNLINK,
				  &desc->bd_mds[posted_md]);
		if (rc != 0) {
			CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
			       desc->bd_import->imp_obd->obd_name, mbits,
			       posted_md, rc);
			rc2 = LNetMEUnlink(me_h);
			LASSERT(rc2 == 0);
			break;
		}
	}

	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		spin_lock(&desc->bd_lock);
		desc->bd_md_count -= total_md - posted_md;
		spin_unlock(&desc->bd_lock);
		LASSERT(desc->bd_md_count >= 0);
		mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
		req->rq_status = -ENOMEM;
		RETURN(-ENOMEM);
	}

	spin_lock(&desc->bd_lock);
	/* Holler if peer manages to touch buffers before he knows the mbits */
	if (desc->bd_md_count != total_md)
		CWARN("%s: Peer %s touched %d buffers while I registered\n",
		      desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
		      total_md - desc->bd_md_count);
	spin_unlock(&desc->bd_lock);

	CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
	       "mbits x%#llx-%#llx, portal %u\n", desc->bd_md_count,
	       ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
	       desc->bd_iov_count, desc->bd_nob,
	       desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);

	RETURN(0);
}
/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
	struct ptlrpc_bulk_desc *desc = req->rq_bulk;
	struct l_wait_info lwi;
	int rc;
	ENTRY;

	LASSERT(!in_interrupt());		/* might sleep */

	/* Let's setup deadline for reply unlink. */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
	    async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
		req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;

	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
		RETURN(1);				/* never registered */

	LASSERT(desc->bd_req == req);	/* bd_req NULL until registered */

	/* the unlink ensures the callback happens ASAP and is the last
	 * one. If it fails, it must be because completion just happened,
	 * but we must still l_wait_event() in this case to give liblustre
	 * a chance to run client_bulk_callback() */
	mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
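	/* If the unlink above raced with normal completion, the bulk is
	 * already inactive and we can report success without waiting. */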
	if (ptlrpc_client_bulk_active(req) == 0)	/* completed or */
		RETURN(1);				/* never registered */

	/* Move to "Unregistering" phase as bulk was not unlinked yet. */
	ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);

	/* Do not wait for unlink to finish. */
	if (async)
		RETURN(0);

	for (;;) {
		/* The wq argument is ignored by user-space wait_event macros */
		wait_queue_head_t *wq = (req->rq_set != NULL) ?
					&req->rq_set->set_waitq :
					&req->rq_reply_waitq;
		/* Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
		if (rc == 0) {
			ptlrpc_rqphase_move(req, req->rq_next_phase);
			RETURN(1);
		}

		LASSERT(rc == -ETIMEDOUT);
		DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
			  desc);
	}
}
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
	struct ptlrpc_service *svc = svcpt->scp_service;
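	/* elapsed service time in seconds, clamped to at least 1 since
	 * the request may have arrived within the current second */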
	int service_time = max_t(int, cfs_time_current_sec() -
				 req->rq_arrival_time.tv_sec, 1);

	if (!(flags & PTLRPC_REPLY_EARLY) &&
	    (req->rq_type != PTL_RPC_MSG_ERR) &&
	    (req->rq_reqmsg != NULL) &&
	    !(lustre_msg_get_flags(req->rq_reqmsg) &
	      (MSG_RESENT | MSG_REPLAY |
	       MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
		/* early replies, errors and recovery requests don't count
		 * toward our service time estimate */
		int oldse = at_measured(&svcpt->scp_at_estimate, service_time);

		if (oldse != 0)
			DEBUG_REQ(D_ADAPTTO, req,
				  "svc %s changed estimate from %d to %d",
				  svc->srv_name, oldse,
				  at_get(&svcpt->scp_at_estimate));
	}
	/* Report actual service time for client latency calc */
	lustre_msg_set_service_time(req->rq_repmsg, service_time);
	/* Report service time estimate for future client reqs, but report 0
	 * (to be ignored by client) if it's an error reply during recovery. */
	if (req->rq_type == PTL_RPC_MSG_ERR &&
	    (req->rq_export == NULL ||
	     req->rq_export->exp_obd->obd_recovering)) {
		lustre_msg_set_timeout(req->rq_repmsg, 0);
	} else {
		int timeout;

		if (req->rq_export && req->rq_reqmsg != NULL &&
		    (flags & PTLRPC_REPLY_EARLY) &&
		    lustre_msg_get_flags(req->rq_reqmsg) &
		    (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))
			timeout = cfs_time_current_sec() -
				  req->rq_arrival_time.tv_sec +
				  min(at_extra,
				      req->rq_export->exp_obd->
				      obd_recovery_timeout / 4);
		else
			timeout = at_get(&svcpt->scp_at_estimate);
		lustre_msg_set_timeout(req->rq_repmsg, timeout);
	}

	if (req->rq_reqmsg &&
	    !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
		       "req_flags=%#x magic=%x/%x len=%d\n",
		       flags, lustre_msg_get_flags(req->rq_reqmsg),
		       lustre_msg_get_magic(req->rq_reqmsg),
		       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
	}
}
/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types.
 * Returns 0 on success or an error code.
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_connection *conn;
	int rc;

	/* We must already have a reply buffer (only ptlrpc_error() may be
	 * called without one). The reply generated by sptlrpc layer (e.g.
	 * error notify, etc.) might have NULL rq->reqmsg; otherwise we must
	 * have a request buffer which is either the actual (swabbed) incoming
	 * request, or a saved copy if this is a req saved in
	 * target_queue_final_reply().
	 */
	LASSERT(req->rq_no_reply == 0);
	LASSERT(req->rq_reqbuf != NULL);
	LASSERT(rs != NULL);
	LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
	LASSERT(req->rq_repmsg != NULL);
	LASSERT(req->rq_repmsg == rs->rs_msg);
	LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
	LASSERT(rs->rs_cb_id.cbid_arg == rs);

	/* There may be no rq_export during failover */

	if (unlikely(req->rq_export && req->rq_export->exp_obd &&
		     req->rq_export->exp_obd->obd_fail)) {
		/* Failed obd's only send ENODEV */
		req->rq_type = PTL_RPC_MSG_ERR;
		req->rq_status = -ENODEV;
		CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
		       req->rq_export->exp_obd->obd_minor);
	}
	/* In order to keep interoperability with clients (< 2.3) which
	 * don't have pb_jobid in ptlrpc_body, we have to shrink the
	 * ptlrpc_body in the reply buffer to ptlrpc_body_v2; otherwise
	 * the reply buffer on the client would overflow.
	 *
	 * XXX Remove this whenever we drop interoperability with such
	 * clients. */
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
					   sizeof(struct ptlrpc_body_v2), 1);
	if (req->rq_type != PTL_RPC_MSG_ERR)
		req->rq_type = PTL_RPC_MSG_REPLY;

	lustre_msg_set_type(req->rq_repmsg, req->rq_type);
	lustre_msg_set_status(req->rq_repmsg,
			      ptlrpc_status_hton(req->rq_status));
	lustre_msg_set_opc(req->rq_repmsg,
		req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

	target_pack_pool_reply(req);

	ptlrpc_at_set_reply(req, flags);

	if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
		conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
	else
		conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

	if (unlikely(conn == NULL)) {
		CERROR("not replying on NULL connection\n"); /* bug 9635 */
		return -ENOTCONN;
	}
	ptlrpc_rs_addref(rs);			/* +1 ref for the network */

	rc = sptlrpc_svc_wrap_reply(req);
	if (rc)
		GOTO(out, rc);

	req->rq_sent = cfs_time_current_sec();

	rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
			  (rs->rs_difficult && !rs->rs_no_ack) ?
			  LNET_ACK_REQ : LNET_NOACK_REQ,
			  &rs->rs_cb_id, conn,
			  ptlrpc_req2svc(req)->srv_rep_portal,
			  req->rq_xid, req->rq_reply_off);
out:
	if (unlikely(rc != 0))
		ptlrpc_req_drop_rs(req);
	ptlrpc_connection_put(conn);
	return rc;
}
int ptlrpc_reply(struct ptlrpc_request *req)
{
	if (req->rq_no_reply)
		return 0;
	else
		return ptlrpc_send_reply(req, 0);
}
/**
 * For request \a req send an error reply back. Create empty
 * reply buffers if necessary.
 */
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
	int rc;
	ENTRY;

	if (req->rq_no_reply)
		RETURN(0);

	if (!req->rq_repmsg) {
		rc = lustre_pack_reply(req, 1, NULL, NULL);
		if (rc)
			RETURN(rc);
	}
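	/* Apparently-routine failures (no space, no permission, missing
	 * object, quota exceeded, operation still in progress) keep the
	 * normal reply type; anything else is marked a protocol error. */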
	if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
	    req->rq_status != -EPERM && req->rq_status != -ENOENT &&
	    req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
		req->rq_type = PTL_RPC_MSG_ERR;

	rc = ptlrpc_send_reply(req, may_be_difficult);
	RETURN(rc);
}
int ptlrpc_error(struct ptlrpc_request *req)
{
	return ptlrpc_send_error(req, 0);
}
/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or an error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
	int rc;
	int rc2;
	int mpflag = 0;
	struct ptlrpc_connection *connection;
	lnet_handle_me_t reply_me_h;
	lnet_md_t reply_md;
	struct obd_import *imp = request->rq_import;
	struct obd_device *obd = imp->imp_obd;
	ENTRY;

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
		RETURN(0);

	LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
	LASSERT(request->rq_wait_ctx == 0);
	/* If this is a re-transmit, we're required to have disengaged
	 * cleanly from the previous attempt */
	LASSERT(!request->rq_receiving_reply);
	LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
		  (imp->imp_state == LUSTRE_IMP_FULL)));

	if (unlikely(obd != NULL && obd->obd_fail)) {
		CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
		       obd->obd_name);
		/* this prevents us from waiting in ptlrpc_queue_wait */
		spin_lock(&request->rq_lock);
		request->rq_err = 1;
		spin_unlock(&request->rq_lock);
		request->rq_status = -ENODEV;
		RETURN(-ENODEV);
	}
	connection = imp->imp_connection;

	lustre_msg_set_handle(request->rq_reqmsg,
			      &imp->imp_remote_handle);
	lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
	lustre_msg_set_conn_cnt(request->rq_reqmsg,
				imp->imp_conn_cnt);
	lustre_msghdr_set_flags(request->rq_reqmsg,
				imp->imp_msghdr_flags);
	/* If it's the first time to resend the request for EINPROGRESS,
	 * we need to allocate a new XID (see after_reply()); it's different
	 * from the resend for reply timeout. */
	if (request->rq_nr_resend != 0 &&
	    list_empty(&request->rq_unreplied_list)) {
		__u64 min_xid = 0;
		/* resend for EINPROGRESS, allocate new xid to avoid reply
		 * reconstruction */
		spin_lock(&imp->imp_lock);
		ptlrpc_assign_next_xid_nolock(request);
		min_xid = ptlrpc_known_replied_xid(imp);
		spin_unlock(&imp->imp_lock);

		lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
		DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for "
			  "resend on EINPROGRESS");
	}
	if (request->rq_bulk != NULL) {
		ptlrpc_set_bulk_mbits(request);
		lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
	}

	if (list_empty(&request->rq_unreplied_list) ||
	    request->rq_xid <= imp->imp_known_replied_xid) {
		DEBUG_REQ(D_ERROR, request, "xid: %llu, replied: %llu, "
			  "list_empty:%d\n", request->rq_xid,
			  imp->imp_known_replied_xid,
			  list_empty(&request->rq_unreplied_list));
		LBUG();
	}
	/** With AT enabled, all requests should have AT_SUPPORT in the
	 * FULL import state when OBD_CONNECT_AT is set */
	LASSERT(AT_OFF || imp->imp_state != LUSTRE_IMP_FULL ||
		(imp->imp_msghdr_flags & MSGHDR_AT_SUPPORT) ||
		!(imp->imp_connect_data.ocd_connect_flags &
		  OBD_CONNECT_AT));

	if (request->rq_resend) {
		lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
		if (request->rq_resend_cb != NULL)
			request->rq_resend_cb(request, &request->rq_async_args);
	}
	if (request->rq_memalloc)
		mpflag = cfs_memory_pressure_get_and_set();
	rc = sptlrpc_cli_wrap_request(request);
	if (rc) {
		/* set rq_sent so that this request is treated
		 * as a delayed send in the upper layers */
		request->rq_sent = cfs_time_current_sec();
		GOTO(out, rc);
	}

	/* bulk register should be done after wrap_request() */
	if (request->rq_bulk != NULL) {
		rc = ptlrpc_register_bulk(request);
		if (rc != 0)
			GOTO(cleanup_bulk, rc);
	}

	if (!noreply) {
		LASSERT(request->rq_replen != 0);
		if (request->rq_repbuf == NULL) {
			LASSERT(request->rq_repdata == NULL);
			LASSERT(request->rq_repmsg == NULL);
			rc = sptlrpc_cli_alloc_repbuf(request,
						      request->rq_replen);
			if (rc) {
				/* this prevents us from looping in
				 * ptlrpc_queue_wait */
				spin_lock(&request->rq_lock);
				request->rq_err = 1;
				spin_unlock(&request->rq_lock);
				request->rq_status = rc;
				GOTO(cleanup_bulk, rc);
			}
		} else {
			request->rq_repdata = NULL;
			request->rq_repmsg = NULL;
		}

		rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
				  connection->c_peer, request->rq_xid, 0,
				  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
		if (rc != 0) {
			CERROR("LNetMEAttach failed: %d\n", rc);
			LASSERT(rc == -ENOMEM);
			GOTO(cleanup_bulk, rc = -ENOMEM);
		}
	}
	spin_lock(&request->rq_lock);
	/* We are responsible for unlinking the reply buffer */
	request->rq_reply_unlinked = noreply;
	request->rq_receiving_reply = !noreply;
	/* Clear any flags that may be present from previous sends. */
	request->rq_req_unlinked = 0;
	request->rq_replied = 0;
	request->rq_err = 0;
	request->rq_timedout = 0;
	request->rq_net_err = 0;
	request->rq_resend = 0;
	request->rq_restart = 0;
	request->rq_reply_truncated = 0;
	spin_unlock(&request->rq_lock);

	if (!noreply) {
		reply_md.start = request->rq_repbuf;
		reply_md.length = request->rq_repbuf_len;
		/* Allow multiple early replies */
		reply_md.threshold = LNET_MD_THRESH_INF;
		/* Manage remote for early replies */
		reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
				   LNET_MD_MANAGE_REMOTE |
				   LNET_MD_TRUNCATE; /* allow truncation to
						      * produce an EOVERFLOW
						      * error */
		reply_md.user_ptr = &request->rq_reply_cbid;
		reply_md.eq_handle = ptlrpc_eq_h;
		/* We must see the unlink callback to set rq_reply_unlinked,
		 * so we can't auto-unlink */
		rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
				  &request->rq_reply_md_h);
		if (rc != 0) {
			CERROR("LNetMDAttach failed: %d\n", rc);
			LASSERT(rc == -ENOMEM);
			spin_lock(&request->rq_lock);
			/* ...but the MD attach didn't succeed... */
			request->rq_receiving_reply = 0;
			spin_unlock(&request->rq_lock);
			GOTO(cleanup_me, rc = -ENOMEM);
		}

		CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu"
		       ", portal %u\n",
		       request->rq_repbuf_len, request->rq_xid,
		       request->rq_reply_portal);
	}
	/* add references on request for request_out_callback */
	ptlrpc_request_addref(request);
	if (obd != NULL && obd->obd_svc_stats != NULL)
		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
				    atomic_read(&imp->imp_inflight));

	OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

	do_gettimeofday(&request->rq_sent_tv);
	request->rq_sent = cfs_time_current_sec();
	/* We give the server rq_timeout secs to process the req, and
	 * add the network latency for our local timeout. */
	request->rq_deadline = request->rq_sent + request->rq_timeout +
			       ptlrpc_at_get_net_latency(request);
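	/* e.g. rq_timeout = 30s with 5s of estimated network latency gives
	 * the server 30s to service the request and us a 35s local deadline */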
	ptlrpc_pinger_sending_on_import(imp);

	DEBUG_REQ(D_INFO, request, "send flg=%x",
		  lustre_msg_get_flags(request->rq_reqmsg));
	rc = ptl_send_buf(&request->rq_req_md_h,
			  request->rq_reqbuf, request->rq_reqdata_len,
			  LNET_NOACK_REQ, &request->rq_req_cbid,
			  connection,
			  request->rq_request_portal,
			  request->rq_xid, 0);
	if (likely(rc == 0))
		GOTO(out, rc);

	request->rq_req_unlinked = 1;
	ptlrpc_req_finished(request);
	if (noreply)
		GOTO(out, rc);
 cleanup_me:
	/* MEUnlink is safe; the PUT didn't even get off the ground, and
	 * nobody apart from the PUT's target has the right nid+XID to
	 * access the reply buffer. */
	rc2 = LNetMEUnlink(reply_me_h);
	LASSERT(rc2 == 0);
	/* UNLINKED callback called synchronously */
	LASSERT(!request->rq_receiving_reply);
 cleanup_bulk:
	/* We do a sync unlink here as there was no real transfer, so the
	 * chance of a long unlink on a sluggish net is smaller. */
	ptlrpc_unregister_bulk(request, 0);
 out:
	if (request->rq_memalloc)
		cfs_memory_pressure_restore(mpflag);

	RETURN(rc);
}
EXPORT_SYMBOL(ptl_send_rpc);
/**
 * Register request buffer descriptor for request receiving.
 */
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
	struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
	static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
	int rc;
	lnet_md_t md;
	lnet_handle_me_t me_h;

	CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
	       service->srv_req_portal);

	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
		return -ENOMEM;

	/* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
	 * which means the buffer can only be attached on the local CPT, and
	 * LND threads can find it by grabbing a local lock */
	rc = LNetMEAttach(service->srv_req_portal,
			  match_id, 0, ~0, LNET_UNLINK,
			  rqbd->rqbd_svcpt->scp_cpt >= 0 ?
			  LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
	if (rc != 0) {
		CERROR("LNetMEAttach failed: %d\n", rc);
		return -ENOMEM;
	}

	LASSERT(rqbd->rqbd_refcount == 0);
	rqbd->rqbd_refcount = 1;

	md.start     = rqbd->rqbd_buffer;
	md.length    = service->srv_buf_size;
	md.max_size  = service->srv_max_req_size;
	md.threshold = LNET_MD_THRESH_INF;
	md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
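	/* NB with LNET_MD_MAX_SIZE set, the MD auto-unlinks once less than
	 * max_size bytes of buffer remain, so a nearly-full request buffer
	 * is retired and replaced rather than overflowed */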
	md.user_ptr  = &rqbd->rqbd_cbid;
	md.eq_handle = ptlrpc_eq_h;

	rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
	if (rc == 0)
		return 0;

	CERROR("LNetMDAttach failed: %d\n", rc);
	LASSERT(rc == -ENOMEM);
	rc = LNetMEUnlink(me_h);
	LASSERT(rc == 0);
	rqbd->rqbd_refcount = 0;

	return -ENOMEM;
}