/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <liblustre.h>

#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_lib.h>

#include "ptlrpc_internal.h"

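/* Bind a buffer to an LNet memory descriptor and PUT it to the peer's portal
 * with the given matchbits (xid).  Used for both requests and replies; if the
 * PUT itself fails, the MD is unlinked and the resulting UNLINK event
 * completes the send like any other failure. */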
static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
                         lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
                         struct ptlrpc_connection *conn, int portal, __u64 xid)
{
        int              rc;
        lnet_md_t        md;
        ENTRY;

        LASSERT (portal != 0);
        LASSERT (conn != NULL);
        CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
        md.start     = base;
        md.length    = len;
        md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
        md.options   = PTLRPC_MD_OPTIONS;
        md.user_ptr  = cbid;
        md.eq_handle = ptlrpc_eq_h;

        if (unlikely(ack == LNET_ACK_REQ &&
                     OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_ACK | OBD_FAIL_ONCE))) {
                /* don't ask for the ack to simulate failing client */
                ack = LNET_NOACK_REQ;
                obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED;
        }

        rc = LNetMDBind (md, LNET_UNLINK, mdh);
        if (unlikely(rc != 0)) {
                CERROR ("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64"\n",
               len, portal, xid);

        rc = LNetPut (conn->c_self, *mdh, ack,
                      conn->c_peer, portal, xid, 0, 0);
        if (unlikely(rc != 0)) {
                int rc2;
                /* We're going to get an UNLINK event when I unlink below,
                 * which will complete just like any other failed send, so
                 * I fall through and return success here! */
                CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), portal, xid, rc);
                rc2 = LNetMDUnlink(*mdh);
                LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
        }

        RETURN (0);
}

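/* Server side: start the bulk transfer described by 'desc', either PUTting
 * data to the client (bulk read) or GETting it from the client (bulk write).
 * Completion is reported through server_bulk_callback(). */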
int ptlrpc_start_bulk_transfer (struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
        int                       rc;
        int                       rc2;
        lnet_md_t                 md;
        __u64                     xid;
        ENTRY;

        if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_BULK_PUT_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
                 desc->bd_type == BULK_GET_SINK);
        desc->bd_success = 0;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 2;                       /* SENT and ACK/REPLY */
        md.options = PTLRPC_MD_OPTIONS;
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* NB total length may be 0 for a read past EOF, so we send a 0
         * length bulk, since the client expects a bulk event. */

        rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN(-ENOMEM);
        }

        /* Client's bulk and reply matchbits are the same */
        xid = desc->bd_req->rq_xid;
        CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
               "id %s xid "LPX64"\n", desc->bd_iov_count,
               desc->bd_nob, desc->bd_portal,
               libcfs_id2str(conn->c_peer), xid);

        /* Network is about to get at the memory */
        desc->bd_network_rw = 1;

        if (desc->bd_type == BULK_PUT_SOURCE)
                rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
                              conn->c_peer, desc->bd_portal, xid, 0, 0);
        else
                rc = LNetGet (conn->c_self, desc->bd_md_h,
                              conn->c_peer, desc->bd_portal, xid, 0);

        if (rc != 0) {
                /* Can't send, so we unlink the MD bound above.  The UNLINK
                 * event this creates will signal completion with failure,
                 * so we return SUCCESS here! */
                CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
                       libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
                rc2 = LNetMDUnlink(desc->bd_md_h);
                LASSERT (rc2 == 0);
        }

        RETURN(0);
}

void ptlrpc_abort_bulk (struct ptlrpc_bulk_desc *desc)
{
        /* Server side bulk abort.  Idempotent.  Not thread-safe (i.e. only
         * serialises with completion callback) */
        struct l_wait_info lwi;
        int                rc;

        LASSERT (!in_interrupt ());             /* might sleep */

        if (!ptlrpc_bulk_active(desc))          /* completed or */
                return;                         /* never started */

        /* Do not send any meaningful data over the wire for evicted clients */
        if (desc->bd_export && desc->bd_export->exp_failed)
                ptl_rpc_wipe_bulk_pages(desc);

        /* The unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case, to give liblustre
         * a chance to run server_bulk_callback() */

        LNetMDUnlink (desc->bd_md_h);

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT (cfs_time_seconds(300), NULL, NULL);
                rc = l_wait_event(desc->bd_waitq,
                                  !ptlrpc_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT(rc == -ETIMEDOUT);
                CWARN("Unexpectedly long timeout: desc %p\n", desc);
        }
}

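/* Client side: attach the ME/MD on the bulk portal so the server can PUT
 * into (BULK_PUT_SINK) or GET from (BULK_GET_SOURCE) the described pages,
 * matched by the request's xid. */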
int ptlrpc_register_bulk (struct ptlrpc_request *req)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        lnet_process_id_t        peer;
        int                      rc;
        int                      rc2;
        lnet_handle_me_t         me_h;
        lnet_md_t                md;
        ENTRY;

        if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_BULK_GET_NET))
                RETURN(0);

        /* NB no locking required until desc is on the network */
        LASSERT (desc->bd_nob > 0);
        LASSERT (!desc->bd_network_rw);
        LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
        LASSERT (desc->bd_req != NULL);
        LASSERT (desc->bd_type == BULK_PUT_SINK ||
                 desc->bd_type == BULK_GET_SOURCE);

        desc->bd_success = 0;

        peer = desc->bd_import->imp_connection->c_peer;

        md.user_ptr = &desc->bd_cbid;
        md.eq_handle = ptlrpc_eq_h;
        md.threshold = 1;                       /* PUT or GET */
        md.options = PTLRPC_MD_OPTIONS |
                     ((desc->bd_type == BULK_GET_SOURCE) ?
                      LNET_MD_OP_GET : LNET_MD_OP_PUT);
        ptlrpc_fill_bulk_md(&md, desc);

        LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
        LASSERT (desc->bd_cbid.cbid_arg == desc);

        /* XXX Registering the same xid on retried bulk makes my head
         * explode trying to understand how the original request's bulk
         * might interfere with the retried request -eeb */
        LASSERTF (!desc->bd_registered || req->rq_xid != desc->bd_last_xid,
                  "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
                  desc->bd_registered, req->rq_xid, desc->bd_last_xid);
        desc->bd_registered = 1;
        desc->bd_last_xid = req->rq_xid;

        rc = LNetMEAttach(desc->bd_portal, peer,
                          req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                RETURN (-ENOMEM);
        }

        /* About to let the network at it... */
        desc->bd_network_rw = 1;
        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
        if (rc != 0) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT (rc == -ENOMEM);
                desc->bd_network_rw = 0;
                rc2 = LNetMEUnlink (me_h);
                LASSERT (rc2 == 0);
                RETURN (-ENOMEM);
        }

        CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPX64", "
               "portal %u\n",
               desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
               desc->bd_iov_count, desc->bd_nob,
               req->rq_xid, desc->bd_portal);

        RETURN(0);
}

void ptlrpc_unregister_bulk (struct ptlrpc_request *req)
{
        /* Disconnect a bulk desc from the network.  Idempotent.  Not
         * thread-safe (i.e. only interlocks with completion callback). */
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        cfs_waitq_t             *wq;
        struct l_wait_info       lwi;
        int                      rc;

        LASSERT (!in_interrupt ());             /* might sleep */

        if (!ptlrpc_bulk_active(desc))          /* completed or */
                return;                         /* never registered */

        LASSERT (desc->bd_req == req);          /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one.  If it fails, it must be because completion just happened,
         * but we must still l_wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback() */

        LNetMDUnlink (desc->bd_md_h);

        if (req->rq_set != NULL)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish NALs */
                lwi = LWI_TIMEOUT (cfs_time_seconds(300), NULL, NULL);
                rc = l_wait_event(*wq, !ptlrpc_bulk_active(desc), &lwi);
                if (rc == 0)
                        return;

                LASSERT (rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
}

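/* Send the reply state attached to 'req' back to the client over the
 * request's connection.  "Difficult" replies request an LNet ACK and are
 * only allowed when 'may_be_difficult' is set. */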
int ptlrpc_send_reply (struct ptlrpc_request *req, int may_be_difficult)
{
        struct ptlrpc_service     *svc = req->rq_rqbd->rqbd_service;
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection  *conn;
        int                        rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one).  The reply generated by the security layer
         * (e.g. error notify, etc.) might have a NULL rq_reqmsg; otherwise we
         * must have a request buffer which is either the actual (swabbed)
         * incoming request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT (req->rq_reqbuf != NULL);
        LASSERT (rs != NULL);
        LASSERT (may_be_difficult || !rs->rs_difficult);
        LASSERT (req->rq_repmsg != NULL);
        LASSERT (req->rq_repmsg == rs->rs_msg);
        LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT (rs->rs_cb_id.cbid_arg == rs);

        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg, req->rq_status);
        lustre_msg_set_opc(req->rq_repmsg,
                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_get_connection(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        atomic_inc (&svc->srv_outstanding_replies);
        ptlrpc_rs_addref(rs);                   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                           rs->rs_difficult ? LNET_ACK_REQ : LNET_NOACK_REQ,
                           &rs->rs_cb_id, conn,
                           svc->srv_rep_portal, req->rq_xid);
out:
        if (unlikely(rc != 0)) {
                atomic_dec (&svc->srv_outstanding_replies);
                ptlrpc_rs_decref(rs);
        }
        ptlrpc_put_connection(conn);

        return rc;
}

int ptlrpc_reply (struct ptlrpc_request *req)
{
        return (ptlrpc_send_reply (req, 0));
}

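/* Send an error reply for 'req', packing a minimal reply buffer first if
 * none has been allocated yet. */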
int ptlrpc_error(struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (!req->rq_repmsg) {
                rc = lustre_pack_reply(req, 1, NULL, NULL);
                if (rc)
                        RETURN(rc);
        }

        req->rq_type = PTL_RPC_MSG_ERR;

        rc = ptlrpc_send_reply(req, 0);
        RETURN(rc);
}

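/* Client side: send 'request' over its import's connection.  Unless
 * 'noreply' is set, a reply buffer is posted (ME/MD on the reply portal)
 * before the request itself is PUT on the wire. */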
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t  reply_me_h;
        lnet_md_t         reply_md;
        ENTRY;

        OBD_FAIL_RETURN(OBD_FAIL_PTLRPC_DROP_RPC, 0);

        LASSERT (request->rq_type == PTL_RPC_MSG_REQUEST);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT (!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                RETURN(rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk (request);
                if (rc != 0)
                        RETURN(rc);
        }

        if (!noreply) {
                LASSERT (request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc)
                                GOTO(cleanup_bulk, rc);
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                reply_md.threshold = 1;
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT;
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                rc = LNetMDAttach(reply_me_h, reply_md, LNET_UNLINK,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT (rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request and import for request_out_callback */
        ptlrpc_request_addref(request);
        atomic_inc(&request->rq_import->imp_inflight);

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        request->rq_sent = CURRENT_SECONDS;
        ptlrpc_pinger_sending_on_import(request->rq_import);
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid);
        if (rc == 0) {
                ptlrpc_lprocfs_rpc_sent(request);
                RETURN(rc);
        }

        /* drop request_out_callback refs, we couldn't start the send */
        atomic_dec(&request->rq_import->imp_inflight);
        ptlrpc_req_finished(request);

        if (noreply)
                RETURN(rc);
        else
                GOTO(cleanup_me, rc);
 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT (rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT (!request->rq_receiving_reply);

 cleanup_bulk:
        if (request->rq_bulk != NULL)
                ptlrpc_unregister_bulk(request);

        return rc;
}

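/* Post a request buffer descriptor on the service's request portal so that
 * incoming client requests can be received into it. */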
int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
{
        struct ptlrpc_service    *service = rqbd->rqbd_service;
        static lnet_process_id_t  match_id = {LNET_NID_ANY, LNET_PID_ANY};
        int                       rc;
        lnet_md_t                 md;
        lnet_handle_me_t          me_h;

        CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
               service->srv_req_portal);

        if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_RQBD))
                return (-ENOMEM);

        rc = LNetMEAttach(service->srv_req_portal,
                          match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                return (-ENOMEM);
        }

        LASSERT(rqbd->rqbd_refcount == 0);
        rqbd->rqbd_refcount = 1;

        md.start     = rqbd->rqbd_buffer;
        md.length    = service->srv_buf_size;
        md.max_size  = service->srv_max_req_size;
        md.threshold = LNET_MD_THRESH_INF;
        md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
        md.user_ptr  = &rqbd->rqbd_cbid;
        md.eq_handle = ptlrpc_eq_h;

        rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
        if (rc == 0)
                return (0);

        CERROR("LNetMDAttach failed: %d\n", rc);
        LASSERT (rc == -ENOMEM);
        rc = LNetMEUnlink (me_h);
        LASSERT (rc == 0);
        rqbd->rqbd_refcount = 0;

        return (-ENOMEM);
}