1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
6 * This file is part of the Lustre file system, http://www.lustre.org
7 * Lustre is a trademark of Cluster File Systems, Inc.
9 * You may have signed or agreed to another license before downloading
10 * this software. If so, you are bound by the terms and conditions
11 * of that agreement, and the following does not apply to you. See the
12 * LICENSE file included with this distribution for more information.
14 * If you did not agree to a different license, then this copy of Lustre
15 * is open source software; you can redistribute it and/or modify it
16 * under the terms of version 2 of the GNU General Public License as
17 * published by the Free Software Foundation.
19 * In either case, Lustre is distributed in the hope that it will be
20 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
21 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * license text for more details.
26 #define DEBUG_SUBSYSTEM S_RPC
28 #include <liblustre.h>
30 #include <obd_support.h>
31 #include <lustre_net.h>
32 #include <lustre_lib.h>
34 #include "ptlrpc_internal.h"
36 static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
37 lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
38 struct ptlrpc_connection *conn, int portal, __u64 xid,
45 LASSERT (portal != 0);
46 LASSERT (conn != NULL);
47 CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
50 md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
51 md.options = PTLRPC_MD_OPTIONS;
53 md.eq_handle = ptlrpc_eq_h;
55 if (ack == LNET_ACK_REQ &&
56 OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_ACK | OBD_FAIL_ONCE)) {
57 /* don't ask for the ack to simulate failing client */
59 obd_fail_loc |= OBD_FAIL_ONCE | OBD_FAILED;
62 rc = LNetMDBind (md, LNET_UNLINK, mdh);
64 CERROR ("LNetMDBind failed: %d\n", rc);
65 LASSERT (rc == -ENOMEM);
69 CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
70 len, portal, xid, offset);
72 rc = LNetPut (conn->c_self, *mdh, ack,
73 conn->c_peer, portal, xid, offset, 0);
76 /* We're going to get an UNLINK event when I unlink below,
77 * which will complete just like any other failed send, so
78 * I fall through and return success here! */
79 CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
80 libcfs_id2str(conn->c_peer), portal, xid, rc);
81 rc2 = LNetMDUnlink(*mdh);
82 LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
88 int ptlrpc_start_bulk_transfer (struct ptlrpc_bulk_desc *desc)
90 struct ptlrpc_connection *conn = desc->bd_export->exp_connection;
97 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_BULK_PUT_NET))
100 /* NB no locking required until desc is on the network */
101 LASSERT (!desc->bd_network_rw);
102 LASSERT (desc->bd_type == BULK_PUT_SOURCE ||
103 desc->bd_type == BULK_GET_SINK);
104 desc->bd_success = 0;
105 desc->bd_sender = LNET_NID_ANY;
107 md.user_ptr = &desc->bd_cbid;
108 md.eq_handle = ptlrpc_eq_h;
109 md.threshold = 2; /* SENT and ACK/REPLY */
110 md.options = PTLRPC_MD_OPTIONS;
111 ptlrpc_fill_bulk_md(&md, desc);
113 LASSERT (desc->bd_cbid.cbid_fn == server_bulk_callback);
114 LASSERT (desc->bd_cbid.cbid_arg == desc);
116 /* NB total length may be 0 for a read past EOF, so we send a 0
117 * length bulk, since the client expects a bulk event. */
119 rc = LNetMDBind(md, LNET_UNLINK, &desc->bd_md_h);
121 CERROR("LNetMDBind failed: %d\n", rc);
122 LASSERT (rc == -ENOMEM);
126 /* Client's bulk and reply matchbits are the same */
127 xid = desc->bd_req->rq_xid;
128 CDEBUG(D_NET, "Transferring %u pages %u bytes via portal %d "
129 "id %s xid "LPX64"\n", desc->bd_iov_count,
130 desc->bd_nob, desc->bd_portal,
131 libcfs_id2str(conn->c_peer), xid);
133 /* Network is about to get at the memory */
134 desc->bd_network_rw = 1;
136 if (desc->bd_type == BULK_PUT_SOURCE)
137 rc = LNetPut (conn->c_self, desc->bd_md_h, LNET_ACK_REQ,
138 conn->c_peer, desc->bd_portal, xid, 0, 0);
140 rc = LNetGet (conn->c_self, desc->bd_md_h,
141 conn->c_peer, desc->bd_portal, xid, 0);
144 /* Can't send, so we unlink the MD bound above. The UNLINK
145 * event this creates will signal completion with failure,
146 * so we return SUCCESS here! */
147 CERROR("Transfer(%s, %d, "LPX64") failed: %d\n",
148 libcfs_id2str(conn->c_peer), desc->bd_portal, xid, rc);
149 rc2 = LNetMDUnlink(desc->bd_md_h);
156 void ptlrpc_abort_bulk (struct ptlrpc_bulk_desc *desc)
158 /* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
159 * serialises with completion callback) */
160 struct l_wait_info lwi;
163 LASSERT (!in_interrupt ()); /* might sleep */
165 if (!ptlrpc_bulk_active(desc)) /* completed or */
166 return; /* never started */
168 /* Do not send any meaningful data over the wire for evicted clients */
169 if (desc->bd_export && desc->bd_export->exp_failed)
170 ptl_rpc_wipe_bulk_pages(desc);
172 /* The unlink ensures the callback happens ASAP and is the last
173 * one. If it fails, it must be because completion just happened,
174 * but we must still l_wait_event() in this case, to give liblustre
175 * a chance to run server_bulk_callback()*/
177 LNetMDUnlink (desc->bd_md_h);
180 /* Network access will complete in finite time but the HUGE
181 * timeout lets us CWARN for visibility of sluggish NALs */
182 lwi = LWI_TIMEOUT (cfs_time_seconds(300), NULL, NULL);
183 rc = l_wait_event(desc->bd_waitq,
184 !ptlrpc_bulk_active(desc), &lwi);
188 LASSERT(rc == -ETIMEDOUT);
189 CWARN("Unexpectedly long timeout: desc %p\n", desc);
193 int ptlrpc_register_bulk (struct ptlrpc_request *req)
195 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
196 lnet_process_id_t peer;
199 lnet_handle_me_t me_h;
203 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_BULK_GET_NET))
206 /* NB no locking required until desc is on the network */
207 LASSERT (desc->bd_nob > 0);
208 LASSERT (!desc->bd_network_rw);
209 LASSERT (desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
210 LASSERT (desc->bd_req != NULL);
211 LASSERT (desc->bd_type == BULK_PUT_SINK ||
212 desc->bd_type == BULK_GET_SOURCE);
214 desc->bd_success = 0;
215 desc->bd_sender = LNET_NID_ANY;
217 peer = desc->bd_import->imp_connection->c_peer;
219 md.user_ptr = &desc->bd_cbid;
220 md.eq_handle = ptlrpc_eq_h;
221 md.threshold = 1; /* PUT or GET */
222 md.options = PTLRPC_MD_OPTIONS |
223 ((desc->bd_type == BULK_GET_SOURCE) ?
224 LNET_MD_OP_GET : LNET_MD_OP_PUT);
225 ptlrpc_fill_bulk_md(&md, desc);
227 LASSERT (desc->bd_cbid.cbid_fn == client_bulk_callback);
228 LASSERT (desc->bd_cbid.cbid_arg == desc);
230 /* XXX Registering the same xid on retried bulk makes my head
231 * explode trying to understand how the original request's bulk
232 * might interfere with the retried request -eeb */
233 LASSERTF (!desc->bd_registered || req->rq_xid != desc->bd_last_xid,
234 "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
235 desc->bd_registered, req->rq_xid, desc->bd_last_xid);
236 desc->bd_registered = 1;
237 desc->bd_last_xid = req->rq_xid;
239 rc = LNetMEAttach(desc->bd_portal, peer,
240 req->rq_xid, 0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
242 CERROR("LNetMEAttach failed: %d\n", rc);
243 LASSERT (rc == -ENOMEM);
247 /* About to let the network at it... */
248 desc->bd_network_rw = 1;
249 rc = LNetMDAttach(me_h, md, LNET_UNLINK, &desc->bd_md_h);
251 CERROR("LNetMDAttach failed: %d\n", rc);
252 LASSERT (rc == -ENOMEM);
253 desc->bd_network_rw = 0;
254 rc2 = LNetMEUnlink (me_h);
259 CDEBUG(D_NET, "Setup bulk %s buffers: %u pages %u bytes, xid "LPX64", "
261 desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
262 desc->bd_iov_count, desc->bd_nob,
263 req->rq_xid, desc->bd_portal);
267 void ptlrpc_unregister_bulk (struct ptlrpc_request *req)
269 /* Disconnect a bulk desc from the network. Idempotent. Not
270 * thread-safe (i.e. only interlocks with completion callback). */
271 struct ptlrpc_bulk_desc *desc = req->rq_bulk;
273 struct l_wait_info lwi;
276 LASSERT (!in_interrupt ()); /* might sleep */
278 if (!ptlrpc_bulk_active(desc)) /* completed or */
279 return; /* never registered */
281 LASSERT (desc->bd_req == req); /* bd_req NULL until registered */
283 /* the unlink ensures the callback happens ASAP and is the last
284 * one. If it fails, it must be because completion just happened,
285 * but we must still l_wait_event() in this case to give liblustre
286 * a chance to run client_bulk_callback() */
288 LNetMDUnlink (desc->bd_md_h);
290 if (req->rq_set != NULL)
291 wq = &req->rq_set->set_waitq;
293 wq = &req->rq_reply_waitq;
296 /* Network access will complete in finite time but the HUGE
297 * timeout lets us CWARN for visibility of sluggish NALs */
298 lwi = LWI_TIMEOUT (cfs_time_seconds(300), NULL, NULL);
299 rc = l_wait_event(*wq, !ptlrpc_bulk_active(desc), &lwi);
303 LASSERT (rc == -ETIMEDOUT);
304 DEBUG_REQ(D_WARNING,req,"Unexpectedly long timeout: desc %p",
309 int ptlrpc_send_reply (struct ptlrpc_request *req, int flags)
311 struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
312 struct ptlrpc_reply_state *rs = req->rq_reply_state;
313 struct ptlrpc_connection *conn;
315 unsigned int offset = 0;
318 /* We must already have a reply buffer (only ptlrpc_error() may be
319 * called without one). We must also have a request buffer which
320 * is either the actual (swabbed) incoming request, or a saved copy
321 * if this is a req saved in target_queue_final_reply(). */
322 LASSERT (req->rq_reqmsg != NULL);
323 LASSERT (req->rq_repmsg != NULL);
324 LASSERT (rs != NULL);
325 LASSERT (req->rq_repmsg == rs->rs_msg);
326 LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
327 LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
328 LASSERT (rs->rs_cb_id.cbid_arg == rs);
330 /* There may be no rq_export during failover */
332 if (req->rq_export && req->rq_export->exp_obd &&
333 req->rq_export->exp_obd->obd_fail) {
334 /* Failed obd's only send ENODEV */
335 req->rq_type = PTL_RPC_MSG_ERR;
336 req->rq_status = -ENODEV;
337 CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
338 req->rq_export->exp_obd->obd_minor);
341 if (req->rq_type != PTL_RPC_MSG_ERR)
342 req->rq_type = PTL_RPC_MSG_REPLY;
344 lustre_msg_set_type(req->rq_repmsg, req->rq_type);
345 lustre_msg_set_status(req->rq_repmsg, req->rq_status);
346 lustre_msg_set_opc(req->rq_repmsg, lustre_msg_get_opc(req->rq_reqmsg));
348 service_time = max_t(int, cfs_time_current_sec() -
349 req->rq_arrival_time.tv_sec, 1);
350 if (!(flags & PTLRPC_REPLY_EARLY) &&
351 (req->rq_type != PTL_RPC_MSG_ERR)) {
352 /* early replies and errors don't count toward our service
354 int oldse = at_add(&svc->srv_at_estimate, service_time);
356 DEBUG_REQ(D_ADAPTTO, req,
357 "svc %s changed estimate from %d to %d",
358 svc->srv_name, oldse,
359 at_get(&svc->srv_at_estimate));
361 /* Report actual service time for client latency calc */
362 lustre_msg_set_service_time(req->rq_repmsg, service_time);
363 /* Report service time estimate for future client reqs */
364 lustre_msg_set_timeout(req->rq_repmsg, at_get(&svc->srv_at_estimate));
366 target_pack_pool_reply(req);
368 if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) {
369 /* early replies go to offset 0, regular replies go after that*/
370 if (flags & PTLRPC_REPLY_EARLY) {
372 /* Include a checksum on early replies - must be done
373 after all other lustre_msg_set_* */
374 lustre_msg_set_cksum(req->rq_repmsg,
375 lustre_msg_calc_cksum(req->rq_repmsg));
377 offset = lustre_msg_early_size();
380 CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
381 "req_flags=%#x magic=%d:%x/%x len=%d\n",
382 flags, lustre_msg_get_flags(req->rq_reqmsg),
383 lustre_msg_is_v1(req->rq_reqmsg),
384 lustre_msg_get_magic(req->rq_reqmsg),
385 lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
388 if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
389 conn = ptlrpc_get_connection(req->rq_peer, req->rq_self, NULL);
391 conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
394 CERROR("not replying on NULL connection\n"); /* bug 9635 */
398 atomic_inc (&svc->srv_outstanding_replies);
399 ptlrpc_rs_addref(rs); /* +1 ref for the network */
400 req->rq_sent = cfs_time_current_sec();
402 rc = ptl_send_buf (&rs->rs_md_h, req->rq_repmsg, req->rq_replen,
403 rs->rs_difficult ? LNET_ACK_REQ : LNET_NOACK_REQ,
404 &rs->rs_cb_id, conn, svc->srv_rep_portal,
405 req->rq_xid, offset);
407 atomic_dec (&svc->srv_outstanding_replies);
408 ptlrpc_req_drop_rs(req);
410 ptlrpc_put_connection(conn);
/* Send a plain (non-early, non-difficult) reply for req. */
int ptlrpc_reply (struct ptlrpc_request *req)
{
        return (ptlrpc_send_reply (req, 0));
}
419 int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
424 if (!req->rq_repmsg) {
425 rc = lustre_pack_reply(req, 1, NULL, NULL);
430 req->rq_type = PTL_RPC_MSG_ERR;
432 rc = ptlrpc_send_reply(req, may_be_difficult);
/* Send a non-difficult error reply for req. */
int ptlrpc_error(struct ptlrpc_request *req)
{
        return ptlrpc_send_error(req, 0);
}
441 int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
445 struct ptlrpc_connection *connection;
446 lnet_handle_me_t reply_me_h;
448 struct obd_device *obd = request->rq_import->imp_obd;
451 OBD_FAIL_RETURN(OBD_FAIL_PTLRPC_DROP_RPC, 0);
453 LASSERT (request->rq_type == PTL_RPC_MSG_REQUEST);
455 /* If this is a re-transmit, we're required to have disengaged
456 * cleanly from the previous attempt */
457 LASSERT (!request->rq_receiving_reply);
459 if (request->rq_import->imp_obd &&
460 request->rq_import->imp_obd->obd_fail) {
461 CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
462 request->rq_import->imp_obd->obd_name);
463 /* this prevents us from waiting in ptlrpc_queue_wait */
468 connection = request->rq_import->imp_connection;
470 if (request->rq_bulk != NULL) {
471 rc = ptlrpc_register_bulk (request);
476 lustre_msg_set_handle(request->rq_reqmsg,
477 &request->rq_import->imp_remote_handle);
478 lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
479 lustre_msg_set_conn_cnt(request->rq_reqmsg,
480 request->rq_import->imp_conn_cnt);
481 lustre_msghdr_set_flags(request->rq_reqmsg,
482 request->rq_import->imp_msghdr_flags);
485 LASSERT (request->rq_replen != 0);
486 if (request->rq_repbuf == NULL)
487 OBD_ALLOC(request->rq_repbuf, request->rq_replen);
488 if (request->rq_repbuf == NULL)
489 GOTO(cleanup_bulk, rc = -ENOMEM);
490 request->rq_repmsg = NULL;
492 rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
493 connection->c_peer, request->rq_xid, 0,
494 LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
496 CERROR("LNetMEAttach failed: %d\n", rc);
497 LASSERT (rc == -ENOMEM);
498 GOTO(cleanup_repmsg, rc = -ENOMEM);
502 spin_lock(&request->rq_lock);
503 /* If the MD attach succeeds, there _will_ be a reply_in callback */
504 request->rq_receiving_reply = !noreply;
505 /* We are responsible for unlinking the reply buffer */
506 request->rq_must_unlink = !noreply;
507 /* Clear any flags that may be present from previous sends. */
508 request->rq_replied = 0;
510 request->rq_timedout = 0;
511 request->rq_net_err = 0;
512 request->rq_resend = 0;
513 request->rq_restart = 0;
514 spin_unlock(&request->rq_lock);
517 reply_md.start = request->rq_repbuf;
518 reply_md.length = request->rq_replen;
519 /* Allow multiple early replies */
520 reply_md.threshold = LNET_MD_THRESH_INF;
521 /* Manage remote for early replies */
522 reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
523 LNET_MD_MANAGE_REMOTE;
524 reply_md.user_ptr = &request->rq_reply_cbid;
525 reply_md.eq_handle = ptlrpc_eq_h;
527 /* We must see the unlink callback to unset rq_must_unlink,
528 so we can't auto-unlink */
529 rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
530 &request->rq_reply_md_h);
532 CERROR("LNetMDAttach failed: %d\n", rc);
533 LASSERT (rc == -ENOMEM);
534 spin_lock(&request->rq_lock);
535 /* ...but the MD attach didn't succeed... */
536 request->rq_receiving_reply = 0;
537 spin_unlock(&request->rq_lock);
538 GOTO(cleanup_me, rc -ENOMEM);
541 CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
543 request->rq_replen, request->rq_xid,
544 request->rq_reply_portal);
547 /* add references on request for request_out_callback */
548 ptlrpc_request_addref(request);
549 if (obd->obd_svc_stats != NULL)
550 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
551 request->rq_import->imp_inflight.counter);
553 OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
555 request->rq_sent = cfs_time_current_sec();
556 do_gettimeofday(&request->rq_arrival_time);
557 /* We give the server rq_timeout secs to process the req, and
558 add the network latency for our local timeout. */
559 request->rq_deadline = request->rq_sent + request->rq_timeout +
560 ptlrpc_at_get_net_latency(request);
562 ptlrpc_pinger_sending_on_import(request->rq_import);
564 DEBUG_REQ(D_INFO, request, "send flg=%x",
565 lustre_msg_get_flags(request->rq_reqmsg));
566 rc = ptl_send_buf(&request->rq_req_md_h,
567 request->rq_reqmsg, request->rq_reqlen,
568 LNET_NOACK_REQ, &request->rq_req_cbid,
570 request->rq_request_portal,
573 ptlrpc_lprocfs_rpc_sent(request);
577 ptlrpc_req_finished(request);
582 /* MEUnlink is safe; the PUT didn't even get off the ground, and
583 * nobody apart from the PUT's target has the right nid+XID to
584 * access the reply buffer. */
585 rc2 = LNetMEUnlink(reply_me_h);
587 /* UNLINKED callback called synchronously */
588 LASSERT (!request->rq_receiving_reply);
591 OBD_FREE(request->rq_repbuf, request->rq_replen);
592 request->rq_repbuf = NULL;
593 request->rq_repmsg = NULL; //remove
596 if (request->rq_bulk != NULL)
597 ptlrpc_unregister_bulk(request);
602 int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd)
604 struct ptlrpc_service *service = rqbd->rqbd_service;
605 static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
608 lnet_handle_me_t me_h;
610 CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
611 service->srv_req_portal);
613 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_PTLRPC_RQBD))
616 rc = LNetMEAttach(service->srv_req_portal,
617 match_id, 0, ~0, LNET_UNLINK, LNET_INS_AFTER, &me_h);
619 CERROR("LNetMEAttach failed: %d\n", rc);
623 LASSERT(rqbd->rqbd_refcount == 0);
624 rqbd->rqbd_refcount = 1;
626 md.start = rqbd->rqbd_buffer;
627 md.length = service->srv_buf_size;
628 md.max_size = service->srv_max_req_size;
629 md.threshold = LNET_MD_THRESH_INF;
630 md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
631 md.user_ptr = &rqbd->rqbd_cbid;
632 md.eq_handle = ptlrpc_eq_h;
634 rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
638 CERROR("LNetMDAttach failed: %d; \n", rc);
639 LASSERT (rc == -ENOMEM);
640 rc = LNetMEUnlink (me_h);
642 rqbd->rqbd_refcount = 0;