/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <linux/kernel.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

lnet_handle_eq_t ptlrpc_eq_h;
/*
 * Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	sptlrpc_request_out_callback(req);
	spin_lock(&req->rq_lock);
	req->rq_real_sent = cfs_time_current_sec();
	req->rq_req_unlink = 0;

	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c does currently... */
		req->rq_net_err = 1;
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&req->rq_lock);

	ptlrpc_req_finished(req);
}
/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->md.start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	 * for adaptive timeouts' early reply. */
	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

	spin_lock(&req->rq_lock);

	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	if (ev->unlinked)
		req->rq_reply_unlink = 0;

	if (ev->status)
		goto out_wake;

	if (ev->type == LNET_EVENT_UNLINK) {
		LASSERT(ev->unlinked);
		DEBUG_REQ(D_NET, req, "unlink");
		goto out_wake;
	}

	if (ev->mlength < ev->rlength) {
		CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
		       req->rq_replen, ev->rlength, ev->offset);
		req->rq_reply_truncate = 1;
		req->rq_replied = 1;
		req->rq_status = -EOVERFLOW;
		req->rq_nob_received = ev->rlength + ev->offset;
		goto out_wake;
	}

	if ((ev->offset == 0) &&
	    ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
		/* Early reply */
		DEBUG_REQ(D_ADAPTTO, req,
			  "Early reply received: mlen=%u offset=%d replen=%d "
			  "replied=%d unlinked=%d", ev->mlength, ev->offset,
			  req->rq_replen, req->rq_replied, ev->unlinked);

		req->rq_early_count++; /* number received, client side */

		if (req->rq_replied) /* already got the real reply */
			goto out_wake;

		req->rq_early = 1;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* And we're still receiving */
		req->rq_receiving_reply = 1;
	} else {
		/* Real reply */
		req->rq_rep_swab_mask = 0;
		req->rq_replied = 1;
		/* Got reply, no resend required */
		req->rq_resend = 0;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		 * so we must unlink in ptlrpc_unregister_reply */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
			  ev->mlength, ev->offset, req->rq_replen);
	}

	req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
/*
 * Client's bulk has been written/read
 */
void client_bulk_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
	struct ptlrpc_request   *req;

	LASSERT((desc->bd_type == BULK_PUT_SINK &&
		 ev->type == LNET_EVENT_PUT) ||
		(desc->bd_type == BULK_GET_SOURCE &&
		 ev->type == LNET_EVENT_GET) ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
		ev->status = -EIO;

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
		ev->status = -EIO;

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, desc %p\n",
	       ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);
	req = desc->bd_req;
	LASSERT(desc->bd_md_count > 0);
	desc->bd_md_count--;

	if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	} else {
		/* start reconnect and resend if network error hit */
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);

		desc->bd_failure = 1;
	}

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * otherwise */
	if (desc->bd_md_count == 0)
		ptlrpc_client_wake_req(desc->bd_req);

	spin_unlock(&desc->bd_lock);
}
/*
 * We will have a percpt request history list for each ptlrpc service in
 * upcoming patches, because we don't want to be serialized by the current
 * per-service history operations. So we require that history IDs (somehow)
 * reflect arrival order without grabbing a global lock, so that users can
 * sort them in userspace.
 *
 * This is how we generate the history ID for a ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X) bits |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    |  CPT id  |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */
#define REQS_CPT_BITS(svcpt)	((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT		32
#define REQS_USEC_SHIFT		16
#define REQS_SEQ_SHIFT(svcpt)	REQS_CPT_BITS(svcpt)
static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
				   struct ptlrpc_request *req)
{
	__u64	sec = req->rq_arrival_time.tv_sec;
	__u32	usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
	__u64	new_seq;

	/* set sequence ID for request and add it to history list,
	 * it must be called while holding svcpt::scp_lock */

	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
		  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * we just jumped into a new time window */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
		/* NB: increase the sequence number in the current usec
		 * bucket; however, it's possible that we used up all bits
		 * for the sequence and jumped into the next usec bucket
		 * (future time), in which case we hope there will be fewer
		 * RPCs per bucket at some point, and the sequence will
		 * catch up again */
		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}

	req->rq_history_seq = new_seq;

	list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
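
/*
 * Illustrative sketch (not part of the original code): given the ID layout
 * documented above ptlrpc_req_add_history(), a userspace consumer sorting a
 * dumped history could unpack the fields like this. The helper name and its
 * cpt_bits parameter (the service's X, i.e. srv_cpt_bits) are hypothetical.
 */
#if 0
static void reqs_hist_seq_decode(__u64 seq, unsigned int cpt_bits,
				 __u32 *sec, __u32 *usec16,
				 __u32 *seqno, __u32 *cpt)
{
	*sec	= seq >> REQS_SEC_SHIFT;		/* 32 bits: seconds */
	*usec16	= (seq >> REQS_USEC_SHIFT) & 0xffff;	/* 16 bits: usec / 16 */
	*seqno	= ((__u32)seq & 0xffff) >> cpt_bits;	/* (16 - X) bits */
	*cpt	= (__u32)seq & ((1U << cpt_bits) - 1);	/* X bits: CPT id */
}
#endif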
/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id		  *cbid = ev->md.user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part	  *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service		  *service = svcpt->scp_service;
	struct ptlrpc_request		  *req;

	LASSERT(ev->type == LNET_EVENT_PUT ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
	LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
		rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, service %s\n",
	       ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd. Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context. */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
		LASSERT(ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) {
			/* We moaned above already... */
			return;
		}
		req = ptlrpc_request_cache_alloc(ALLOC_ATOMIC_TRY);
		if (req == NULL) {
			CERROR("Can't allocate incoming request descriptor: "
			       "Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_id2str(ev->initiator));
			return;
		}
	}

	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero. We only set the message
	 * size to non-zero if this was a successful receive. */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	do_gettimeofday(&req->rq_arrival_time);
	req->rq_peer = ev->initiator;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	spin_lock_init(&req->rq_lock);
	INIT_LIST_HEAD(&req->rq_timed_list);
	INIT_LIST_HEAD(&req->rq_exp_list);
	atomic_set(&req->rq_refcount, 1);
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

	spin_lock(&svcpt->scp_lock);

	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}
/*
 * Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id	   *cbid = ev->md.user_ptr;
	struct ptlrpc_reply_state  *rs = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_ACK ||
		ev->type == LNET_EVENT_UNLINK);

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing so I drop the
		 * net's ref on 'rs' */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
	}

	LASSERT(rs->rs_on_net);

	if (ev->unlinked) {
		/* Last network callback. The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

		rs->rs_on_net = 0;
		if (!rs->rs_no_ack ||
		    rs->rs_transno <=
		    rs->rs_export->exp_obd->obd_last_committed)
			ptlrpc_schedule_difficult_reply(rs);

		spin_unlock(&rs->rs_lock);
		spin_unlock(&svcpt->scp_rep_lock);
	}
}
#ifdef HAVE_SERVER_SUPPORT
/*
 * Server's bulk completion callback
 */
void server_bulk_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_UNLINK ||
		(desc->bd_type == BULK_PUT_SOURCE &&
		 ev->type == LNET_EVENT_ACK) ||
		(desc->bd_type == BULK_GET_SINK &&
		 ev->type == LNET_EVENT_REPLY));

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, desc %p\n",
	       ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);

	LASSERT(desc->bd_md_count > 0);

	if ((ev->type == LNET_EVENT_ACK ||
	     ev->type == LNET_EVENT_REPLY) &&
	    ev->status == 0) {
		/* We heard back from the peer, so even if we get this
		 * before the SENT event (oh yes we can), we know we
		 * read/wrote the peer buffer and how much... */
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	}

	if (ev->status != 0)
		desc->bd_failure = 1;

	if (ev->unlinked) {
		desc->bd_md_count--;
		/* This is the last callback no matter what... */
		if (desc->bd_md_count == 0)
			wake_up(&desc->bd_waitq);
	}

	spin_unlock(&desc->bd_lock);
}
#endif
static void ptlrpc_master_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

	/* Honestly, it's best to find out early. */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
		|| callback == server_bulk_callback
#endif
		);

	callback(ev);
}
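
/*
 * Illustrative sketch (hypothetical helper, condensed from how the ptlrpc
 * senders set up their MDs elsewhere): every MD posted by ptlrpc points
 * user_ptr at a struct ptlrpc_cb_id, which is what lets the single master
 * callback above fan events out to the per-message handlers.
 */
#if 0
static void ptlrpc_cbid_wire_example(struct ptlrpc_request *req,
				     lnet_md_t *md)
{
	req->rq_req_cbid.cbid_fn  = request_out_callback;
	req->rq_req_cbid.cbid_arg = req;

	md->user_ptr  = &req->rq_req_cbid;	/* read back in master callback */
	md->eq_handle = ptlrpc_eq_h;		/* the one ptlrpc event queue */
}
#endif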
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			lnet_process_id_t *peer, lnet_nid_t *self)
{
	int		best_dist = 0, count = 0, rc = -ENOENT;
	int		portals_compatibility, dist;
	__u32		best_order = 0, order;
	lnet_nid_t	dst_nid, src_nid;

	portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
	peer->pid = LNET_PID_LUSTRE;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		dist = LNetDist(dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) { /* local! use loopback LND */
			peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
			rc = 0;
			break;
		}

		if (rc < 0 || dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;

			if (portals_compatibility > 1) {
				/* Strong portals compatibility: Zero the nid's
				 * NET, so if I'm reading new config logs, or
				 * getting configured by (new) lconf I can
				 * still talk to old servers. */
				dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
				src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
			}
			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
	return rc;
}
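
/*
 * Illustrative sketch (hypothetical caller): resolving a server UUID to its
 * closest peer NID, as a connection-establishment path might do.
 */
#if 0
static int ptlrpc_uuid_to_peer_example(struct obd_uuid *uuid)
{
	lnet_process_id_t peer;
	lnet_nid_t	  self;
	int		  rc;

	rc = ptlrpc_uuid_to_peer(uuid, &peer, &self);
	if (rc != 0)
		return rc;	/* no NID is known for this UUID */

	CDEBUG(D_NET, "%s is closest at %s (local NID %s)\n",
	       uuid->uuid, libcfs_id2str(peer), libcfs_nid2str(self));
	return 0;
}
#endif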
void ptlrpc_ni_fini(void)
{
	wait_queue_head_t	waitq;
	struct l_wait_info	lwi;
	int			rc, retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server replies) */
	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			LBUG();
		case 0:
			LNetNIFini();
			return;
		case -EBUSY:
			if (retries != 0)
				CWARN("Event queue still busy\n");
			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}
lnet_pid_t ptl_get_pid(void)
{
	return LNET_PID_LUSTRE;
}
int ptlrpc_ni_init(void)
{
	int		rc;
	lnet_pid_t	pid;

	pid = ptl_get_pid();
	CDEBUG(D_NET, "My pid is: %x\n", pid);

	/* We're not passing any limits yet... */
	rc = LNetNIInit(pid);
	if (rc < 0) {
		CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
		return -ENOENT;
	}

	/* CAVEAT EMPTOR: how we process portals events is _radically_
	 * different depending on... */
	/* Kernel LNet calls our master callback when there are new events;
	 * since we are guaranteed to get every event via the callback, we
	 * just set the EQ size to 0 to avoid the overhead of serializing
	 * enqueue/dequeue operations in LNet. */
	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
	if (rc == 0)
		return 0;

	CERROR("Failed to allocate event queue: %d\n", rc);
	LNetNIFini();
	return -ENOMEM;
}
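
/*
 * Illustrative sketch (hypothetical, for contrast with the zero-size EQ
 * above): a polled consumer would allocate a sized EQ with no handler and
 * drain it with LNetEQPoll(), paying exactly the enqueue/dequeue
 * serialization cost that the callback scheme avoids.
 */
#if 0
static int ptlrpc_eq_poll_example(void)
{
	lnet_handle_eq_t eq;
	lnet_event_t	 ev;
	int		 which;
	int		 rc;

	rc = LNetEQAlloc(1024, LNET_EQ_HANDLER_NONE, &eq);
	if (rc != 0)
		return rc;

	/* Block for up to 100ms, then dispatch any event by hand */
	rc = LNetEQPoll(&eq, 1, 100, &ev, &which);
	if (rc == 1)
		ptlrpc_master_callback(&ev);

	return LNetEQFree(eq);
}
#endif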
int ptlrpc_init_portals(void)
{
	int rc = ptlrpc_ni_init();

	if (rc != 0) {
		CERROR("network initialisation failed\n");
		return rc;
	}
	rc = ptlrpcd_addref();
	if (rc == 0)
		return 0;

	CERROR("rpcd initialisation failed\n");
	ptlrpc_ni_fini();
	return rc;
}

void ptlrpc_exit_portals(void)
{
	ptlrpcd_decref();
	ptlrpc_ni_fini();
}