/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
# include <liblustre.h>
#else
# include <libcfs/libcfs.h>
# include <linux/kernel.h>
#endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"
lnet_handle_eq_t   ptlrpc_eq_h;

/**
 * Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        sptlrpc_request_out_callback(req);
        req->rq_real_sent = cfs_time_current_sec();

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently... */
                cfs_spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                cfs_spin_unlock(&req->rq_lock);

                ptlrpc_client_wake_req(req);
        }

        ptlrpc_req_finished(req);
        EXIT;
}
/**
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->md.start == req->rq_repbuf);
        LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
           for adaptive timeouts' early reply. */
        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

        cfs_spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_must_unlink = 0;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncate = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received: mlen=%u offset=%d replen=%d "
                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                if (req->rq_replied)   /* already got the real reply */
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                   so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        cfs_spin_unlock(&req->rq_lock);
        EXIT;
}
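
/*
 * An illustrative note on the reply buffer layout implied above (an
 * assumption drawn from this code, not a separate spec): because the reply
 * MD is posted with LNET_MD_MANAGE_REMOTE, the server chooses the offset of
 * each PUT into rq_repbuf.  With MSGHDR_AT_SUPPORT, an adaptive-timeout
 * early reply is PUT at offset 0, while the final reply lands at a non-zero
 * offset that becomes rq_reply_off:
 *
 *     rq_repbuf: [ early reply @ offset 0 | real reply @ rq_reply_off ]
 */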
/**
 * Client's bulk has been written/read
 */
void client_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        struct ptlrpc_request   *req;
        ENTRY;

        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
                  ev->type == LNET_EVENT_PUT) ||
                 (desc->bd_type == BULK_GET_SOURCE &&
                  ev->type == LNET_EVENT_GET) ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);
        req = desc->bd_req;
        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        } else {
                /* start reconnect and resend if network error hit */
                cfs_spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                cfs_spin_unlock(&req->rq_lock);
        }

        /* release the encrypted pages for write */
        if (desc->bd_req->rq_bulk_write)
                sptlrpc_enc_pool_put_pages(desc);

        /* NB don't unlock till after wakeup; desc can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}
/**
 * We will have percpt request history list for ptlrpc service in upcoming
 * patches because we don't want to be serialized by current per-service
 * history operations. So we require that the history ID can (somehow) show
 * arrival order without grabbing a global lock, and that users can sort
 * them in userspace.
 *
 * This is how we generate history ID for ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X)bits |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence   |  CPT id  |
 * ----------------------------------------------------
 *
 * It might not be precise but should be good enough.
 */
#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT          32
#define REQS_USEC_SHIFT         16
#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)
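
/*
 * Worked example (illustrative values only): assume a service with
 * srv_cpt_bits = 2, so REQS_SEQ_SHIFT(svcpt) = 2.  A request arriving at
 * tv_sec = 0x4d2, tv_usec = 100 on CPT 1 gets
 *
 *     (0x4d2ULL << 32) | ((100 >> 4) << 16) | 1 = 0x000004d200060001
 *
 * A second request in the same usec bucket on the same CPT takes the
 * else-branch below and bumps the sequence field instead:
 *
 *     0x000004d200060001 + (1U << 2) = 0x000004d200060005
 */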
static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
{
        __u64 sec = req->rq_arrival_time.tv_sec;
        __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
        __u64 new_seq;

        /* set sequence ID for request and add it to history list,
         * the caller must hold svcpt::scp_lock */

        new_seq = (sec << REQS_SEC_SHIFT) |
                  (usec << REQS_USEC_SHIFT) | svcpt->scp_cpt;
        if (new_seq > svcpt->scp_hist_seq) {
                /* This handles the initial case of scp_hist_seq == 0 or
                 * we just jumped into a new time window */
                svcpt->scp_hist_seq = new_seq;
        } else {
                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
                /* NB: increase sequence number in current usec bucket,
                 * however, it's possible that we used up all bits for
                 * sequence and jumped into the next usec bucket (future time),
                 * then we hope there will be fewer RPCs per bucket at some
                 * point, and sequence will catch up again */
                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
                new_seq = svcpt->scp_hist_seq;
        }

        req->rq_history_seq = new_seq;

        cfs_list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
/**
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *service = svcpt->scp_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_PUT ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
                 rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof(*req));
        } else {
                LASSERT (ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        cfs_gettimeofday(&req->rq_arrival_time);
        req->rq_peer = ev->initiator;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
        cfs_spin_lock_init(&req->rq_lock);
        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
        cfs_atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

        cfs_spin_lock(&svcpt->scp_lock);

        ptlrpc_req_add_history(svcpt, req);

        if (ev->unlinked) {
                svcpt->scp_nrqbds_posted--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       svcpt->scp_nrqbds_posted);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    svcpt->scp_nrqbds_posted == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
        svcpt->scp_nreqs_incoming++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&svcpt->scp_waitq);

        cfs_spin_unlock(&svcpt->scp_lock);
        EXIT;
}
/**
 * Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id        *cbid = ev->md.user_ptr;
        struct ptlrpc_reply_state  *rs = cbid->cbid_arg;
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_ACK ||
                 ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT (ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT (rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                cfs_spin_lock(&svcpt->scp_rep_lock);
                cfs_spin_lock(&rs->rs_lock);

                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <=
                    rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply(rs);

                cfs_spin_unlock(&rs->rs_lock);
                cfs_spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
}
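
/*
 * Illustration (numbers are hypothetical): with rs_no_ack set,
 * rs_transno = 7 and obd_last_committed = 5, the condition above fails and
 * the reply is not scheduled here; it is presumably left for the commit
 * path to schedule once transno 7 commits.  With rs_transno = 5 or below,
 * or with rs_no_ack clear, it is handed to
 * ptlrpc_schedule_difficult_reply() immediately.
 */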
#ifdef HAVE_SERVER_SUPPORT
/**
 * Server's bulk completion callback
 */
void server_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK ||
                 (desc->bd_type == BULK_PUT_SOURCE &&
                  ev->type == LNET_EVENT_ACK) ||
                 (desc->bd_type == BULK_GET_SINK &&
                  ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                cfs_waitq_signal(&desc->bd_waitq);
        }

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}
#endif /* HAVE_SERVER_SUPPORT */
static void ptlrpc_master_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT (cbid->cbid_arg != LP_POISON);
        LASSERT (callback == request_out_callback ||
                 callback == reply_in_callback ||
                 callback == client_bulk_callback ||
                 callback == request_in_callback ||
                 callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
                 || callback == server_bulk_callback
#endif
                 );

        callback (ev);
}
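
/*
 * Sketch of how this dispatch works (field names follow ptlrpc usage, but
 * the fragment is illustrative and not part of the build): every MD that
 * ptlrpc attaches carries a struct ptlrpc_cb_id in md.user_ptr, e.g. for a
 * reply buffer:
 *
 *     req->rq_reply_cbid.cbid_fn  = reply_in_callback;
 *     req->rq_reply_cbid.cbid_arg = req;
 *     md.user_ptr = &req->rq_reply_cbid;
 *
 * so ptlrpc_master_callback() only ever has to chase one pointer.
 */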
int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
                         lnet_process_id_t *peer, lnet_nid_t *self)
{
        int        best_dist = 0;
        __u32      best_order = 0;
        int        count = 0;
        int        rc = -ENOENT;
        int        portals_compatibility;
        int        dist;
        __u32      order;
        lnet_nid_t dst_nid;
        lnet_nid_t src_nid;

        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

        peer->pid = LUSTRE_SRV_LNET_PID;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        if (portals_compatibility > 1) {
                                /* Strong portals compatibility: Zero the nid's
                                 * NET, so if I'm reading new config logs, or
                                 * getting configured by (new) lconf I can
                                 * still talk to old servers. */
                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
                        }

                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}
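
/*
 * Usage sketch (illustrative): a caller resolving a target before
 * connecting would do something like
 *
 *     lnet_process_id_t peer;
 *     lnet_nid_t        self;
 *
 *     if (ptlrpc_uuid_to_peer(uuid, &peer, &self) != 0)
 *             ... no NID known for this UUID, fail the connect ...
 */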
void ptlrpc_ni_fini(void)
{
        cfs_waitq_t        waitq;
        struct l_wait_info lwi;
        int                rc;
        int                retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies */

        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();
                case 0:
                        LNetNIFini();
                        return;
                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");
                        /* Wait for a bit */
                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}
lnet_pid_t ptl_get_pid(void)
{
        lnet_pid_t pid;

#ifndef __KERNEL__
        pid = getpid();
#else
        pid = LUSTRE_SRV_LNET_PID;
#endif
        return pid;
}
int ptlrpc_ni_init(void)
{
        int        rc;
        lnet_pid_t pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
                return (-ENOENT);
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */
#ifdef __KERNEL__
        /* kernel LNet calls our master callback when there are new events,
         * because we are guaranteed to get every event via callback,
         * so we just set EQ size to 0 to avoid overhead of serializing
         * enqueue/dequeue operations in LNet. */
        rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
#endif
        if (rc == 0)
                return 0;

        CERROR("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();

        return (-ENOMEM);
}
#ifndef __KERNEL__
CFS_LIST_HEAD(liblustre_wait_callbacks);
CFS_LIST_HEAD(liblustre_idle_callbacks);
void *liblustre_services_callback;
void *
liblustre_register_waitidle_callback (cfs_list_t *callback_list,
                                      const char *name,
                                      int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT (llwc != NULL);

        llwc->llwc_name = name;
        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        cfs_list_add_tail(&llwc->llwc_list, callback_list);

        return (llwc);
}
void
liblustre_deregister_waitidle_callback (void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        cfs_list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}
void *
liblustre_register_wait_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_wait_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

void *
liblustre_register_idle_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_idle_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}
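
/*
 * Usage sketch (my_poll_fn/my_arg are hypothetical): a liblustre subsystem
 * that must be polled while the single thread waits registers a callback
 * and keeps the opaque handle for deregistration, exactly as
 * ptlrpc_init_portals() below does with liblustre_check_services:
 *
 *     void *h = liblustre_register_wait_callback("my_poller",
 *                                                &my_poll_fn, my_arg);
 *     ...
 *     liblustre_deregister_wait_callback(h);
 *
 * The callback returns non-zero when it made progress.
 */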
int
liblustre_check_events (int timeout)
{
        lnet_event_t ev;
        int          rc;
        int          i;
        ENTRY;

        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
        if (rc == 0)
                RETURN(0);

        LASSERT (rc == -EOVERFLOW || rc == 1);

        /* liblustre: no asynch callback so we can't afford to miss any
         * events... */
        if (rc == -EOVERFLOW) {
                CERROR("Dropped an event!!!\n");
                abort();
        }

        ptlrpc_master_callback(&ev);
        RETURN(1);
}
int liblustre_waiting = 0;
int
liblustre_wait_event (int timeout)
{
        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}
void
liblustre_wait_idle(void)
{
        static int recursed = 0;

        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             idle = 0;

        LASSERT(!recursed);
        recursed = 1;

        do {
                liblustre_wait_event(0);

                idle = 1;

                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                idle = 0;
                                break;
                        }
                }
        } while (!idle);

        recursed = 0;
}
#endif /* __KERNEL__ */
int ptlrpc_init_portals(void)
{
        int rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback("liblustre_check_services",
                                                 &liblustre_check_services,
                                                 NULL);
        cfs_init_completion_module(liblustre_wait_event);
#endif
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpc_ni_fini();
        return rc;
}
void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}