/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"
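/*
 * ptlrpc_handler is registered with LNet as the event handler for every MD
 * ptlrpc attaches; it is set to ptlrpc_master_callback() in ptlrpc_ni_init()
 * below.  ptlrpc_pending tracks outstanding network activity so that
 * ptlrpc_ni_fini() can wait for it to drain before calling LNetNIFini().
 */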
lnet_handler_t ptlrpc_handler;
struct percpu_ref ptlrpc_pending;
/**
 * Client's outgoing request callback
 */
void request_out_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;
	bool wakeup = false;

	LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val &&
		     CFS_FAIL_CHECK_RESET(OBD_FAIL_NET_ERROR_RPC,
					  OBD_FAIL_OSP_PRECREATE_PAUSE |
					  CFS_FAIL_ONCE)))
		ev->status = -ECONNABORTED;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	/* Do not update imp_next_ping for connection request */
	if (lustre_msg_get_opc(req->rq_reqmsg) !=
	    req->rq_import->imp_connect_op)
		ptlrpc_pinger_sending_on_import(req->rq_import);

	sptlrpc_request_out_callback(req);

	spin_lock(&req->rq_lock);
	req->rq_real_sent = ktime_get_real_seconds();
	req->rq_req_unlinked = 1;
	/* reply_in_callback happened before request_out_callback? */
	if (req->rq_reply_unlinked)
		wakeup = true;

	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c do currently...
		 */
		req->rq_net_err = 1;
		wakeup = true;
	}

	if (wakeup)
		ptlrpc_client_wake_req(req);

	spin_unlock(&req->rq_lock);

	ptlrpc_req_finished(req);
}
/**
 * Client's incoming reply callback
 */
void reply_in_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->md_start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	 * for adaptive timeouts' early reply.
	 */
	LASSERT((ev->md_options & LNET_MD_MANAGE_REMOTE) != 0);

	spin_lock(&req->rq_lock);

	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	if (ev->unlinked)
		req->rq_reply_unlinked = 1;

	if (ev->status)
		goto out_wake;

	if (ev->type == LNET_EVENT_UNLINK) {
		LASSERT(ev->unlinked);
		DEBUG_REQ(D_NET, req, "unlink");
		goto out_wake;
	}

	if (ev->mlength < ev->rlength) {
		CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
		       req->rq_replen, ev->rlength, ev->offset);
		req->rq_reply_truncated = 1;
		req->rq_replied = 1;
		req->rq_status = -EOVERFLOW;
		req->rq_nob_received = ev->rlength + ev->offset;
		goto out_wake;
	}

	if ((ev->offset == 0) &&
	    ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
		/* Early reply */
		DEBUG_REQ(D_ADAPTTO, req,
			  "Early reply received, mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
			  ev->mlength, ev->offset,
			  req->rq_replen, req->rq_replied, ev->unlinked);

		req->rq_early_count++; /* number received, client side */

		/* already got the real reply or buffers are already unlinked */
		if (req->rq_replied ||
		    req->rq_reply_unlinked == 1)
			goto out_wake;

		req->rq_early = 1;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* And we're still receiving */
		req->rq_receiving_reply = 1;
	} else {
		/* Real reply */
		req->rq_rep_swab_mask = 0;
		req->rq_replied = 1;
		/* Got reply, no resend required */
		req->rq_resend = 0;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		 * so we must unlink in ptlrpc_unregister_reply */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
			  ev->mlength, ev->offset, req->rq_replen);
	}

	if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
		req->rq_import->imp_last_reply_time = ktime_get_real_seconds();

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
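/*
 * Note on the early-reply protocol handled in reply_in_callback() above:
 * because the reply MD is posted with LNET_MD_MANAGE_REMOTE, the sender
 * controls the PUT offset.  When the client advertised MSGHDR_AT_SUPPORT,
 * a PUT at offset 0 is an early reply extending the timeout, while the
 * final reply arrives at a sender-chosen non-zero offset (saved above in
 * rq_reply_off).
 */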
/**
 * Client's bulk has been written/read
 */
void client_bulk_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
	struct ptlrpc_request *req;

	LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
		 ev->type == LNET_EVENT_PUT) ||
		(ptlrpc_is_bulk_get_source(desc->bd_type) &&
		 ev->type == LNET_EVENT_GET) ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
		ev->status = -EIO;

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
		ev->status = -EIO;

	CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR,
		     "event type %d, status %d, desc %p\n",
		     ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);
	req = desc->bd_req;
	LASSERT(desc->bd_refs > 0);
	desc->bd_refs--;

	if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	} else {
		/* start reconnect and resend if network error hit */
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);
		desc->bd_failure = 1;
	}

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * otherwise */
	if (desc->bd_refs == 0)
		ptlrpc_client_wake_req(desc->bd_req);

	spin_unlock(&desc->bd_lock);
}
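/*
 * Note for client_bulk_callback() above: bd_refs holds one reference per
 * bulk MD posted for the descriptor, and each callback (always an unlink
 * event here) drops one, so the waiting request is only woken once the
 * last MD has been unlinked.
 */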
/*
 * We will have a percpt request history list for each ptlrpc service in
 * upcoming patches, because we don't want to be serialized by the current
 * per-service history operations.  So we require that a history ID reflects
 * arrival order without grabbing a global lock, and that users can sort
 * entries in userspace.
 *
 * This is how we generate the history ID for a ptlrpc_request (see the
 * worked example after the REQS_* defines below):
 * ----------------------------------------------------
 * | 32 bits | 16 bits   | (16 - X) bits | X bits |
 * ----------------------------------------------------
 * | seconds | usec / 16 | sequence      | CPT id |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */
#define REQS_CPT_BITS(svcpt)	((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT		32
#define REQS_USEC_SHIFT		16
#define REQS_SEQ_SHIFT(svcpt)	REQS_CPT_BITS(svcpt)
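/*
 * Worked example of the history ID layout above (hypothetical values): on a
 * service with srv_cpt_bits = 2, a request arriving at tv_sec = 0x12345678
 * in usec bucket 0x0abc (i.e. usec / 16) on CPT 1 gets
 *
 *	new_seq = (0x12345678ULL << REQS_SEC_SHIFT) |
 *		  (0x0abcULL << REQS_USEC_SHIFT) | 1
 *		= 0x123456780abc0001
 *
 * A second request landing in the same usec bucket on the same CPT computes
 * an equal new_seq, takes the else branch below, and bumps the sequence
 * field instead:
 *
 *	scp_hist_seq += 1U << REQS_SEQ_SHIFT(svcpt)	(here: 1 << 2)
 *	new_seq = 0x123456780abc0005
 */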
static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
				   struct ptlrpc_request *req)
{
	u64 sec = req->rq_arrival_time.tv_sec;
	u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
	u64 new_seq;

	/* set sequence ID for request and add it to the history list,
	 * it must be called with svcpt::scp_lock held */

	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
		  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * we just jumped into a new time window */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
		/* NB: increase sequence number in current usec bucket,
		 * however, it's possible that we used up all bits for
		 * sequence and jumped into the next usec bucket (future time),
		 * then we hope there will be fewer RPCs per bucket at some
		 * point, and the sequence will catch up again */
		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}

	req->rq_history_seq = new_seq;

	list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
/**
 * Server's incoming request callback
 */
void request_in_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *service = svcpt->scp_service;
	struct ptlrpc_request *req;

	LASSERT(ev->type == LNET_EVENT_PUT ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT((char *)ev->md_start >= rqbd->rqbd_buffer);
	LASSERT((char *)ev->md_start + ev->offset + ev->mlength <=
		rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR,
		     "event type %d, status %d, service %s\n",
		     ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context.
		 */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
		LASSERT(ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) /* We moaned above already... */
			return;
		req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
		if (req == NULL) {
			CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_idstr(&ev->initiator));
			return;
		}
	}

	ptlrpc_srv_req_init(req);
	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero.  We only set the message
	 * size to non-zero if this was a successful receive. */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md_start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	ktime_get_real_ts64(&req->rq_arrival_time);
	/* Multi-Rail: keep track of both initiator and source NID. */
	req->rq_peer = ev->initiator;
	req->rq_source = ev->source;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s (source: %s)\n",
	       libcfs_idstr(&req->rq_peer), libcfs_idstr(&req->rq_source));

	spin_lock(&svcpt->scp_lock);

	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}
/**
 * Server's outgoing reply callback
 */
void reply_out_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	struct ptlrpc_reply_state *rs = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
	bool need_schedule = false;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_ACK ||
		ev->type == LNET_EVENT_UNLINK);

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing so I drop the
		 * net's ref on 'rs'
		 */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
	}

	if (ev->type == LNET_EVENT_SEND) {
		spin_lock(&rs->rs_lock);
		rs->rs_sent = 1;
		/* If transaction was committed before the SEND, and the ACK
		 * is lost, then we need to schedule so ptlrpc_hr can unlink
		 * the MD.
		 */
		if (rs->rs_handled)
			need_schedule = true;
		spin_unlock(&rs->rs_lock);
	}

	if (ev->unlinked || need_schedule) {
		LASSERT(rs->rs_sent);

		/* Last network callback. The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it
		 */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

		rs->rs_unlinked = ev->unlinked;
		if (!rs->rs_no_ack ||
		    rs->rs_transno <=
		    rs->rs_export->exp_obd->obd_last_committed ||
		    list_empty(&rs->rs_obd_list))
			ptlrpc_schedule_difficult_reply(rs);

		spin_unlock(&rs->rs_lock);
		spin_unlock(&svcpt->scp_rep_lock);
	}
}
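/*
 * Note for reply_out_callback() above: roughly, a difficult reply must
 * survive both the network (until it is ACKed or unlinked) and recovery
 * (until its transaction commits, unless no ACK is expected), so it is only
 * handed to ptlrpc_hr via ptlrpc_schedule_difficult_reply() once those
 * conditions allow.
 */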
#ifdef HAVE_SERVER_SUPPORT
/**
 * Server's bulk completion callback
 */
void server_bulk_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_UNLINK ||
		(ptlrpc_is_bulk_put_source(desc->bd_type) &&
		 ev->type == LNET_EVENT_ACK) ||
		(ptlrpc_is_bulk_get_sink(desc->bd_type) &&
		 ev->type == LNET_EVENT_REPLY));

	CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR,
		     "event type %d, status %d, desc %p\n",
		     ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);

	LASSERT(desc->bd_refs > 0);

	if ((ev->type == LNET_EVENT_ACK ||
	     ev->type == LNET_EVENT_REPLY) &&
	    ev->status == 0) {
		/* We heard back from the peer, so even if we get this
		 * before the SENT event (oh yes we can), we know we
		 * read/wrote the peer buffer and how much... */
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	}

	if (ev->status != 0)
		desc->bd_failure = 1;

	if (ev->unlinked) {
		desc->bd_refs--;
		/* This is the last callback no matter what... */
		if (desc->bd_refs == 0)
			wake_up(&desc->bd_waitq);
	}

	spin_unlock(&desc->bd_lock);
}
#endif /* HAVE_SERVER_SUPPORT */
static void ptlrpc_master_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;

	/* Honestly, it's best to find out early. */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
		|| callback == server_bulk_callback
#endif
		);

	callback(ev);
	if (ev->unlinked)
		percpu_ref_put(&ptlrpc_pending);
}
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			struct lnet_processid *peer,
			struct lnet_nid *self,
			__u32 refnet)
{
	int best_dist = 0;
	__u32 best_order = 0;
	int count = 0;
	int rc = -ENOENT;
	int dist;
	__u32 order;
	struct lnet_nid dst_nid;
	struct lnet_nid src_nid;

	peer->pid = LNET_PID_LUSTRE;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		if (refnet != LNET_NET_ANY &&
		    LNET_NID_NET(&dst_nid) != refnet)
			continue;

		dist = LNetDist(&dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) { /* local! use loopback LND */
			lnet_nid4_to_nid(LNET_NID_LO_0, self);
			peer->nid = dst_nid;
			rc = 0;
			break;
		}

		if (rc < 0 || dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;
			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_idstr(peer));
	return rc;
}
static struct completion ptlrpc_done;

static void ptlrpc_release(struct percpu_ref *ref)
{
	complete(&ptlrpc_done);
}
static void ptlrpc_ni_fini(void)
{
	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies */

	init_completion(&ptlrpc_done);
	percpu_ref_kill(&ptlrpc_pending);
	wait_for_completion(&ptlrpc_done);

	lnet_assert_handler_unused(ptlrpc_handler);

	LNetNIFini();
}
lnet_pid_t ptl_get_pid(void)
{
	return LNET_PID_LUSTRE;
}
int ptlrpc_ni_init(void)
{
	int rc;
	lnet_pid_t pid;

	pid = ptl_get_pid();
	CDEBUG(D_NET, "My pid is: %x\n", pid);

	/* We're not passing any limits yet... */
	rc = LNetNIInit(pid);
	if (rc < 0) {
		CDEBUG(D_NET, "ptlrpc: Can't init network interface: rc = %d\n",
		       rc);
		return rc;
	}

	rc = percpu_ref_init(&ptlrpc_pending, ptlrpc_release, 0, GFP_KERNEL);
	if (rc) {
		CERROR("ptlrpc: Can't init percpu refcount: rc = %d\n", rc);
		return rc;
	}

	/* CAVEAT EMPTOR: how we process portals events is _radically_
	 * different depending on...
	 */
	/* kernel LNet calls our master callback when there are new events,
	 * and because we are guaranteed to get every event via callback,
	 * we just set the EQ size to 0 to avoid the overhead of serializing
	 * enqueue/dequeue operations in LNet. */
	ptlrpc_handler = ptlrpc_master_callback;
	return 0;
}
int ptlrpc_init_portals(void)
{
	int rc = ptlrpc_ni_init();

	if (rc != 0) {
		CERROR("network initialisation failed\n");
		return rc;
	}
	rc = ptlrpcd_addref();
	if (rc == 0)
		return 0;

	CERROR("rpcd initialisation failed\n");
	ptlrpc_ni_fini();
	return rc;
}
void ptlrpc_exit_portals(void)
{
	ptlrpcd_decref();
	ptlrpc_ni_fini();
}