/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
#include <linux/module.h>
#else
#include <liblustre.h>
#endif
#include <linux/obd_class.h>
#include <linux/lustre_net.h>
#include "ptlrpc_internal.h"
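
/* One slot per portals network interface (NAL); filled in by
 * ptlrpc_init_portals() below as each NAL comes up. */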
struct ptlrpc_ni  ptlrpc_interfaces[NAL_MAX_NR];
int               ptlrpc_ninterfaces;
/*
 * Client's outgoing request callback
 */
void request_out_callback(ptl_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->mem_desc.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        unsigned long          flags;

        LASSERT (ev->type == PTL_EVENT_SEND_END ||
                 ev->type == PTL_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        DEBUG_REQ((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR, req,
                  "type %d, status %d", ev->type, ev->ni_fail_type);

        if (ev->type == PTL_EVENT_UNLINK ||
            ev->ni_fail_type != PTL_NI_OK) {

                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently... */
                spin_lock_irqsave(&req->rq_lock, flags);
                spin_unlock_irqrestore(&req->rq_lock, flags);

                ptlrpc_wake_client_req(req);
        }

        /* this balances the atomic_inc in ptl_send_rpc() */
        ptlrpc_req_finished(req);
}
73 * Client's incoming reply callback
75 void reply_in_callback(ptl_event_t *ev)
77 struct ptlrpc_cb_id *cbid = ev->mem_desc.user_ptr;
78 struct ptlrpc_request *req = cbid->cbid_arg;
82 LASSERT (ev->type == PTL_EVENT_PUT_END ||
83 ev->type == PTL_EVENT_UNLINK);
84 LASSERT (ev->unlinked);
85 LASSERT (ev->mem_desc.start == req->rq_repmsg);
86 LASSERT (ev->offset == 0);
87 LASSERT (ev->mlength <= req->rq_replen);
89 DEBUG_REQ((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR, req,
90 "type %d, status %d", ev->type, ev->ni_fail_type);
92 spin_lock_irqsave (&req->rq_lock, flags);
94 LASSERT (req->rq_receiving_reply);
95 req->rq_receiving_reply = 0;
97 if (ev->type == PTL_EVENT_PUT_END &&
98 ev->ni_fail_type == PTL_NI_OK) {
100 req->rq_nob_received = ev->mlength;
103 /* NB don't unlock till after wakeup; req can disappear under us
104 * since we don't have our own ref */
105 ptlrpc_wake_client_req(req);
107 spin_unlock_irqrestore (&req->rq_lock, flags);
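
/*
 * A ptlrpc_bulk_desc tracks one bulk transfer.  bd_network_rw is set while
 * the pages are exposed to the network; the callback below clears it and
 * records how many bytes actually moved.
 */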
/*
 * Client's bulk has been written/read
 */
void client_bulk_callback (ptl_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->mem_desc.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        unsigned long            flags;

        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
                  ev->type == PTL_EVENT_PUT_END) ||
                 (desc->bd_type == BULK_GET_SOURCE &&
                  ev->type == PTL_EVENT_GET_END) ||
                 ev->type == PTL_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        CDEBUG((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->ni_fail_type, desc);

        spin_lock_irqsave (&desc->bd_lock, flags);

        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

        if (ev->type != PTL_EVENT_UNLINK &&
            ev->ni_fail_type == PTL_NI_OK) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
        }

        /* NB don't unlock till after wakeup; desc can disappear under us
         * since we don't have our own ref */
        ptlrpc_wake_client_req(desc->bd_req);

        spin_unlock_irqrestore (&desc->bd_lock, flags);
}
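
/*
 * Request buffer descriptors (rqbds) cover the buffers into which Portals
 * deposits incoming request messages.  The callback below runs once per
 * message; when the buffer can take no more messages (or is being torn
 * down) the event also carries ev->unlinked, and the request object
 * embedded in the rqbd itself is used rather than allocating a new one.
 */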
/*
 * Server's incoming request callback
 */
void request_in_callback(ptl_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->mem_desc.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_srv_ni              *srv_ni = rqbd->rqbd_srv_ni;
        struct ptlrpc_service             *service = srv_ni->sni_service;
        struct ptlrpc_request             *req;
        unsigned long                      flags;

        LASSERT (ev->type == PTL_EVENT_PUT_END ||
                 ev->type == PTL_EVENT_UNLINK);
        LASSERT ((char *)ev->mem_desc.start >= rqbd->rqbd_buffer);
        LASSERT ((char *)ev->mem_desc.start + ev->offset + ev->mlength <=
                 rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->ni_fail_type, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof (*req));
        } else {
                LASSERT (ev->type == PTL_EVENT_PUT_END);
                if (ev->ni_fail_type != PTL_NI_OK) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), GFP_ATOMIC);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from "LPX64"\n",
                               service->srv_name, ev->initiator.nid);
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqmsg = ev->mem_desc.start + ev->offset;
        if (ev->type == PTL_EVENT_PUT_END &&
            ev->ni_fail_type == PTL_NI_OK)
                req->rq_reqlen = ev->mlength;
        do_gettimeofday(&req->rq_arrival_time);
        req->rq_peer.peer_nid = ev->initiator.nid;
        req->rq_peer.peer_ni = rqbd->rqbd_srv_ni->sni_ni;
        req->rq_rqbd = rqbd;

        spin_lock_irqsave (&service->srv_lock, flags);

        if (ev->unlinked) {
                srv_ni->sni_nrqbd_receiving--;
                if (ev->type != PTL_EVENT_UNLINK &&
                    srv_ni->sni_nrqbd_receiving == 0) {
                        /* This service is off-air on this interface because
                         * all its request buffers are busy.  Portals will
                         * start dropping incoming requests until more buffers
                         * get posted.  NB don't moan if it's because we're
                         * tearing down the service. */
                        CWARN("All %s %s request buffers busy\n",
                              service->srv_name, srv_ni->sni_ni->pni_name);
                }
                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        list_add_tail(&req->rq_list, &service->srv_request_queue);
        service->srv_n_queued_reqs++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        wake_up(&service->srv_waitq);

        spin_unlock_irqrestore(&service->srv_lock, flags);
}
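
/*
 * "Easy" replies are fire-and-forget and are freed as soon as their last
 * network event arrives.  "Difficult" replies (rs_difficult) must survive
 * longer, so their final event just hands them back to the service via
 * ptlrpc_schedule_difficult_reply().
 */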
/*
 * Server's outgoing reply callback
 */
void reply_out_callback(ptl_event_t *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->mem_desc.user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_srv_ni      *sni = rs->rs_srv_ni;
        struct ptlrpc_service     *svc = sni->sni_service;
        unsigned long              flags;

        LASSERT (ev->type == PTL_EVENT_SEND_END ||
                 ev->type == PTL_EVENT_ACK ||
                 ev->type == PTL_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* I'm totally responsible for freeing "easy" replies */
                LASSERT (ev->unlinked);
                lustre_free_reply_state (rs);
                atomic_dec (&svc->srv_outstanding_replies);
                return;
        }

        LASSERT (rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback */
                spin_lock_irqsave (&svc->srv_lock, flags);
                rs->rs_on_net = 0;
                ptlrpc_schedule_difficult_reply (rs);
                spin_unlock_irqrestore (&svc->srv_lock, flags);
        }
}
/*
 * Server's bulk completion callback
 */
void server_bulk_callback (ptl_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->mem_desc.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        unsigned long            flags;

        LASSERT (ev->type == PTL_EVENT_SEND_END ||
                 ev->type == PTL_EVENT_UNLINK ||
                 (desc->bd_type == BULK_PUT_SOURCE &&
                  ev->type == PTL_EVENT_ACK) ||
                 (desc->bd_type == BULK_GET_SINK &&
                  ev->type == PTL_EVENT_REPLY_END));

        CDEBUG((ev->ni_fail_type == PTL_NI_OK) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->ni_fail_type, desc);

        spin_lock_irqsave (&desc->bd_lock, flags);

        if ((ev->type == PTL_EVENT_ACK ||
             ev->type == PTL_EVENT_REPLY_END) &&
            ev->ni_fail_type == PTL_NI_OK) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                wake_up(&desc->bd_waitq);
        }

        spin_unlock_irqrestore (&desc->bd_lock, flags);
}
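
/*
 * Every MD Lustre attaches carries a struct ptlrpc_cb_id in its user_ptr:
 * cbid_fn is one of the callbacks above and cbid_arg is the object it
 * belongs to, so the single event handler below only has to dispatch.
 * Illustrative sketch only (the real wiring is done in the send/receive
 * paths, e.g. ptl_send_rpc(), and the surrounding field names may differ):
 *
 *      cbid->cbid_fn  = request_out_callback;
 *      cbid->cbid_arg = req;
 *      md.user_ptr    = cbid;
 */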
static void ptlrpc_master_callback(ptl_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->mem_desc.user_ptr;
        void (*callback)(ptl_event_t *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT (cbid->cbid_arg != LP_POISON);
        LASSERT (callback == request_out_callback ||
                 callback == reply_in_callback ||
                 callback == client_bulk_callback ||
                 callback == request_in_callback ||
                 callback == reply_out_callback ||
                 callback == server_bulk_callback);

        callback (ev);
}
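
/*
 * Map a peer UUID to its NAL/NID pair and pick the matching local
 * interface from ptlrpc_interfaces[].
 */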
int ptlrpc_uuid_to_peer (struct obd_uuid *uuid, struct ptlrpc_peer *peer)
{
        struct ptlrpc_ni   *pni;
        __u32               peer_nal;
        ptl_nid_t           peer_nid;
        int                 i;
        char                str[PTL_NALFMT_SIZE];
        int                 rc = lustre_uuid_to_peer(uuid->uuid,
                                                     &peer_nal, &peer_nid);
        if (rc != 0)
                return (rc);

        for (i = 0; i < ptlrpc_ninterfaces; i++) {
                pni = &ptlrpc_interfaces[i];
                if (pni->pni_number == peer_nal) {
                        peer->peer_nid = peer_nid;
                        peer->peer_ni = pni;
                        return (0);
                }
        }

        CERROR("Can't find ptlrpc interface for NAL %d, NID %s\n",
               peer_nal, portals_nid2str(peer_nal, peer_nid, str));
        return (-ENOENT);
}
void ptlrpc_ni_fini(struct ptlrpc_ni *pni)
{
        wait_queue_head_t   waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server replies) */
        for (retries = 0;; retries++) {
                rc = PtlEQFree(pni->pni_eq_h);
                switch (rc) {
                default:
                        LBUG();
                case PTL_OK:
                        PtlNIFini(pni->pni_ni_h);
                        return;
                case PTL_EQ_IN_USE:
                        if (retries != 0)
                                CWARN("Event queue for %s still busy\n",
                                      pni->pni_name);
                        /* Wait for a bit before trying again */
                        init_waitqueue_head(&waitq);
                        lwi = LWI_TIMEOUT(2*HZ, NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
}
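
/*
 * Bring up a single network interface and give it the event queue from
 * which ptlrpc_master_callback() dispatches all of the callbacks above.
 */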
int ptlrpc_ni_init(int number, char *name, struct ptlrpc_ni *pni)
{
        int              rc;
        char             str[20];
        ptl_handle_ni_t  nih;

        /* We're not passing any limits yet... */
        rc = PtlNIInit(number, 0, NULL, NULL, &nih);
        if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
                CDEBUG (D_NET, "Can't init network interface %s: %d\n",
                        name, rc);
                return (-ENOENT);
        }

        PtlSnprintHandle(str, sizeof(str), nih);
        CDEBUG (D_NET, "init %d %s: %s\n", number, name, str);

        pni->pni_name = name;
        pni->pni_number = number;
        pni->pni_ni_h = nih;
        pni->pni_eq_h = PTL_INVALID_HANDLE;

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */
#ifdef __KERNEL__
        /* kernel portals calls our master callback when events are added to
         * the event queue.  In fact lustre never pulls events off this queue,
         * so it's only sized for some debug history. */
        rc = PtlEQAlloc(pni->pni_ni_h, 1024, ptlrpc_master_callback,
                        &pni->pni_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
# if CRAY_PORTALS
        /* cray portals implements a non-standard callback to notify us there
         * are buffered events even when the app is not doing a filesystem
         * call. */
        rc = PtlEQAlloc(pni->pni_ni_h, 10240, cray_portals_callback,
                        &pni->pni_eq_h);
# else
        rc = PtlEQAlloc(pni->pni_ni_h, 10240, PTL_EQ_HANDLER_NONE,
                        &pni->pni_eq_h);
# endif
#endif
        if (rc != PTL_OK)
                GOTO (fail, rc = -ENOMEM);

        return (0);
 fail:
        CERROR ("Failed to initialise network interface %s: %d\n",
                name, rc);
        /* OK to do complete teardown since we invalidated the handles above */
        ptlrpc_ni_fini (pni);
        return (rc);
}
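
/*
 * liblustre has no kernel delivering event callbacks, so userspace polls
 * the event queue itself; anything else that needs to run while we wait
 * hooks in through the wait-callback list below.
 */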
#ifndef __KERNEL__
LIST_HEAD(liblustre_wait_callbacks);
void *liblustre_services_callback;
void *
liblustre_register_wait_callback (int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT (llwc != NULL);

        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        list_add_tail(&llwc->llwc_list, &liblustre_wait_callbacks);
        return (llwc);
}
void
liblustre_deregister_wait_callback (void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}
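
/*
 * Poll the event queue once; if an event is ready, dispatch it through
 * ptlrpc_master_callback().  Returns non-zero if an event was handled.
 */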
int
liblustre_check_events (int timeout)
{
        ptl_event_t ev;
        int         rc;
        int         i;

        rc = PtlEQPoll(&ptlrpc_interfaces[0].pni_eq_h, 1, timeout * 1000,
                       &ev, &i);
        if (rc == PTL_EQ_EMPTY)
                return 0;

        LASSERT (rc == PTL_EQ_DROPPED || rc == PTL_OK);

        /* liblustre: no async callback so we can't afford to miss any
         * events... */
        if (rc == PTL_EQ_DROPPED)
                CERROR ("Dropped an event!!!\n");

        ptlrpc_master_callback (&ev);
        return 1;
}
int liblustre_waiting = 0;

int
liblustre_wait_event (int timeout)
{
        struct list_head               *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = list_entry(tmp, struct liblustre_wait_callback,
                                          llwc_list);
                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}
#if CRAY_PORTALS
static void cray_portals_callback(ptl_event_t *ev)
{
        /* We get a callback from the client Cray portals implementation
         * whenever anyone calls PtlEQPoll(), and an event queue with a
         * callback handler has outstanding events.
         *
         * If it's not liblustre calling PtlEQPoll(), this lets us know we
         * have outstanding events which we handle with
         * liblustre_wait_event().
         *
         * Otherwise, we're already eagerly consuming events and we'd
         * handle events out of order if we recursed. */
        if (!liblustre_waiting)
                liblustre_wait_event(0);
}
#endif
#endif /* __KERNEL__ */
int ptlrpc_default_nal(void)
{
        if (ptlrpc_ninterfaces == 0)
                return (-ENOENT);

        return (ptlrpc_interfaces[0].pni_number);
}
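
/*
 * Try to bring up every NAL we know about; missing NALs are not an error
 * as long as at least one interface comes up.
 */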
int ptlrpc_init_portals(void)
{
        /* Add new portals network interfaces here.
         * Order is irrelevant! */
        static struct {
                int   number;
                char *name;
        } ptl_nis[] = {
                {SOCKNAL,       "socknal"},
                {SCIMACNAL,     "scimacnal"},
                {CRAY_KB_ERNAL, "cray_kb_ernal"}};
        int   rc;
        int   i;

        LASSERT(ptlrpc_ninterfaces == 0);

        for (i = 0; i < sizeof (ptl_nis) / sizeof (ptl_nis[0]); i++) {
                LASSERT(ptlrpc_ninterfaces < (sizeof(ptlrpc_interfaces) /
                                              sizeof(ptlrpc_interfaces[0])));

                rc = ptlrpc_ni_init(ptl_nis[i].number, ptl_nis[i].name,
                                    &ptlrpc_interfaces[ptlrpc_ninterfaces]);
                if (rc == 0)
                        ptlrpc_ninterfaces++;
        }

        if (ptlrpc_ninterfaces == 0) {
                CERROR("network initialisation failed: is a NAL module "
                       "loaded?\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback(&liblustre_check_services, NULL);
#endif
        return 0;
}
void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        while (ptlrpc_ninterfaces > 0)
                ptlrpc_ni_fini (&ptlrpc_interfaces[--ptlrpc_ninterfaces]);
}