lustre/ptlrpc/events.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
# include <liblustre.h>
#else
# include <libcfs/libcfs.h>
# ifdef __mips64__
#  include <linux/kernel.h>
# endif
#endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

lnet_handle_eq_t   ptlrpc_eq_h;

/*
 *  Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        sptlrpc_request_out_callback(req);
        req->rq_real_sent = cfs_time_current_sec();

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently...  */

                cfs_spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                cfs_spin_unlock(&req->rq_lock);

                ptlrpc_client_wake_req(req);
        }

        ptlrpc_req_finished(req);

        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->md.start == req->rq_repbuf);
        LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
           for adaptive timeouts' early reply. */
        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

        cfs_spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_must_unlink = 0;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncate = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

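        /* With adaptive timeouts the server may first PUT a small "early
         * reply" at offset 0 of the reply buffer to extend the RPC
         * deadline; the real reply then lands at a non-zero offset, which
         * is why the reply MD is posted with LNET_MD_MANAGE_REMOTE. */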
        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received: mlen=%u offset=%d replen=%d "
                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                if (req->rq_replied)   /* already got the real reply */
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                   so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        cfs_spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
                  ev->type == LNET_EVENT_PUT) ||
                 (desc->bd_type == BULK_GET_SOURCE &&
                  ev->type == LNET_EVENT_GET) ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

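        /* Test hooks: setting the matching fail_loc from userspace
         * (e.g. "lctl set_param fail_loc=..." with one of the
         * OBD_FAIL_PTLRPC_CLIENT_BULK_CB* values) makes this callback
         * report -EIO once, to exercise bulk error handling. */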
        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        /* release the encrypted pages for write */
        if (desc->bd_req->rq_bulk_write)
                sptlrpc_enc_pool_put_pages(desc);

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        ptlrpc_client_wake_req(desc->bd_req);

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * We will have percpt request history lists for ptlrpc services in upcoming
 * patches, because we don't want to be serialized by the current per-service
 * history operations. So we require that the history ID can (somehow) reflect
 * arrival order without grabbing a global lock, and that users can sort
 * records by ID in userspace.
 *
 * This is how we generate history IDs for ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X)bits  |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    | CPT id   |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */

#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT          32
#define REQS_USEC_SHIFT         16
#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)
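
/* A hypothetical worked example: with srv_cpt_bits == 2, a request
 * arriving at tv_sec = 1000, tv_usec = 170 on CPT 1 gets
 *
 *   new_seq = (1000ULL << REQS_SEC_SHIFT) |
 *             ((170 >> 4) << REQS_USEC_SHIFT) | 1
 *           = (1000ULL << 32) | (10 << 16) | 1
 *
 * A second request landing in the same usec bucket takes the "else"
 * branch below and bumps scp_hist_seq by (1 << REQS_SEQ_SHIFT) == 4,
 * i.e. it increments the 14-bit sequence field by one while leaving
 * the CPT id bits intact. */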

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
{
        __u64   sec = req->rq_arrival_time.tv_sec;
        __u32   usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
        __u64   new_seq;

        /* Set the sequence ID for the request and add it to the history
         * list; this must be called with svcpt::scp_lock held */

        new_seq = (sec << REQS_SEC_SHIFT) |
                  (usec << REQS_USEC_SHIFT) | svcpt->scp_cpt;
        if (new_seq > svcpt->scp_hist_seq) {
                /* This handles the initial case of scp_hist_seq == 0 or
                 * we just jumped into a new time window */
                svcpt->scp_hist_seq = new_seq;
        } else {
                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
                /* NB: increase the sequence number in the current usec
                 * bucket; however, it's possible that we used up all bits
                 * for the sequence and jumped into the next usec bucket
                 * (future time). In that case we hope there will be fewer
                 * RPCs per bucket at some point, and the sequence will
                 * catch up again */
                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
                new_seq = svcpt->scp_hist_seq;
        }

        req->rq_history_seq = new_seq;

        cfs_list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}

/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *service = svcpt->scp_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_PUT ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
                 rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof (*req));
        } else {
                LASSERT (ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        cfs_gettimeofday(&req->rq_arrival_time);
        req->rq_peer = ev->initiator;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
        cfs_spin_lock_init(&req->rq_lock);
        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        cfs_atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

        cfs_spin_lock(&svcpt->scp_lock);

        ptlrpc_req_add_history(svcpt, req);

        if (ev->unlinked) {
                svcpt->scp_nrqbds_posted--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       svcpt->scp_nrqbds_posted);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    svcpt->scp_nrqbds_posted == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
        svcpt->scp_nreqs_incoming++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&svcpt->scp_waitq);

        cfs_spin_unlock(&svcpt->scp_lock);
        EXIT;
}

/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->md.user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_ACK ||
                 ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT (ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT (rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                cfs_spin_lock(&svcpt->scp_rep_lock);
                cfs_spin_lock(&rs->rs_lock);

                rs->rs_on_net = 0;
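                /* Schedule handling now unless this reply needs no ACK
                 * and its transno has not committed yet; in that case
                 * the reply is scheduled later, once the transno
                 * commits. */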
                if (!rs->rs_no_ack ||
                    rs->rs_transno <=
                    rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply(rs);

                cfs_spin_unlock(&rs->rs_lock);
                cfs_spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
}

#ifdef HAVE_SERVER_SUPPORT
/*
 * Server's bulk completion callback
 */
void server_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK ||
                 (desc->bd_type == BULK_PUT_SOURCE &&
                  ev->type == LNET_EVENT_ACK) ||
                 (desc->bd_type == BULK_GET_SINK &&
                  ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                cfs_waitq_signal(&desc->bd_waitq);
        }

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}
#endif

static void ptlrpc_master_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

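        /* Every MD posted by ptlrpc sets md.user_ptr to a ptlrpc_cb_id
         * embedded in the owning request/rqbd/bulk desc/reply state, so
         * this single handler for the one event queue can dispatch each
         * event to its owner's callback. */
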
        /* Honestly, it's best to find out early. */
        LASSERT (cbid->cbid_arg != LP_POISON);
        LASSERT (callback == request_out_callback ||
                 callback == reply_in_callback ||
                 callback == client_bulk_callback ||
                 callback == request_in_callback ||
                 callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
                 || callback == server_bulk_callback
#endif
                 );

        callback (ev);
}

int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
                         lnet_process_id_t *peer, lnet_nid_t *self)
{
        int               best_dist = 0;
        __u32             best_order = 0;
        int               count = 0;
        int               rc = -ENOENT;
        int               portals_compatibility;
        int               dist;
        __u32             order;
        lnet_nid_t        dst_nid;
        lnet_nid_t        src_nid;

        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

        peer->pid = LUSTRE_SRV_LNET_PID;

        /* Choose the matching UUID that's closest */
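        /* LNetDist() returns the distance to dst_nid (0 == local, < 0 ==
         * unreachable/error) and fills in the preferred source NID and
         * interface order; the smallest distance wins, with order as the
         * tie-breaker. */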
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        if (portals_compatibility > 1) {
                                /* Strong portals compatibility: Zero the nid's
                                 * NET, so if I'm reading new config logs, or
                                 * getting configured by (new) lconf I can
                                 * still talk to old servers. */
                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
                        }
                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET,"%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}

void ptlrpc_ni_fini(void)
{
        cfs_waitq_t         waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */

        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();

                case 0:
                        LNetNIFini();
                        return;

                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");

                        /* Wait for a bit */
                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}

lnet_pid_t ptl_get_pid(void)
{
        lnet_pid_t        pid;

#ifndef __KERNEL__
        pid = getpid();
#else
        pid = LUSTRE_SRV_LNET_PID;
#endif
        return pid;
}

int ptlrpc_ni_init(void)
{
        int              rc;
        lnet_pid_t       pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
                return (-ENOENT);
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */
#ifdef __KERNEL__
        /* kernel LNet calls our master callback when there is a new event,
         * and because we are guaranteed to get every event via callback,
         * we just set the EQ size to 0 to avoid the overhead of serializing
         * enqueue/dequeue operations in LNet. */
        rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
#endif
        if (rc == 0)
                return 0;

        CERROR ("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();

        return (-ENOMEM);
}

#ifndef __KERNEL__
CFS_LIST_HEAD(liblustre_wait_callbacks);
CFS_LIST_HEAD(liblustre_idle_callbacks);
void *liblustre_services_callback;

void *
liblustre_register_waitidle_callback (cfs_list_t *callback_list,
                                      const char *name,
                                      int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT (llwc != NULL);

        llwc->llwc_name = name;
        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        cfs_list_add_tail(&llwc->llwc_list, callback_list);

        return (llwc);
}

void
liblustre_deregister_waitidle_callback (void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        cfs_list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}

void *
liblustre_register_wait_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_wait_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

void *
liblustre_register_idle_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_idle_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

int
liblustre_check_events (int timeout)
{
        lnet_event_t ev;
        int         rc;
        int         i;
        ENTRY;

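        /* Poll the single ptlrpc EQ for up to 'timeout' seconds
         * (LNetEQPoll takes milliseconds); rc == 1 means an event was
         * dequeued into 'ev', rc == -EOVERFLOW means the queue dropped
         * events. */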
        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
        if (rc == 0)
                RETURN(0);

        LASSERT (rc == -EOVERFLOW || rc == 1);

        /* liblustre: no asynch callback so we can't afford to miss any
         * events... */
        if (rc == -EOVERFLOW) {
                CERROR ("Dropped an event!!!\n");
                abort();
        }

        ptlrpc_master_callback (&ev);
        RETURN(1);
}

int liblustre_waiting = 0;

int
liblustre_wait_event (int timeout)
{
        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}

void
liblustre_wait_idle(void)
{
        static int recursed = 0;

        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             idle = 0;

        LASSERT(!recursed);
        recursed = 1;

        do {
                liblustre_wait_event(0);

                idle = 1;

                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                idle = 0;
                                break;
                        }
                }

        } while (!idle);

        recursed = 0;
}

#endif /* !__KERNEL__ */

int ptlrpc_init_portals(void)
{
        int   rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback("liblustre_check_services",
                                                 &liblustre_check_services,
                                                 NULL);
        cfs_init_completion_module(liblustre_wait_event);
#endif
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}