/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
# include <liblustre.h>
#else
# include <libcfs/libcfs.h>
# ifdef __mips64__
#  include <linux/kernel.h>
# endif
#endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

lnet_handle_eq_t   ptlrpc_eq_h;

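/*
 * How these handlers get invoked (an illustrative sketch only, not code used
 * in this file): the sender embeds a struct ptlrpc_cb_id in the request,
 * reply buffer or bulk descriptor and passes it as the MD's user_ptr, roughly
 *
 *      req->rq_req_cbid.cbid_fn  = request_out_callback;
 *      req->rq_req_cbid.cbid_arg = req;
 *      md.user_ptr               = &req->rq_req_cbid;
 *
 * where md is the lnet_md_t being posted and the field name rq_req_cbid is
 * assumed from the sending side (client.c/niobuf.c).  ptlrpc_master_callback()
 * below then recovers the cb_id from ev->md.user_ptr and calls cbid_fn(ev);
 * only the cbid_fn/cbid_arg pair is relied on here.
 */
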
/*
 *  Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req,
                  "type %d, status %d", ev->type, ev->status);

        sptlrpc_request_out_callback(req);
        req->rq_real_sent = cfs_time_current_sec();

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently...  */

                cfs_spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                cfs_spin_unlock(&req->rq_lock);

                ptlrpc_client_wake_req(req);
        }

        ptlrpc_req_finished(req);

        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req,
                  "type %d, status %d", ev->type, ev->status);

        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->md.start == req->rq_repbuf);
        LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
           for adaptive timeouts' early reply. */
        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

        cfs_spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_must_unlink = 0;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_RPCTRACE, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncate = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received: mlen=%u offset=%d replen=%d "
                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                if (req->rq_replied)   /* already got the real reply */
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                   so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        cfs_spin_unlock(&req->rq_lock);
        EXIT;
}

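/*
 * What the woken waiter typically does with the flags set above (an
 * illustrative sketch only, taken under the same rq_lock; the helper names
 * handle_early_reply() and unpack_reply() are hypothetical):
 *
 *      cfs_spin_lock(&req->rq_lock);
 *      if (req->rq_early)
 *              handle_early_reply(req);   <- extend the request deadline
 *      else if (req->rq_replied)
 *              unpack_reply(req);         <- rq_repbuf now holds the reply
 *      cfs_spin_unlock(&req->rq_lock);
 */
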
/*
 * Client's bulk has been written/read
 */
void client_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
                  ev->type == LNET_EVENT_PUT) ||
                 (desc->bd_type == BULK_GET_SOURCE &&
                  ev->type == LNET_EVENT_GET) ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        /* release the encrypted pages for write */
        if (desc->bd_req->rq_bulk_write)
                sptlrpc_enc_pool_put_pages(desc);

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        ptlrpc_client_wake_req(desc->bd_req);

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}

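/*
 * Illustrative sketch (assumed wiring, not code from this file): the bulk
 * descriptor is attached to this callback when the bulk MD is registered,
 * roughly
 *
 *      desc->bd_cbid.cbid_fn  = client_bulk_callback;
 *      desc->bd_cbid.cbid_arg = desc;
 *      md.user_ptr            = &desc->bd_cbid;
 *
 * and the request thread then waits for bd_network_rw to drop to zero
 * (checked under bd_lock) before the pages backing the descriptor may be
 * reused.
 */
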
/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service             *service = rqbd->rqbd_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_PUT ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
                 rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof (*req));
        } else {
                LASSERT (ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        cfs_gettimeofday(&req->rq_arrival_time);
        req->rq_peer = ev->initiator;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
#ifdef CRAY_XT3
        req->rq_uid = ev->uid;
#endif
        cfs_spin_lock_init(&req->rq_lock);
        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        cfs_atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_RPCTRACE, "incoming req@%p x"LPU64" msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

        cfs_spin_lock(&service->srv_lock);

        req->rq_history_seq = service->srv_request_seq++;
        cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history);

        if (ev->unlinked) {
                service->srv_nrqbd_receiving--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       service->srv_nrqbd_receiving);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    service->srv_nrqbd_receiving == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue);
        service->srv_n_queued_reqs++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&service->srv_waitq);

        cfs_spin_unlock(&service->srv_lock);
        EXIT;
}

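/*
 * What happens to the queued request (an illustrative sketch of the service
 * thread's side, not code from this file): a thread sleeping on srv_waitq
 * wakes up and dequeues from srv_req_in_queue under srv_lock, roughly
 *
 *      cfs_spin_lock(&service->srv_lock);
 *      req = cfs_list_entry(service->srv_req_in_queue.next,
 *                           struct ptlrpc_request, rq_list);
 *      cfs_list_del_init(&req->rq_list);
 *      cfs_spin_unlock(&service->srv_lock);
 *
 * before unpacking rq_reqbuf and calling the service handler; the real
 * logic lives in service.c and is only paraphrased here.
 */
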
/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->md.user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_service     *svc = rs->rs_service;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_ACK ||
                 ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT (ev->unlinked);
                ptlrpc_rs_decref(rs);
                cfs_atomic_dec (&svc->srv_outstanding_replies);
                EXIT;
                return;
        }

        LASSERT (rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                cfs_spin_lock(&svc->srv_lock);
                cfs_spin_lock(&rs->rs_lock);
                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply (rs);
                cfs_spin_unlock(&rs->rs_lock);
                cfs_spin_unlock(&svc->srv_lock);
        }

        EXIT;
}

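/*
 * Context for the rs_difficult split above (an illustrative sketch of the
 * sending side, assumed rather than quoted from niobuf.c): "difficult"
 * replies carry state that must survive until the client has seen them, so
 * they are sent requesting an ACK and keep the net's ref on 'rs' until that
 * ACK (or an unlink) arrives here, roughly
 *
 *      rs->rs_on_net = 1;
 *      rc = ptl_send_buf(..., rs->rs_difficult ? LNET_ACK_REQ
 *                                              : LNET_NOACK_REQ, ...);
 *
 * "Easy" replies are fire-and-forget and are dropped directly above.
 */
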
/*
 * Server's bulk completion callback
 */
void server_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK ||
                 (desc->bd_type == BULK_PUT_SOURCE &&
                  ev->type == LNET_EVENT_ACK) ||
                 (desc->bd_type == BULK_GET_SINK &&
                  ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                cfs_waitq_signal(&desc->bd_waitq);
        }

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}

static void ptlrpc_master_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT (cbid->cbid_arg != LP_POISON);
        LASSERT (callback == request_out_callback ||
                 callback == reply_in_callback ||
                 callback == client_bulk_callback ||
                 callback == request_in_callback ||
                 callback == reply_out_callback ||
                 callback == server_bulk_callback);

        callback (ev);
}

int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
                         lnet_process_id_t *peer, lnet_nid_t *self)
{
        int               best_dist = 0;
        __u32             best_order = 0;
        int               count = 0;
        int               rc = -ENOENT;
        int               portals_compatibility;
        int               dist;
        __u32             order;
        lnet_nid_t        dst_nid;
        lnet_nid_t        src_nid;

        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

        peer->pid = LUSTRE_SRV_LNET_PID;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        if (portals_compatibility > 1) {
                                /* Strong portals compatibility: Zero the nid's
                                 * NET, so if I'm reading new config logs, or
                                 * getting configured by (new) lconf I can
                                 * still talk to old servers. */
                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
                        }
                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}

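/*
 * Typical use (an illustrative sketch; the UUID string and the caller are
 * hypothetical): connection setup resolves a peer UUID to the closest
 * NID pair before building the import's connection, e.g.
 *
 *      struct obd_uuid   uuid;
 *      lnet_process_id_t peer;
 *      lnet_nid_t        self;
 *
 *      obd_str2uuid(&uuid, "mds01_UUID");
 *      if (ptlrpc_uuid_to_peer(&uuid, &peer, &self) != 0)
 *              CERROR("cannot find peer for %s\n", uuid.uuid);
 */
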
void ptlrpc_ni_fini(void)
{
        cfs_waitq_t         waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages: client requests and "non-difficult" server
         * replies). */

        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();

                case 0:
                        LNetNIFini();
                        return;

                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");

                        /* Wait for a bit */
                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}

lnet_pid_t ptl_get_pid(void)
{
        lnet_pid_t        pid;

#ifndef __KERNEL__
        pid = getpid();
#else
        pid = LUSTRE_SRV_LNET_PID;
#endif
        return pid;
}

int ptlrpc_ni_init(void)
{
        int              rc;
        lnet_pid_t       pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
                return (-ENOENT);
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */
#ifdef __KERNEL__
        /* kernel portals calls our master callback when events are added to
         * the event queue.  In fact lustre never pulls events off this queue,
         * so it's only sized for some debug history. */
        rc = LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
#endif
        if (rc == 0)
                return 0;

        CERROR ("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();

        return (-ENOMEM);
}

#ifndef __KERNEL__
CFS_LIST_HEAD(liblustre_wait_callbacks);
CFS_LIST_HEAD(liblustre_idle_callbacks);
void *liblustre_services_callback;

void *
liblustre_register_waitidle_callback (cfs_list_t *callback_list,
                                      const char *name,
                                      int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT (llwc != NULL);

        llwc->llwc_name = name;
        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        cfs_list_add_tail(&llwc->llwc_list, callback_list);

        return (llwc);
}

void
liblustre_deregister_waitidle_callback (void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        cfs_list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}

void *
liblustre_register_wait_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_wait_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

void *
liblustre_register_idle_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_idle_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

int
liblustre_check_events (int timeout)
{
        lnet_event_t ev;
        int         rc;
        int         i;
        ENTRY;

        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
        if (rc == 0)
                RETURN(0);

        LASSERT (rc == -EOVERFLOW || rc == 1);

        /* liblustre: no asynch callback so we can't afford to miss any
         * events... */
        if (rc == -EOVERFLOW) {
                CERROR ("Dropped an event!!!\n");
                abort();
        }

        ptlrpc_master_callback (&ev);
        RETURN(1);
}

int liblustre_waiting = 0;

int
liblustre_wait_event (int timeout)
{
        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}

void
liblustre_wait_idle(void)
{
        static int recursed = 0;

        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             idle = 0;

        LASSERT(!recursed);
        recursed = 1;

        do {
                liblustre_wait_event(0);

                idle = 1;

                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                idle = 0;
                                break;
                        }
                }

        } while (!idle);

        recursed = 0;
}

#endif /* !__KERNEL__ */

int ptlrpc_init_portals(void)
{
        int   rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback("liblustre_check_services",
                                                 &liblustre_check_services,
                                                 NULL);
        cfs_init_completion_module(liblustre_wait_event);
#endif
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}