LU-1039 ptlrpc: handle bulk IO errors correctly.
lustre/ptlrpc/events.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
# include <liblustre.h>
#else
# include <libcfs/libcfs.h>
# ifdef __mips64__
#  include <linux/kernel.h>
# endif
#endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

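/* The single LNet event queue that every MD posted by ptlrpc attaches to;
 * all network completions are delivered through it and dispatched by
 * ptlrpc_master_callback() below. */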
lnet_handle_eq_t   ptlrpc_eq_h;

/*
 *  Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        sptlrpc_request_out_callback(req);
        req->rq_real_sent = cfs_time_current_sec();

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c do currently...  */

                cfs_spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                cfs_spin_unlock(&req->rq_lock);

                ptlrpc_client_wake_req(req);
        }

        ptlrpc_req_finished(req);

        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->md.start == req->rq_repbuf);
        LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
           for adaptive timeouts' early reply. */
        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

        cfs_spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_must_unlink = 0;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncate = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received: mlen=%u offset=%d replen=%d "
                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                if (req->rq_replied)   /* already got the real reply */
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                   so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        cfs_spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
                  ev->type == LNET_EVENT_PUT) ||
                 (desc->bd_type == BULK_GET_SOURCE &&
                  ev->type == LNET_EVENT_GET) ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

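        /* Fault injection for bulk IO error handling (LU-1039): when the
         * matching fail_loc is armed (e.g. "lctl set_param fail_loc=..."
         * with the value of one of the OBD_FAIL_PTLRPC_CLIENT_BULK_CB*
         * constants), pretend the transfer failed so the error paths
         * below are exercised. */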
        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        /* release the encrypted pages for write */
        if (desc->bd_req->rq_bulk_write)
                sptlrpc_enc_pool_put_pages(desc);

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        ptlrpc_client_wake_req(desc->bd_req);

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service             *service = rqbd->rqbd_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_PUT ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
                 rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof (*req));
        } else {
                LASSERT (ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        cfs_gettimeofday(&req->rq_arrival_time);
        req->rq_peer = ev->initiator;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
#ifdef CRAY_XT3
        req->rq_uid = ev->uid;
#endif
        cfs_spin_lock_init(&req->rq_lock);
        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        cfs_atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

        cfs_spin_lock(&service->srv_lock);

        req->rq_history_seq = service->srv_request_seq++;
        cfs_list_add_tail(&req->rq_history_list, &service->srv_request_history);

        if (ev->unlinked) {
                service->srv_nrqbd_receiving--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       service->srv_nrqbd_receiving);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    service->srv_nrqbd_receiving == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

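        /* Hand the request to the service: a service thread will pick it
         * off srv_req_in_queue (see ptlrpc_server_handle_req_in() in
         * service.c) once we signal the waitq below. */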
        cfs_list_add_tail(&req->rq_list, &service->srv_req_in_queue);
        service->srv_n_queued_reqs++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&service->srv_waitq);

        cfs_spin_unlock(&service->srv_lock);
        EXIT;
}

/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->md.user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_service     *svc = rs->rs_service;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_ACK ||
                 ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT (ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT (rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                cfs_spin_lock(&svc->srv_rs_lock);
                cfs_spin_lock(&rs->rs_lock);
                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply (rs);
                cfs_spin_unlock(&rs->rs_lock);
                cfs_spin_unlock(&svc->srv_rs_lock);
        }

        EXIT;
}

/*
 * Server's bulk completion callback
 */
void server_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK ||
                 (desc->bd_type == BULK_PUT_SOURCE &&
                  ev->type == LNET_EVENT_ACK) ||
                 (desc->bd_type == BULK_GET_SINK &&
                  ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        cfs_spin_lock(&desc->bd_lock);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                cfs_waitq_signal(&desc->bd_waitq);
        }

        cfs_spin_unlock(&desc->bd_lock);
        EXIT;
}

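/* Every MD ptlrpc posts carries a struct ptlrpc_cb_id in md.user_ptr, so the
 * master callback only has to dispatch on it.  As a sketch of the usual
 * registration convention elsewhere in ptlrpc (field names as declared in
 * lustre_net.h):
 *
 *      req->rq_req_cbid.cbid_fn  = request_out_callback;
 *      req->rq_req_cbid.cbid_arg = req;
 */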
static void ptlrpc_master_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT (cbid->cbid_arg != LP_POISON);
        LASSERT (callback == request_out_callback ||
                 callback == reply_in_callback ||
                 callback == client_bulk_callback ||
                 callback == request_in_callback ||
                 callback == reply_out_callback ||
                 callback == server_bulk_callback);

        callback (ev);
}

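/* Map a Lustre UUID to the closest LNet peer, and pick the local NID to use
 * to reach it; returns 0 on success, or -ENOENT if the UUID resolves to no
 * usable NID. */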
int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
                         lnet_process_id_t *peer, lnet_nid_t *self)
{
        int               best_dist = 0;
        __u32             best_order = 0;
        int               count = 0;
        int               rc = -ENOENT;
        int               portals_compatibility;
        int               dist;
        __u32             order;
        lnet_nid_t        dst_nid;
        lnet_nid_t        src_nid;

        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

        peer->pid = LUSTRE_SRV_LNET_PID;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        if (portals_compatibility > 1) {
                                /* Strong portals compatibility: Zero the nid's
                                 * NET, so if I'm reading new config logs, or
                                 * getting configured by (new) lconf I can
                                 * still talk to old servers. */
                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
                        }
                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}

void ptlrpc_ni_fini(void)
{
        cfs_waitq_t         waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */

        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();

                case 0:
                        LNetNIFini();
                        return;

                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");

                        /* Wait for a bit */
                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}

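/* The LNet PID this process binds to: the well-known server PID in the
 * kernel, the real process pid for userspace liblustre. */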
lnet_pid_t ptl_get_pid(void)
{
        lnet_pid_t        pid;

#ifndef  __KERNEL__
        pid = getpid();
#else
        pid = LUSTRE_SRV_LNET_PID;
#endif
        return pid;
}

int ptlrpc_ni_init(void)
{
        int              rc;
        lnet_pid_t       pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
                return (-ENOENT);
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on whether we're in the kernel or in
         * liblustre: */
#ifdef __KERNEL__
        /* kernel portals calls our master callback when events are added to
         * the event queue.  In fact lustre never pulls events off this queue,
         * so it's only sized for some debug history. */
        rc = LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
#endif
        if (rc == 0)
                return 0;

        CERROR ("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();

        return (-ENOMEM);
}


#ifndef __KERNEL__
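/* Registries for liblustre's single-threaded event loop: "wait" callbacks
 * are polled for work on every pass through liblustre_wait_event(); "idle"
 * callbacks decide when liblustre_wait_idle() may return. */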
CFS_LIST_HEAD(liblustre_wait_callbacks);
CFS_LIST_HEAD(liblustre_idle_callbacks);
void *liblustre_services_callback;

void *
liblustre_register_waitidle_callback (cfs_list_t *callback_list,
                                      const char *name,
                                      int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT (llwc != NULL);

        llwc->llwc_name = name;
        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        cfs_list_add_tail(&llwc->llwc_list, callback_list);

        return (llwc);
}

void
liblustre_deregister_waitidle_callback (void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        cfs_list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}

void *
liblustre_register_wait_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_wait_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

void *
liblustre_register_idle_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_idle_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

int
liblustre_check_events (int timeout)
{
        lnet_event_t ev;
        int         rc;
        int         i;
        ENTRY;

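        /* LNetEQPoll() takes its timeout in milliseconds, hence the
         * conversion from seconds here */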
        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
        if (rc == 0)
                RETURN(0);

        LASSERT (rc == -EOVERFLOW || rc == 1);

        /* liblustre: no asynch callback so we can't afford to miss any
         * events... */
        if (rc == -EOVERFLOW) {
                CERROR ("Dropped an event!!!\n");
                abort();
        }

        ptlrpc_master_callback (&ev);
        RETURN(1);
}

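/* Nonzero while liblustre_wait_event() is running; a single-threaded
 * recursion check, visible to other liblustre code. */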
int liblustre_waiting = 0;

int
liblustre_wait_event (int timeout)
{
        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}

void
liblustre_wait_idle(void)
{
        static int recursed = 0;

        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             idle = 0;

        LASSERT(!recursed);
        recursed = 1;

        do {
                liblustre_wait_event(0);

                idle = 1;

                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                idle = 0;
                                break;
                        }
                }

        } while (!idle);

        recursed = 0;
}

#endif /* !__KERNEL__ */

int ptlrpc_init_portals(void)
{
        int   rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback("liblustre_check_services",
                                                 &liblustre_check_services,
                                                 NULL);
        cfs_init_completion_module(liblustre_wait_event);
#endif
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}