fs/lustre-release.git: lustre/ptlrpc/events.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
# include <liblustre.h>
#else
# include <libcfs/libcfs.h>
# ifdef __mips64__
#  include <linux/kernel.h>
# endif
#endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

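/* The single LNet event queue handle shared by all ptlrpc traffic; in the
 * kernel every event on it is delivered through ptlrpc_master_callback()
 * below, while liblustre polls it from liblustre_check_events(). */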
lnet_handle_eq_t   ptlrpc_eq_h;

/*
 *  Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req,
                  "type %d, status %d", ev->type, ev->status);

        sptlrpc_request_out_callback(req);

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently...  */

                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);

                ptlrpc_client_wake_req(req);
        }

        ptlrpc_req_finished(req);

        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ((ev->status == 0) ? D_NET : D_ERROR, req,
                  "type %d, status %d", ev->type, ev->status);

        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->md.start == req->rq_repbuf);
        LASSERT (ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
           for adaptive timeouts' early reply. */
        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

        spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;

        if (ev->status)
                goto out_wake;
        if (ev->type == LNET_EVENT_UNLINK) {
                req->rq_must_unlink = 0;
                DEBUG_REQ(D_RPCTRACE, req, "unlink");
                goto out_wake;
        }

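        /* With adaptive timeouts the reply MD is managed remotely (see the
         * LNET_MD_MANAGE_REMOTE assertion above): a reply landing at offset 0
         * of an AT-enabled request is treated as an early reply, anything
         * else as the real reply. */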
        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received: mlen=%u offset=%d replen=%d "
                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */
                if (req->rq_replied) {
                        /* If we already got the real reply, then we need to
                         * check if lnet_finalize() unlinked the md.  In that
                         * case, there will be no further callback of type
                         * LNET_EVENT_UNLINK.
                         */
                        if (ev->unlinked)
                                req->rq_must_unlink = 0;
                        else
                                DEBUG_REQ(D_RPCTRACE, req, "unlinked in reply");
                        goto out_wake;
                }
                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_replied = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                   so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
                  ev->type == LNET_EVENT_PUT) ||
                 (desc->bd_type == BULK_GET_SOURCE &&
                  ev->type == LNET_EVENT_GET) ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT (ev->unlinked);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

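        /* Only a successful PUT/GET records how much data actually moved;
         * on error or unlink bd_success is left unset and the waiter treats
         * the transfer as failed. */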
        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        sptlrpc_enc_pool_put_pages(desc);

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        ptlrpc_client_wake_req(desc->bd_req);

        spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service             *service = rqbd->rqbd_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_PUT ||
                 ev->type == LNET_EVENT_UNLINK);
        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
                 rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof (*req));
        } else {
                LASSERT (ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        do_gettimeofday(&req->rq_arrival_time);
        req->rq_peer = ev->initiator;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
#ifdef CRAY_XT3
        req->rq_uid = ev->uid;
#endif
        spin_lock_init(&req->rq_lock);
        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                DEBUG_REQ(D_RPCTRACE, req, "incoming req");

        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

        spin_lock(&service->srv_lock);

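        /* Assign a history sequence number and add the request to the
         * per-service request history list for later inspection. */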
        req->rq_history_seq = service->srv_request_seq++;
        list_add_tail(&req->rq_history_list, &service->srv_request_history);

        if (ev->unlinked) {
                service->srv_nrqbd_receiving--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       service->srv_nrqbd_receiving);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    service->srv_nrqbd_receiving == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        list_add_tail(&req->rq_list, &service->srv_req_in_queue);
        service->srv_n_queued_reqs++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&service->srv_waitq);

        spin_unlock(&service->srv_lock);
        EXIT;
}

/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->md.user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_service     *svc = rs->rs_service;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_ACK ||
                 ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT (ev->unlinked);
                ptlrpc_rs_decref(rs);
                atomic_dec (&svc->srv_outstanding_replies);
                EXIT;
                return;
        }

        LASSERT (rs->rs_on_net);

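        /* Once the network is finished with the buffer, schedule the reply
         * for final handling if an ACK is expected or its transaction has
         * already committed. */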
        if (ev->unlinked) {
                /* Last network callback.  The net's ref on 'rs' stays put
                 * until ptlrpc_server_handle_reply() is done with it */
                spin_lock(&svc->srv_lock);
                spin_lock(&rs->rs_lock);
                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <= rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply (rs);
                spin_unlock(&rs->rs_lock);
                spin_unlock(&svc->srv_lock);
        }

        EXIT;
}

/*
 * Server's bulk completion callback
 */
void server_bulk_callback (lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT (ev->type == LNET_EVENT_SEND ||
                 ev->type == LNET_EVENT_UNLINK ||
                 (desc->bd_type == BULK_PUT_SOURCE &&
                  ev->type == LNET_EVENT_ACK) ||
                 (desc->bd_type == BULK_GET_SINK &&
                  ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                cfs_waitq_signal(&desc->bd_waitq);
        }

        spin_unlock(&desc->bd_lock);
        EXIT;
}

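/*
 * The one handler registered on ptlrpc_eq_h: recover the ptlrpc_cb_id stashed
 * in the MD's user pointer and dispatch to the matching callback above.
 */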
static void ptlrpc_master_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT (cbid->cbid_arg != LP_POISON);
        LASSERT (callback == request_out_callback ||
                 callback == reply_in_callback ||
                 callback == client_bulk_callback ||
                 callback == request_in_callback ||
                 callback == reply_out_callback ||
                 callback == server_bulk_callback);

        callback (ev);
}

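/*
 * Resolve a UUID to the "closest" LNet peer: walk every NID registered for
 * the UUID, prefer the smallest LNetDist() distance (breaking ties on the
 * lower order), and short-circuit to the loopback LND for local targets.
 * Returns 0 with *peer / *self filled in, or -ENOENT if no NID matched.
 */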
int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
                         lnet_process_id_t *peer, lnet_nid_t *self)
{
        int               best_dist = 0;
        __u32             best_order = 0;
        int               count = 0;
        int               rc = -ENOENT;
        int               portals_compatibility;
        int               dist;
        __u32             order;
        lnet_nid_t        dst_nid;
        lnet_nid_t        src_nid;

        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

        peer->pid = LUSTRE_SRV_LNET_PID;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        if (portals_compatibility > 1) {
                                /* Strong portals compatibility: Zero the nid's
                                 * NET, so if I'm reading new config logs, or
                                 * getting configured by (new) lconf I can
                                 * still talk to old servers. */
                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
                        }
                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET,"%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        if (rc != 0)
                CERROR("No NID found for %s\n", uuid->uuid);
        return rc;
}

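/*
 * Shut ptlrpc's networking down: keep retrying LNetEQFree() while the event
 * queue still has events outstanding (sleeping a couple of seconds between
 * attempts), then finalize the network interface.
 */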
void ptlrpc_ni_fini(void)
{
        cfs_waitq_t         waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */

        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();

                case 0:
                        LNetNIFini();
                        return;

                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");

                        /* Wait for a bit */
                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}

lnet_pid_t ptl_get_pid(void)
{
        lnet_pid_t        pid;

#ifndef  __KERNEL__
        pid = getpid();
#else
        pid = LUSTRE_SRV_LNET_PID;
#endif
        return pid;
}

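/*
 * Bring the network interface up under the ptlrpc PID and allocate the global
 * event queue: callback-driven (and sized only for debug history) in the
 * kernel, polled (and therefore large) for liblustre.
 */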
int ptlrpc_ni_init(void)
{
        int              rc;
        lnet_pid_t       pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
                return (-ENOENT);
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */
#ifdef __KERNEL__
        /* kernel portals calls our master callback when events are added to
         * the event queue.  In fact lustre never pulls events off this queue,
         * so it's only sized for some debug history. */
        rc = LNetEQAlloc(1024, ptlrpc_master_callback, &ptlrpc_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
#endif
        if (rc == 0)
                return 0;

        CERROR ("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();

        return (-ENOMEM);
}

#ifndef __KERNEL__
CFS_LIST_HEAD(liblustre_wait_callbacks);
CFS_LIST_HEAD(liblustre_idle_callbacks);
void *liblustre_services_callback;

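/* liblustre is single-threaded, so instead of kernel service threads it keeps
 * lists of "wait" and "idle" callbacks that liblustre_wait_event() and
 * liblustre_wait_idle() below poll by hand. */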
void *
liblustre_register_waitidle_callback (struct list_head *callback_list,
                                      const char *name,
                                      int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT (llwc != NULL);

        llwc->llwc_name = name;
        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        list_add_tail(&llwc->llwc_list, callback_list);

        return (llwc);
}

void
liblustre_deregister_waitidle_callback (void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}

void *
liblustre_register_wait_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_wait_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

void *
liblustre_register_idle_callback (const char *name,
                                  int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_idle_callback (void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

int
liblustre_check_events (int timeout)
{
        lnet_event_t ev;
        int         rc;
        int         i;
        ENTRY;

        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
        if (rc == 0)
                RETURN(0);

        LASSERT (rc == -EOVERFLOW || rc == 1);

        /* liblustre: no asynch callback so we can't afford to miss any
         * events... */
        if (rc == -EOVERFLOW) {
                CERROR ("Dropped an event!!!\n");
                abort();
        }

        ptlrpc_master_callback (&ev);
        RETURN(1);
}

int liblustre_waiting = 0;

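/*
 * Drain any pending events, give every registered wait callback a chance to
 * run, and only block (for up to 'timeout') when nothing has made progress.
 * Returns non-zero if any event or callback reported activity.
 */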
int
liblustre_wait_event (int timeout)
{
        struct list_head               *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = list_entry(tmp, struct liblustre_wait_callback,
                                          llwc_list);

                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}

void
liblustre_wait_idle(void)
{
        static int recursed = 0;

        struct list_head               *tmp;
        struct liblustre_wait_callback *llwc;
        int                             idle = 0;

        LASSERT(!recursed);
        recursed = 1;

        do {
                liblustre_wait_event(0);

                idle = 1;

                list_for_each(tmp, &liblustre_idle_callbacks) {
                        llwc = list_entry(tmp, struct liblustre_wait_callback,
                                          llwc_list);

                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                idle = 0;
                                break;
                        }
                }

        } while (!idle);

        recursed = 0;
}

#endif /* __KERNEL__ */

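/*
 * Module-level setup: initialize the LNet interface and event queue, hook the
 * liblustre service poller into the wait-callback list when building for
 * userspace, and take a reference on ptlrpcd.
 */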
int ptlrpc_init_portals(void)
{
        int   rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback("liblustre_check_services",
                                                 &liblustre_check_services,
                                                 NULL);
        init_completion_module(liblustre_wait_event);
#endif
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}