/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
# include <liblustre.h>
#else
# include <libcfs/libcfs.h>
# ifdef __mips64__
#  include <linux/kernel.h>
# endif
#endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

lnet_handle_eq_t   ptlrpc_eq_h;

/*
 *  Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        sptlrpc_request_out_callback(req);
        req->rq_real_sent = cfs_time_current_sec();

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently...  */

                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);

                ptlrpc_client_wake_req(req);
        }

        ptlrpc_req_finished(req);

        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->md.start == req->rq_repbuf);
        LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
           for adaptive timeouts' early reply. */
        LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

        spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_must_unlink = 0;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncate = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

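        /* NB: because the reply MD is managed remotely (see the
         * LNET_MD_MANAGE_REMOTE assertion above), the server may PUT
         * into it more than once: an adaptive-timeout early reply lands
         * at offset 0, while the real reply arrives at whatever offset
         * the server chose, recorded below in rq_reply_off. */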
        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received: mlen=%u offset=%d replen=%d "
                          "replied=%d unlinked=%d", ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                if (req->rq_replied)   /* already got the real reply */
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                   so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        req->rq_import->imp_last_reply_time = cfs_time_current_sec();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        struct ptlrpc_request   *req;
        ENTRY;

        LASSERT((desc->bd_type == BULK_PUT_SINK &&
                 ev->type == LNET_EVENT_PUT) ||
                (desc->bd_type == BULK_GET_SOURCE &&
                 ev->type == LNET_EVENT_GET) ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);
        req = desc->bd_req;
        LASSERT(desc->bd_network_rw);
        desc->bd_network_rw = 0;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        } else {
                /* start reconnect and resend if network error hit */
                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);
        }

        /* release the encrypted pages for write */
        if (desc->bd_req->rq_bulk_write)
                sptlrpc_enc_pool_put_pages(desc);

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        ptlrpc_client_wake_req(req);

        spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * We will have percpt request history lists for ptlrpc services in upcoming
 * patches, because we don't want to be serialized by the current per-service
 * history operations. So we require that the history ID can (somehow) show
 * arrival order without grabbing a global lock, so that userspace can sort
 * requests by ID.
 *
 * This is how we generate the history ID for a ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X)bits  |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    | CPT id   |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */

#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT          32
#define REQS_USEC_SHIFT         16
#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)

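/*
 * A worked example with illustrative values (not from a real trace):
 * a request arriving at tv_sec = 0x50000000, tv_usec = 4096 on CPT 1
 * of a service with REQS_CPT_BITS(svcpt) == 2 gets the initial ID
 *
 *   (0x50000000ULL << REQS_SEC_SHIFT) |
 *   ((4096 >> 4) << REQS_USEC_SHIFT) | 1 == 0x5000000001000001ULL
 *
 * and each later request in the same usec bucket bumps the sequence
 * field by (1 << REQS_SEQ_SHIFT(svcpt)) == 4, giving ...01000005,
 * ...01000009, and so on.
 */
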
static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
{
        __u64   sec = req->rq_arrival_time.tv_sec;
        __u32   usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
        __u64   new_seq;

        /* set sequence ID for request and add it to the history list,
         * it must be called with svcpt::scp_lock held */

        new_seq = (sec << REQS_SEC_SHIFT) |
                  (usec << REQS_USEC_SHIFT) | svcpt->scp_cpt;
        if (new_seq > svcpt->scp_hist_seq) {
                /* This handles the initial case of scp_hist_seq == 0 or
                 * we just jumped into a new time window */
                svcpt->scp_hist_seq = new_seq;
        } else {
                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
                /* NB: increment the sequence number in the current usec
                 * bucket; however, it's possible that we have used up all
                 * the sequence bits and jumped into the next usec bucket
                 * (future time). In that case we hope there will be fewer
                 * RPCs per bucket at some point, so the sequence can catch
                 * up with real time again. */
                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
                new_seq = svcpt->scp_hist_seq;
        }

        req->rq_history_seq = new_seq;

        cfs_list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}

/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md.user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *service = svcpt->scp_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_PUT ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
        LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
                rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof(*req));
        } else {
                LASSERT(ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md.start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        cfs_gettimeofday(&req->rq_arrival_time);
        req->rq_peer = ev->initiator;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
        spin_lock_init(&req->rq_lock);
        CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
        cfs_atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

        spin_lock(&svcpt->scp_lock);

        ptlrpc_req_add_history(svcpt, req);

        if (ev->unlinked) {
                svcpt->scp_nrqbds_posted--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       svcpt->scp_nrqbds_posted);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    svcpt->scp_nrqbds_posted == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
        svcpt->scp_nreqs_incoming++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&svcpt->scp_waitq);

        spin_unlock(&svcpt->scp_lock);
        EXIT;
}

/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->md.user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_ACK ||
                ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT(ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT(rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                spin_lock(&svcpt->scp_rep_lock);
                spin_lock(&rs->rs_lock);

                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <=
                    rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply(rs);

                spin_unlock(&rs->rs_lock);
                spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
}

#ifdef HAVE_SERVER_SUPPORT
/*
 * Server's bulk completion callback
 */
void server_bulk_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md.user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_UNLINK ||
                (desc->bd_type == BULK_PUT_SOURCE &&
                 ev->type == LNET_EVENT_ACK) ||
                (desc->bd_type == BULK_GET_SINK &&
                 ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_success = 1;
                desc->bd_nob_transferred = ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->unlinked) {
                /* This is the last callback no matter what... */
                desc->bd_network_rw = 0;
                cfs_waitq_signal(&desc->bd_waitq);
        }

        spin_unlock(&desc->bd_lock);
        EXIT;
}
#endif

static void ptlrpc_master_callback(lnet_event_t *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

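        /* Every MD that ptlrpc posts carries a ptlrpc_cb_id in md.user_ptr:
         * cbid_fn points at one of the callbacks above and cbid_arg at the
         * matching request/desc/rs, so this single LNet handler dispatches
         * every ptlrpc event type. */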
        /* Honestly, it's best to find out early. */
        LASSERT(cbid->cbid_arg != LP_POISON);
        LASSERT(callback == request_out_callback ||
                callback == reply_in_callback ||
                callback == client_bulk_callback ||
                callback == request_in_callback ||
                callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
                || callback == server_bulk_callback
#endif
                );

        callback(ev);
}

int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                        lnet_process_id_t *peer, lnet_nid_t *self)
{
        int               best_dist = 0;
        __u32             best_order = 0;
        int               count = 0;
        int               rc = -ENOENT;
        int               portals_compatibility;
        int               dist;
        __u32             order;
        lnet_nid_t        dst_nid;
        lnet_nid_t        src_nid;

        portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);

        peer->pid = LUSTRE_SRV_LNET_PID;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        if (portals_compatibility > 1) {
                                /* Strong portals compatibility: Zero the nid's
                                 * NET, so if I'm reading new config logs, or
                                 * getting configured by (new) lconf I can
                                 * still talk to old servers. */
                                dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
                                src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
                        }
                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}

void ptlrpc_ni_fini(void)
{
        cfs_waitq_t         waitq;
        struct l_wait_info  lwi;
        int                 rc;
        int                 retries;

        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */

        for (retries = 0;; retries++) {
                rc = LNetEQFree(ptlrpc_eq_h);
                switch (rc) {
                default:
                        LBUG();

                case 0:
                        LNetNIFini();
                        return;

                case -EBUSY:
                        if (retries != 0)
                                CWARN("Event queue still busy\n");

                        /* Wait for a bit */
                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                        break;
                }
        }
        /* notreached */
}

lnet_pid_t ptl_get_pid(void)
{
        lnet_pid_t        pid;

#ifndef __KERNEL__
        pid = getpid();
#else
        pid = LUSTRE_SRV_LNET_PID;
#endif
        return pid;
}

int ptlrpc_ni_init(void)
{
        int              rc;
        lnet_pid_t       pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
                return (-ENOENT);
        }

        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on... */
#ifdef __KERNEL__
        /* kernel LNet calls our master callback when there are new events,
         * and we are guaranteed to get every event via the callback, so we
         * just set the EQ size to 0 to avoid the overhead of serializing
         * enqueue/dequeue operations in LNet. */
        rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
#else
        /* liblustre calls the master callback when it removes events from the
         * event queue.  The event queue has to be big enough not to drop
         * anything */
        rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &ptlrpc_eq_h);
#endif
        if (rc == 0)
                return 0;

        CERROR("Failed to allocate event queue: %d\n", rc);
        LNetNIFini();

        return (-ENOMEM);
}

#ifndef __KERNEL__
CFS_LIST_HEAD(liblustre_wait_callbacks);
CFS_LIST_HEAD(liblustre_idle_callbacks);
void *liblustre_services_callback;

void *
liblustre_register_waitidle_callback(cfs_list_t *callback_list,
                                     const char *name,
                                     int (*fn)(void *arg), void *arg)
{
        struct liblustre_wait_callback *llwc;

        OBD_ALLOC(llwc, sizeof(*llwc));
        LASSERT(llwc != NULL);

        llwc->llwc_name = name;
        llwc->llwc_fn = fn;
        llwc->llwc_arg = arg;
        cfs_list_add_tail(&llwc->llwc_list, callback_list);

        return (llwc);
}

void
liblustre_deregister_waitidle_callback(void *opaque)
{
        struct liblustre_wait_callback *llwc = opaque;

        cfs_list_del(&llwc->llwc_list);
        OBD_FREE(llwc, sizeof(*llwc));
}

void *
liblustre_register_wait_callback(const char *name,
                                 int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_wait_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_wait_callback(void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

void *
liblustre_register_idle_callback(const char *name,
                                 int (*fn)(void *arg), void *arg)
{
        return liblustre_register_waitidle_callback(&liblustre_idle_callbacks,
                                                    name, fn, arg);
}

void
liblustre_deregister_idle_callback(void *opaque)
{
        liblustre_deregister_waitidle_callback(opaque);
}

int
liblustre_check_events(int timeout)
{
        lnet_event_t ev;
        int         rc;
        int         i;
        ENTRY;

        rc = LNetEQPoll(&ptlrpc_eq_h, 1, timeout * 1000, &ev, &i);
        if (rc == 0)
                RETURN(0);

        LASSERT(rc == -EOVERFLOW || rc == 1);

        /* liblustre: no async callback so we can't afford to miss any
         * events... */
        if (rc == -EOVERFLOW) {
                CERROR("Dropped an event!!!\n");
                abort();
        }

        ptlrpc_master_callback(&ev);
        RETURN(1);
}

int liblustre_waiting = 0;

int
liblustre_wait_event(int timeout)
{
        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             found_something = 0;

        /* single threaded recursion check... */
        liblustre_waiting = 1;

        for (;;) {
                /* Deal with all pending events */
                while (liblustre_check_events(0))
                        found_something = 1;

                /* Give all registered callbacks a bite at the cherry */
                cfs_list_for_each(tmp, &liblustre_wait_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (llwc->llwc_fn(llwc->llwc_arg))
                                found_something = 1;
                }

                if (found_something || timeout == 0)
                        break;

                /* Nothing so far, but I'm allowed to block... */
                found_something = liblustre_check_events(timeout);
                if (!found_something)           /* still nothing */
                        break;                  /* I timed out */
        }

        liblustre_waiting = 0;

        return found_something;
}

void
liblustre_wait_idle(void)
{
        static int recursed = 0;

        cfs_list_t                     *tmp;
        struct liblustre_wait_callback *llwc;
        int                             idle = 0;

        LASSERT(!recursed);
        recursed = 1;

        do {
                liblustre_wait_event(0);

                idle = 1;

                cfs_list_for_each(tmp, &liblustre_idle_callbacks) {
                        llwc = cfs_list_entry(tmp,
                                              struct liblustre_wait_callback,
                                              llwc_list);

                        if (!llwc->llwc_fn(llwc->llwc_arg)) {
                                idle = 0;
                                break;
                        }
                }

        } while (!idle);

        recursed = 0;
}

#endif /* !__KERNEL__ */

int ptlrpc_init_portals(void)
{
        int   rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return -EIO;
        }
#ifndef __KERNEL__
        liblustre_services_callback =
                liblustre_register_wait_callback("liblustre_check_services",
                                                 &liblustre_check_services,
                                                 NULL);
        init_completion_module(liblustre_wait_event);
#endif
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
#ifndef __KERNEL__
        liblustre_deregister_wait_callback(liblustre_services_callback);
#endif
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}