lustre/ptlrpc/events.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

lnet_handler_t ptlrpc_handler;
struct percpu_ref ptlrpc_pending;

/*
 * Client's outgoing request callback
 */
void request_out_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md_user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        bool                   wakeup = false;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val &&
                     CFS_FAIL_CHECK_RESET(OBD_FAIL_NET_ERROR_RPC,
                                          OBD_FAIL_OSP_PRECREATE_PAUSE |
                                          CFS_FAIL_ONCE)))
                ev->status = -ECONNABORTED;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        /* Do not update imp_next_ping for connection request */
        if (lustre_msg_get_opc(req->rq_reqmsg) !=
            req->rq_import->imp_connect_op)
                ptlrpc_pinger_sending_on_import(req->rq_import);

        sptlrpc_request_out_callback(req);

        spin_lock(&req->rq_lock);
        req->rq_real_sent = ktime_get_real_seconds();
        req->rq_req_unlinked = 1;
        /* reply_in_callback happened before request_out_callback? */
        if (req->rq_reply_unlinked)
                wakeup = true;

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently... */
                req->rq_net_err = 1;
                wakeup = true;
        }

        if (wakeup)
                ptlrpc_client_wake_req(req);

        spin_unlock(&req->rq_lock);

        ptlrpc_req_finished(req);
        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md_user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->md_start == req->rq_repbuf);
        LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
         * for adaptive timeouts' early reply.
         */
        LASSERT((ev->md_options & LNET_MD_MANAGE_REMOTE) != 0);

        spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_reply_unlinked = 1;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncated = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received, mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
                          ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                /* already got the real reply or buffers are already unlinked */
                if (req->rq_replied ||
                    req->rq_reply_unlinked == 1)
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                /* Got reply, no resend required */
                req->rq_resend = 0;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                 * so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
                req->rq_import->imp_last_reply_time = ktime_get_real_seconds();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md_user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        struct ptlrpc_request   *req;
        ENTRY;

        LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
                 ev->type == LNET_EVENT_PUT) ||
                (ptlrpc_is_bulk_get_source(desc->bd_type) &&
                 ev->type == LNET_EVENT_GET) ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);
        req = desc->bd_req;
        LASSERT(desc->bd_refs > 0);
        desc->bd_refs--;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_nob_transferred += ev->mlength;
                desc->bd_sender = ev->sender;
        } else {
                /* start reconnect and resend if network error hit */
                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);
        }

        if (ev->status != 0)
                desc->bd_failure = 1;

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        if (desc->bd_refs == 0)
                ptlrpc_client_wake_req(desc->bd_req);

        spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * We will have percpt request history list for ptlrpc service in upcoming
 * patches because we don't want to be serialized by current per-service
 * history operations. So we require that the history ID can (somehow) show
 * arrival order without grabbing a global lock, and users can sort them in
 * userspace.
 *
 * This is how we generate history ID for ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X)bits  |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    | CPT id   |
 * ----------------------------------------------------
 *
 * It might not be precise but should be good enough.
 */
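
/*
 * Worked example of the layout above (illustrative values, not from a
 * real trace): a request arriving at 1700000000.000321s on CPT 2 of a
 * service with srv_cpt_bits = 2 packs as
 *
 *      sec  = 1700000000          -> bits 63..32
 *      usec = 321 / 16 = 20       -> bits 31..16
 *      seq  = 0 (first in bucket) -> bits 15..2
 *      cpt  = 2                   -> bits  1..0
 *
 * i.e. new_seq = (1700000000ULL << 32) | (20 << 16) | 2.  A second
 * request in the same usec bucket gets seq = 1, i.e. the previous ID
 * plus (1 << 2); see ptlrpc_req_add_history() below.
 */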

#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT          32
#define REQS_USEC_SHIFT         16
#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
{
        u64 sec = req->rq_arrival_time.tv_sec;
        u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
        u64 new_seq;

        /* set sequence ID for request and add it to history list,
         * this must be called while holding svcpt::scp_lock */

        new_seq = (sec << REQS_SEC_SHIFT) |
                  (usec << REQS_USEC_SHIFT) |
                  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

        if (new_seq > svcpt->scp_hist_seq) {
                /* This handles the initial case of scp_hist_seq == 0 or
                 * we just jumped into a new time window */
                svcpt->scp_hist_seq = new_seq;
        } else {
                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
                /* NB: increase sequence number in current usec bucket,
                 * however, it's possible that we used up all bits for
                 * sequence and jumped into the next usec bucket (future time),
                 * then we hope there will be fewer RPCs per bucket at some
                 * point, and sequence will catch up again */
                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
                new_seq = svcpt->scp_hist_seq;
        }

        req->rq_history_seq = new_seq;

        list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
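
/*
 * Since the comment above says users sort these IDs in userspace, here is
 * a minimal decoding sketch (a hypothetical helper, not part of this
 * file), assuming the reader knows the service's srv_cpt_bits:
 *
 *      static void decode_hist_seq(u64 id, unsigned int cpt_bits)
 *      {
 *              u64 sec  = id >> 32;                    // REQS_SEC_SHIFT
 *              u32 usec = ((id >> 16) & 0xffff) * 16;  // bucket -> ~usec
 *              u32 seq  = (id & 0xffff) >> cpt_bits;
 *              u32 cpt  = id & ((1U << cpt_bits) - 1);
 *
 *              printf("%llu.%06us seq %u cpt %u\n",
 *                     (unsigned long long)sec, usec, seq, cpt);
 *      }
 */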

/*
 * Server's incoming request callback
 */
void request_in_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md_user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *service = svcpt->scp_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_PUT ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT((char *)ev->md_start >= rqbd->rqbd_buffer);
        LASSERT((char *)ev->md_start + ev->offset + ev->mlength <=
                rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof(*req));
        } else {
                LASSERT(ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: "
                               "Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        ptlrpc_srv_req_init(req);
        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md_start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        ktime_get_real_ts64(&req->rq_arrival_time);
        /* Multi-Rail: keep track of both initiator and source NID. */
        req->rq_peer = ev->initiator;
        req->rq_source = ev->source;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s (source: %s)\n",
               libcfs_id2str(req->rq_peer), libcfs_id2str(req->rq_source));

        spin_lock(&svcpt->scp_lock);

        ptlrpc_req_add_history(svcpt, req);

        if (ev->unlinked) {
                svcpt->scp_nrqbds_posted--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       svcpt->scp_nrqbds_posted);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    svcpt->scp_nrqbds_posted == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
        svcpt->scp_nreqs_incoming++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        wake_up(&svcpt->scp_waitq);

        spin_unlock(&svcpt->scp_lock);
        EXIT;
}

/*
 * Server's outgoing reply callback
 */
void reply_out_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id        *cbid = ev->md_user_ptr;
        struct ptlrpc_reply_state  *rs = cbid->cbid_arg;
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_ACK ||
                ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT(ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT(rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                spin_lock(&svcpt->scp_rep_lock);
                spin_lock(&rs->rs_lock);

                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <=
                    rs->rs_export->exp_obd->obd_last_committed ||
                    list_empty(&rs->rs_obd_list))
                        ptlrpc_schedule_difficult_reply(rs);

                spin_unlock(&rs->rs_lock);
                spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
}

#ifdef HAVE_SERVER_SUPPORT
/*
 * Server's bulk completion callback
 */
void server_bulk_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md_user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_UNLINK ||
                (ptlrpc_is_bulk_put_source(desc->bd_type) &&
                 ev->type == LNET_EVENT_ACK) ||
                (ptlrpc_is_bulk_get_sink(desc->bd_type) &&
                 ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_refs > 0);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_nob_transferred += ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->status != 0)
                desc->bd_failure = 1;

        if (ev->unlinked) {
                desc->bd_refs--;
                /* This is the last callback no matter what... */
                if (desc->bd_refs == 0)
                        wake_up(&desc->bd_waitq);
        }

        spin_unlock(&desc->bd_lock);
        EXIT;
}
#endif

static void ptlrpc_master_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
        void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT(cbid->cbid_arg != LP_POISON);
        LASSERT(callback == request_out_callback ||
                callback == reply_in_callback ||
                callback == client_bulk_callback ||
                callback == request_in_callback ||
                callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
                || callback == server_bulk_callback
#endif
                );

        callback(ev);
        if (ev->unlinked)
                percpu_ref_put(&ptlrpc_pending);
}

int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                        struct lnet_process_id *peer, lnet_nid_t *self)
{
        int best_dist = 0;
        __u32 best_order = 0;
        int count = 0;
        int rc = -ENOENT;
        int dist;
        __u32 order;
        lnet_nid_t dst_nid;
        lnet_nid_t src_nid;

        peer->pid = LNET_PID_LUSTRE;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                if (peer->nid != LNET_NID_ANY && LNET_NIDADDR(peer->nid) == 0 &&
                    LNET_NIDNET(dst_nid) != LNET_NIDNET(peer->nid))
                        continue;

                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_NID_LO_0;
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}
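
/*
 * Illustrative example for the selection above (hypothetical NIDs, not
 * from a real config): if the UUID resolves to both 10.0.0.1@o2ib at
 * distance 1 and 192.168.1.1@tcp at distance 2, the loop keeps
 * 10.0.0.1@o2ib; if both were at distance 1, the NID with the lower
 * "order" reported by LNetDist() wins.  A distance of 0 means the peer
 * is this node, so the loopback NID is used directly.
 */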

static struct completion ptlrpc_done;

static void ptlrpc_release(struct percpu_ref *ref)
{
        complete(&ptlrpc_done);
}
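
/*
 * Note on the ptlrpc_pending lifecycle (as I read it; references are taken
 * where MDs are posted, outside this file): each in-flight MD is expected
 * to hold a reference that ptlrpc_master_callback() drops when the MD's
 * final event is unlinked.  percpu_ref_kill() in ptlrpc_ni_fini() below
 * both refuses new references and releases the initial one, so
 * ptlrpc_release() fires once the last in-flight MD is unlinked.
 */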

static void ptlrpc_ni_fini(void)
{
        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */

        init_completion(&ptlrpc_done);
        percpu_ref_kill(&ptlrpc_pending);
        wait_for_completion(&ptlrpc_done);

        lnet_assert_handler_unused(ptlrpc_handler);
        LNetNIFini();
}

lnet_pid_t ptl_get_pid(void)
{
        return LNET_PID_LUSTRE;
}

int ptlrpc_ni_init(void)
{
        int rc;
        lnet_pid_t pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG(D_NET, "ptlrpc: Can't init network interface: rc = %d\n",
                       rc);
                return rc;
        }

        rc = percpu_ref_init(&ptlrpc_pending, ptlrpc_release, 0, GFP_KERNEL);
        if (rc) {
                CERROR("ptlrpc: Can't init percpu refcount: rc = %d\n", rc);
                return rc;
        }
        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on...
         */
        /* kernel LNet calls our master callback when there is a new event;
         * because we are guaranteed to get every event via callback,
         * we just set the EQ size to 0 to avoid the overhead of serializing
         * enqueue/dequeue operations in LNet. */
        ptlrpc_handler = ptlrpc_master_callback;
        return 0;
}

int ptlrpc_init_portals(void)
{
        int rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return rc;
        }
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}