LU-12567 ptlrpc: handle reply and resend reorder
lustre/ptlrpc/events.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

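/*
 * ptlrpc_handler is the single LNet event handler attached to every MD
 * that ptlrpc posts; it dispatches to the per-MD callback recorded in
 * the ptlrpc_cb_id stored in md_user_ptr (see ptlrpc_master_callback()).
 * ptlrpc_pending counts MDs that are still linked, so module shutdown
 * can wait in ptlrpc_ni_fini() until every event has been delivered.
 */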
lnet_handler_t ptlrpc_handler;
struct percpu_ref ptlrpc_pending;

/*
 * Client's outgoing request callback
 */
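/*
 * NB: LNet can deliver the reply event before the send completion for
 * the same request, which is why rq_req_unlinked and rq_reply_unlinked
 * are tracked separately; this callback only wakes the waiter if the
 * reply has already been unlinked or the send itself failed.
 */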
void request_out_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md_user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        bool                   wakeup = false;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        if (unlikely(lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val &&
                     CFS_FAIL_CHECK_RESET(OBD_FAIL_NET_ERROR_RPC,
                                          OBD_FAIL_OSP_PRECREATE_PAUSE |
                                          CFS_FAIL_ONCE)))
                ev->status = -ECONNABORTED;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        /* Do not update imp_next_ping for connection request */
        if (lustre_msg_get_opc(req->rq_reqmsg) !=
            req->rq_import->imp_connect_op)
                ptlrpc_pinger_sending_on_import(req->rq_import);

        sptlrpc_request_out_callback(req);

        spin_lock(&req->rq_lock);
        req->rq_real_sent = ktime_get_real_seconds();
        req->rq_req_unlinked = 1;
        /* reply_in_callback happened before request_out_callback? */
        if (req->rq_reply_unlinked)
                wakeup = true;

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently...
                 */
                req->rq_net_err = 1;
                wakeup = true;
        }

        if (wakeup)
                ptlrpc_client_wake_req(req);

        spin_unlock(&req->rq_lock);

        ptlrpc_req_finished(req);
        EXIT;
}

/*
 * Client's incoming reply callback
 */
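/*
 * With adaptive timeouts the server may first send a small "early
 * reply" to extend the request deadline before the real reply is
 * ready; it is recognized here by arriving at offset 0 of the
 * remotely-managed reply buffer, while a real reply (with AT support)
 * lands at a non-zero offset.
 */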
void reply_in_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md_user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->md_start == req->rq_repbuf);
        LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
         * for adaptive timeouts' early reply.
         */
        LASSERT((ev->md_options & LNET_MD_MANAGE_REMOTE) != 0);

        spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_reply_unlinked = 1;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncated = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

        if (ev->offset == 0 &&
            (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received, mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
                          ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                /* already got the real reply or buffers are already unlinked */
                if (req->rq_replied ||
                    req->rq_reply_unlinked == 1)
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                /* Got reply, no resend required */
                req->rq_resend = 0;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                 * so we must unlink in ptlrpc_unregister_reply
                 */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

        if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
                req->rq_import->imp_last_reply_time = ktime_get_real_seconds();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref
         */
        ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
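/*
 * Note: a bulk descriptor may hold multiple network references
 * (bd_refs), so the request is only woken once the last callback has
 * dropped its reference.  Any error or unlink marks rq_net_err so the
 * request is reconnected and resent.
 */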
void client_bulk_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md_user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        struct ptlrpc_request   *req;
        ENTRY;

        LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
                 ev->type == LNET_EVENT_PUT) ||
                (ptlrpc_is_bulk_get_source(desc->bd_type) &&
                 ev->type == LNET_EVENT_GET) ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR,
                     "event type %d, status %d, desc %p\n",
                     ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);
        req = desc->bd_req;
        LASSERT(desc->bd_refs > 0);
        desc->bd_refs--;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_nob_transferred += ev->mlength;
                desc->bd_sender = ev->sender;
        } else {
                /* start reconnect and resend if network error hit */
                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);
                desc->bd_failure = 1;
        }

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise
         */
        if (desc->bd_refs == 0)
                ptlrpc_client_wake_req(desc->bd_req);

        spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * We will have a percpt request history list for each ptlrpc service in
 * upcoming patches, because we don't want to be serialized by the current
 * per-service history operations. So we require that the history ID can
 * (somehow) reflect arrival order without grabbing a global lock, letting
 * users sort entries in userspace.
 *
 * This is how we generate the history ID for a ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X) bits |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    |  CPT id  |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */
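/*
 * Worked example (illustrative numbers only): with one CPT bit (X = 1),
 * a request arriving at sec = 0x60000000 in usec bucket 0x1234 on CPT 1
 * gets
 *      (0x60000000ULL << 32) | (0x1234 << 16) | 1 = 0x6000000012340001
 * and a second request in the same bucket advances the sequence field
 * instead: scp_hist_seq += (1 << REQS_SEQ_SHIFT) yields
 * 0x6000000012340003 (sequence 1, CPT id still 1).
 */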

#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT          32
#define REQS_USEC_SHIFT         16
#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
{
        u64 sec = req->rq_arrival_time.tv_sec;
        u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
        u64 new_seq;

        /* set the sequence ID for the request and add it to the history
         * list; must be called with svcpt->scp_lock held
         */

        new_seq = (sec << REQS_SEC_SHIFT) |
                  (usec << REQS_USEC_SHIFT) |
                  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

        if (new_seq > svcpt->scp_hist_seq) {
                /* This handles the initial case of scp_hist_seq == 0 or
                 * the case where we just jumped into a new time window
                 */
                svcpt->scp_hist_seq = new_seq;
        } else {
                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
                /* NB: increase the sequence number in the current usec
                 * bucket.  It's possible that we use up all the bits of
                 * the sequence and jump into the next usec bucket (future
                 * time); we then hope there will be fewer RPCs per bucket
                 * at some point, so the sequence can catch up again.
                 */
                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
                new_seq = svcpt->scp_hist_seq;
        }

        req->rq_history_seq = new_seq;

        list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}

/*
 * Server's incoming request callback
 */
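/*
 * A single rqbd receives many request messages.  Each message spawns a
 * ptlrpc_request that references the rqbd; the request embedded in the
 * rqbd itself is reserved for the final message, when the buffer is
 * unlinked and no allocation is possible in this context.
 */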
void request_in_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md_user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *service = svcpt->scp_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_PUT ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT((char *)ev->md_start >= rqbd->rqbd_buffer);
        LASSERT((char *)ev->md_start + ev->offset + ev->mlength <=
                rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR,
                     "event type %d, status %d, service %s\n",
                     ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context.
                 */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof(*req));
        } else {
                LASSERT(ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) /* We moaned above already... */
                        return;
                req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        ptlrpc_srv_req_init(req);
        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive.
         */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md_start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        ktime_get_real_ts64(&req->rq_arrival_time);
        /* Multi-Rail: keep track of both initiator and source NID. */
        req->rq_peer = ev->initiator;
        req->rq_source = ev->source;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s (source: %s)\n",
               libcfs_id2str(req->rq_peer), libcfs_id2str(req->rq_source));

        spin_lock(&svcpt->scp_lock);

        ptlrpc_req_add_history(svcpt, req);

        if (ev->unlinked) {
                svcpt->scp_nrqbds_posted--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       svcpt->scp_nrqbds_posted);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy
                 */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    svcpt->scp_nrqbds_posted == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
        svcpt->scp_nreqs_incoming++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now...
         */
        wake_up(&svcpt->scp_waitq);

        spin_unlock(&svcpt->scp_lock);
        EXIT;
}

/*
 * Server's outgoing reply callback
 */
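/*
 * 'Easy' replies are fire-and-forget, so the net's reference is dropped
 * here.  For 'difficult' (ACK-requiring) replies the reference persists
 * until ptlrpc_handle_rs() is done: the last network event only clears
 * rs_on_net and may schedule the reply for final processing.
 */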
void reply_out_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id        *cbid = ev->md_user_ptr;
        struct ptlrpc_reply_state  *rs = cbid->cbid_arg;
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_ACK ||
                ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs'
                 */
                LASSERT(ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT(rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it
                 */
                spin_lock(&svcpt->scp_rep_lock);
                spin_lock(&rs->rs_lock);

                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
                    rs->rs_transno <=
                    rs->rs_export->exp_obd->obd_last_committed ||
                    list_empty(&rs->rs_obd_list))
                        ptlrpc_schedule_difficult_reply(rs);

                spin_unlock(&rs->rs_lock);
                spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
}

#ifdef HAVE_SERVER_SUPPORT
/*
 * Server's bulk completion callback
 */
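/*
 * Unlike the client side, a server bulk MD can raise two events (SEND
 * plus ACK or REPLY), so bd_refs is only dropped once ev->unlinked is
 * set, and the ACK/REPLY may well arrive before the SEND event.
 */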
void server_bulk_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md_user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_UNLINK ||
                (ptlrpc_is_bulk_put_source(desc->bd_type) &&
                 ev->type == LNET_EVENT_ACK) ||
                (ptlrpc_is_bulk_get_sink(desc->bd_type) &&
                 ev->type == LNET_EVENT_REPLY));

        CDEBUG_LIMIT((ev->status == 0) ? D_NET : D_ERROR,
                     "event type %d, status %d, desc %p\n",
                     ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_refs > 0);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much...
                 */
                desc->bd_nob_transferred += ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->status != 0)
                desc->bd_failure = 1;

        if (ev->unlinked) {
                desc->bd_refs--;
                /* This is the last callback no matter what... */
                if (desc->bd_refs == 0)
                        wake_up(&desc->bd_waitq);
        }

        spin_unlock(&desc->bd_lock);
        EXIT;
}
#endif

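/*
 * Every MD that ptlrpc posts carries a ptlrpc_cb_id in md_user_ptr;
 * this master callback simply dispatches the event to the callback
 * recorded there, then drops the module's pending reference once the
 * MD has been unlinked.
 */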
static void ptlrpc_master_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
        void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT(cbid->cbid_arg != LP_POISON);
        LASSERT(callback == request_out_callback ||
                callback == reply_in_callback ||
                callback == client_bulk_callback ||
                callback == request_in_callback ||
                callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
                || callback == server_bulk_callback
#endif
                );

        callback(ev);
        if (ev->unlinked)
                percpu_ref_put(&ptlrpc_pending);
}

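/*
 * Map a UUID to the closest peer NID, preferring the smallest
 * LNetDist() and, on a tie, the lowest interface order.  Returns 0
 * with *peer and *self filled in, or -ENOENT if no NID matching the
 * UUID is reachable.
 */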
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                        struct lnet_process_id *peer, lnet_nid_t *self)
{
        int best_dist = 0;
        __u32 best_order = 0;
        int count = 0;
        int rc = -ENOENT;
        int dist;
        __u32 order;
        lnet_nid_t dst_nid;
        lnet_nid_t src_nid;

        peer->pid = LNET_PID_LUSTRE;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                if (peer->nid != LNET_NID_ANY && LNET_NIDADDR(peer->nid) == 0 &&
                    LNET_NIDNET(dst_nid) != LNET_NIDNET(peer->nid))
                        continue;

                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_NID_LO_0;
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}

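/*
 * Shutdown synchronization: ptlrpc_ni_fini() kills ptlrpc_pending and
 * then blocks on ptlrpc_done; ptlrpc_release() fires when the last
 * reference (the last in-flight MD) is dropped, releasing the waiter.
 */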
static struct completion ptlrpc_done;

static void ptlrpc_release(struct percpu_ref *ref)
{
        complete(&ptlrpc_done);
}

static void ptlrpc_ni_fini(void)
{
        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server replies)
         */

        init_completion(&ptlrpc_done);
        percpu_ref_kill(&ptlrpc_pending);
        wait_for_completion(&ptlrpc_done);

        lnet_assert_handler_unused(ptlrpc_handler);
        LNetNIFini();
}

lnet_pid_t ptl_get_pid(void)
{
        return LNET_PID_LUSTRE;
}

int ptlrpc_ni_init(void)
{
        int rc;
        lnet_pid_t pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG(D_NET, "ptlrpc: Can't init network interface: rc = %d\n",
                       rc);
                return rc;
        }

        rc = percpu_ref_init(&ptlrpc_pending, ptlrpc_release, 0, GFP_KERNEL);
        if (rc) {
                CERROR("ptlrpc: Can't init percpu refcount: rc = %d\n", rc);
                LNetNIFini(); /* drop the interface ref taken above */
                return rc;
        }
        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on...
         */
        /* kernel LNet calls our master callback when there is a new event;
         * because we are guaranteed to get every event via the callback,
         * we just set the EQ size to 0 to avoid the overhead of serializing
         * enqueue/dequeue operations in LNet.
         */
        ptlrpc_handler = ptlrpc_master_callback;
        return 0;
}

int ptlrpc_init_portals(void)
{
        int rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return rc;
        }
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}