/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

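/*
 * The LNet event handler shared by all ptlrpc MDs, and a per-CPU refcount
 * tracking messages still in flight: ptlrpc_master_callback() drops one
 * reference per unlinked event, and ptlrpc_ni_fini() waits for the count
 * to drain before calling LNetNIFini().
 */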
lnet_handler_t ptlrpc_handler;
struct percpu_ref ptlrpc_pending;

/*
 *  Client's outgoing request callback
 */
void request_out_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md_user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        bool                   wakeup = false;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        /* Do not update imp_next_ping for connection request */
        if (lustre_msg_get_opc(req->rq_reqmsg) !=
            req->rq_import->imp_connect_op)
                ptlrpc_pinger_sending_on_import(req->rq_import);

        sptlrpc_request_out_callback(req);

        spin_lock(&req->rq_lock);
        req->rq_real_sent = ktime_get_real_seconds();
        req->rq_req_unlinked = 1;
        /* reply_in_callback happened before request_out_callback? */
        if (req->rq_reply_unlinked)
                wakeup = true;

        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently... */
                req->rq_net_err = 1;
                wakeup = true;
        }

        if (wakeup)
                ptlrpc_client_wake_req(req);

        spin_unlock(&req->rq_lock);

        ptlrpc_req_finished(req);
        EXIT;
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id   *cbid = ev->md_user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
        ENTRY;

        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

        LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->md_start == req->rq_repbuf);
        LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
        /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
         * for adaptive timeouts' early reply.
         */
        LASSERT((ev->md_options & LNET_MD_MANAGE_REMOTE) != 0);

        spin_lock(&req->rq_lock);

        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
                req->rq_reply_unlinked = 1;

        if (ev->status)
                goto out_wake;

        if (ev->type == LNET_EVENT_UNLINK) {
                LASSERT(ev->unlinked);
                DEBUG_REQ(D_NET, req, "unlink");
                goto out_wake;
        }

        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
                req->rq_reply_truncated = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
                goto out_wake;
        }

        if ((ev->offset == 0) &&
            ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
                /* Early reply */
                DEBUG_REQ(D_ADAPTTO, req,
                          "Early reply received, mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
                          ev->mlength, ev->offset,
                          req->rq_replen, req->rq_replied, ev->unlinked);

                req->rq_early_count++; /* number received, client side */

                /* already got the real reply or buffers are already unlinked */
                if (req->rq_replied ||
                    req->rq_reply_unlinked == 1)
                        goto out_wake;

                req->rq_early = 1;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* And we're still receiving */
                req->rq_receiving_reply = 1;
        } else {
                /* Real reply */
                req->rq_rep_swab_mask = 0;
                req->rq_replied = 1;
                /* Got reply, no resend required */
                req->rq_resend = 0;
                req->rq_reply_off = ev->offset;
                req->rq_nob_received = ev->mlength;
                /* LNetMDUnlink can't be called under the LNET_LOCK,
                 * so we must unlink in ptlrpc_unregister_reply */
                DEBUG_REQ(D_INFO, req,
                          "reply in flags=%x mlen=%u offset=%d replen=%d",
                          lustre_msg_get_flags(req->rq_reqmsg),
                          ev->mlength, ev->offset, req->rq_replen);
        }

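        /* Don't count OBD_PING replies when tracking the import's last
         * reply time. */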
        if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
                req->rq_import->imp_last_reply_time = ktime_get_real_seconds();

out_wake:
        /* NB don't unlock till after wakeup; req can disappear under us
         * since we don't have our own ref */
        ptlrpc_client_wake_req(req);
        spin_unlock(&req->rq_lock);
        EXIT;
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md_user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        struct ptlrpc_request   *req;
        ENTRY;

        LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
                 ev->type == LNET_EVENT_PUT) ||
                (ptlrpc_is_bulk_get_source(desc->bd_type) &&
                 ev->type == LNET_EVENT_GET) ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);

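        /* Fault injection: fail the bulk transfer once when the matching
         * OBD_FAIL flag is set. */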
        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                ev->status = -EIO;

        if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2, CFS_FAIL_ONCE))
                ev->status = -EIO;

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);
        req = desc->bd_req;
        LASSERT(desc->bd_refs > 0);
        desc->bd_refs--;

        if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
                desc->bd_nob_transferred += ev->mlength;
                desc->bd_sender = ev->sender;
        } else {
                /* start reconnect and resend if network error hit */
                spin_lock(&req->rq_lock);
                req->rq_net_err = 1;
                spin_unlock(&req->rq_lock);
        }

        if (ev->status != 0)
                desc->bd_failure = 1;

        /* NB don't unlock till after wakeup; desc can disappear under us
         * otherwise */
        if (desc->bd_refs == 0)
                ptlrpc_client_wake_req(desc->bd_req);

        spin_unlock(&desc->bd_lock);
        EXIT;
}

/*
 * We will have a percpt request history list for each ptlrpc service in
 * upcoming patches because we don't want to be serialized by the current
 * per-service history operations. So we require that the history ID can
 * (somehow) show arrival order without grabbing a global lock, and that
 * users can sort entries in userspace.
 *
 * This is how we generate the history ID for a ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X)bits  |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    | CPT id   |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */

#define REQS_CPT_BITS(svcpt)    ((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT          32
#define REQS_USEC_SHIFT         16
#define REQS_SEQ_SHIFT(svcpt)   REQS_CPT_BITS(svcpt)
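
/*
 * For example (illustrative numbers), with srv_cpt_bits = 2, a request
 * arriving at tv_sec = 0x5d000000, usec = 300 on CPT 1 gets the base ID
 *   (0x5d000000ULL << 32) | ((300 / 16) << 16) | 1
 * i.e. seconds in the top 32 bits, the 16-usec bucket (18) in the next
 * 16 bits, the CPT id in the low 2 bits, and 14 bits left in between
 * for the per-bucket sequence bumped in ptlrpc_req_add_history() below.
 */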

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
                                   struct ptlrpc_request *req)
{
        u64 sec = req->rq_arrival_time.tv_sec;
        u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
        u64 new_seq;

        /* set the sequence ID for the request and add it to the history
         * list; this must be called with svcpt::scp_lock held */

        new_seq = (sec << REQS_SEC_SHIFT) |
                  (usec << REQS_USEC_SHIFT) |
                  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

        if (new_seq > svcpt->scp_hist_seq) {
                /* This handles the initial case of scp_hist_seq == 0 or
                 * we just jumped into a new time window */
                svcpt->scp_hist_seq = new_seq;
        } else {
                LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
                /* NB: increase the sequence number in the current usec
                 * bucket; however, it's possible that we used up all bits
                 * for the sequence and jumped into the next usec bucket
                 * (future time), then we hope there will be fewer RPCs per
                 * bucket at some point, and the sequence will catch up again */
                svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
                new_seq = svcpt->scp_hist_seq;
        }

        req->rq_history_seq = new_seq;

        list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}

/*
 * Server's incoming request callback
 */
void request_in_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id               *cbid = ev->md_user_ptr;
        struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
        struct ptlrpc_service_part        *svcpt = rqbd->rqbd_svcpt;
        struct ptlrpc_service             *service = svcpt->scp_service;
        struct ptlrpc_request             *req;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_PUT ||
                ev->type == LNET_EVENT_UNLINK);
        LASSERT((char *)ev->md_start >= rqbd->rqbd_buffer);
        LASSERT((char *)ev->md_start + ev->offset + ev->mlength <=
                rqbd->rqbd_buffer + service->srv_buf_size);

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, service %s\n",
               ev->type, ev->status, service->srv_name);

        if (ev->unlinked) {
                /* If this is the last request message to fit in the
                 * request buffer we can use the request object embedded in
                 * rqbd.  Note that if we failed to allocate a request,
                 * we'd have to re-post the rqbd, which we can't do in this
                 * context. */
                req = &rqbd->rqbd_req;
                memset(req, 0, sizeof(*req));
        } else {
                LASSERT(ev->type == LNET_EVENT_PUT);
                if (ev->status != 0) {
                        /* We moaned above already... */
                        return;
                }
                req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
                if (req == NULL) {
                        CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
                               service->srv_name,
                               libcfs_id2str(ev->initiator));
                        return;
                }
        }

        ptlrpc_srv_req_init(req);
        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive. */
        req->rq_xid = ev->match_bits;
        req->rq_reqbuf = ev->md_start + ev->offset;
        if (ev->type == LNET_EVENT_PUT && ev->status == 0)
                req->rq_reqdata_len = ev->mlength;
        ktime_get_real_ts64(&req->rq_arrival_time);
        /* Multi-Rail: keep track of both initiator and source NID. */
        req->rq_peer = ev->initiator;
        req->rq_source = ev->source;
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
                       req, req->rq_xid, ev->mlength);

        CDEBUG(D_RPCTRACE, "peer: %s (source: %s)\n",
               libcfs_id2str(req->rq_peer), libcfs_id2str(req->rq_source));

        spin_lock(&svcpt->scp_lock);

        ptlrpc_req_add_history(svcpt, req);

        if (ev->unlinked) {
                svcpt->scp_nrqbds_posted--;
                CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
                       svcpt->scp_nrqbds_posted);

                /* Normally, don't complain about 0 buffers posted; LNET won't
                 * drop incoming reqs since we set the portal lazy */
                if (test_req_buffer_pressure &&
                    ev->type != LNET_EVENT_UNLINK &&
                    svcpt->scp_nrqbds_posted == 0)
                        CWARN("All %s request buffers busy\n",
                              service->srv_name);

                /* req takes over the network's ref on rqbd */
        } else {
                /* req takes a ref on rqbd */
                rqbd->rqbd_refcount++;
        }

        list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
        svcpt->scp_nreqs_incoming++;

        /* NB everything can disappear under us once the request
         * has been queued and we unlock, so do the wake now... */
        wake_up(&svcpt->scp_waitq);

        spin_unlock(&svcpt->scp_lock);
        EXIT;
}

/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id       *cbid = ev->md_user_ptr;
        struct ptlrpc_reply_state *rs = cbid->cbid_arg;
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_ACK ||
                ev->type == LNET_EVENT_UNLINK);

        if (!rs->rs_difficult) {
                /* 'Easy' replies have no further processing so I drop the
                 * net's ref on 'rs' */
                LASSERT(ev->unlinked);
                ptlrpc_rs_decref(rs);
                EXIT;
                return;
        }

        LASSERT(rs->rs_on_net);

        if (ev->unlinked) {
                /* Last network callback. The net's ref on 'rs' stays put
                 * until ptlrpc_handle_rs() is done with it */
                spin_lock(&svcpt->scp_rep_lock);
                spin_lock(&rs->rs_lock);

                rs->rs_on_net = 0;
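                /* Schedule final processing if a client ACK is still
                 * expected (!rs_no_ack), if the transno has already
                 * committed, or if the reply isn't tracked on the obd's
                 * uncommitted-replies list. */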
                if (!rs->rs_no_ack ||
                    rs->rs_transno <=
                    rs->rs_export->exp_obd->obd_last_committed ||
                    list_empty(&rs->rs_obd_list))
                        ptlrpc_schedule_difficult_reply(rs);

                spin_unlock(&rs->rs_lock);
                spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
}

#ifdef HAVE_SERVER_SUPPORT
/*
 * Server's bulk completion callback
 */
void server_bulk_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id     *cbid = ev->md_user_ptr;
        struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
        ENTRY;

        LASSERT(ev->type == LNET_EVENT_SEND ||
                ev->type == LNET_EVENT_UNLINK ||
                (ptlrpc_is_bulk_put_source(desc->bd_type) &&
                 ev->type == LNET_EVENT_ACK) ||
                (ptlrpc_is_bulk_get_sink(desc->bd_type) &&
                 ev->type == LNET_EVENT_REPLY));

        CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
               "event type %d, status %d, desc %p\n",
               ev->type, ev->status, desc);

        spin_lock(&desc->bd_lock);

        LASSERT(desc->bd_refs > 0);

        if ((ev->type == LNET_EVENT_ACK ||
             ev->type == LNET_EVENT_REPLY) &&
            ev->status == 0) {
                /* We heard back from the peer, so even if we get this
                 * before the SENT event (oh yes we can), we know we
                 * read/wrote the peer buffer and how much... */
                desc->bd_nob_transferred += ev->mlength;
                desc->bd_sender = ev->sender;
        }

        if (ev->status != 0)
                desc->bd_failure = 1;

        if (ev->unlinked) {
                desc->bd_refs--;
                /* This is the last callback no matter what... */
                if (desc->bd_refs == 0)
                        wake_up(&desc->bd_waitq);
        }

        spin_unlock(&desc->bd_lock);
        EXIT;
}
#endif

static void ptlrpc_master_callback(struct lnet_event *ev)
{
        struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
        void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;

        /* Honestly, it's best to find out early. */
        LASSERT(cbid->cbid_arg != LP_POISON);
        LASSERT(callback == request_out_callback ||
                callback == reply_in_callback ||
                callback == client_bulk_callback ||
                callback == request_in_callback ||
                callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
                || callback == server_bulk_callback
#endif
                );

        callback(ev);
        if (ev->unlinked)
                percpu_ref_put(&ptlrpc_pending);
}

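/*
 * Map a UUID to the "closest" LNet peer NID: prefer the smallest
 * LNetDist() distance and, on equal distance, the lowest interface
 * order; also return the local NID the traffic would use via *self.
 */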
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                        struct lnet_process_id *peer, lnet_nid_t *self)
{
        int best_dist = 0;
        __u32 best_order = 0;
        int count = 0;
        int rc = -ENOENT;
        int dist;
        __u32 order;
        lnet_nid_t dst_nid;
        lnet_nid_t src_nid;

        peer->pid = LNET_PID_LUSTRE;

        /* Choose the matching UUID that's closest */
        while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
                if (peer->nid != LNET_NID_ANY && LNET_NIDADDR(peer->nid) == 0 &&
                    LNET_NIDNET(dst_nid) != LNET_NIDNET(peer->nid))
                        continue;

                dist = LNetDist(dst_nid, &src_nid, &order);
                if (dist < 0)
                        continue;

                if (dist == 0) {                /* local! use loopback LND */
                        peer->nid = *self = LNET_NID_LO_0;
                        rc = 0;
                        break;
                }

                if (rc < 0 ||
                    dist < best_dist ||
                    (dist == best_dist && order < best_order)) {
                        best_dist = dist;
                        best_order = order;

                        peer->nid = dst_nid;
                        *self = src_nid;
                        rc = 0;
                }
        }

        CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
        return rc;
}

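/* Completed by ptlrpc_release() once the last reference on ptlrpc_pending
 * is dropped after percpu_ref_kill() in ptlrpc_ni_fini(). */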
static struct completion ptlrpc_done;

static void ptlrpc_release(struct percpu_ref *ref)
{
        complete(&ptlrpc_done);
}

static void ptlrpc_ni_fini(void)
{
        /* Wait for the event queue to become idle since there may still be
         * messages in flight with pending events (i.e. the fire-and-forget
         * messages == client requests and "non-difficult" server
         * replies) */

        init_completion(&ptlrpc_done);
        percpu_ref_kill(&ptlrpc_pending);
        wait_for_completion(&ptlrpc_done);

        lnet_assert_handler_unused(ptlrpc_handler);
        LNetNIFini();
}

lnet_pid_t ptl_get_pid(void)
{
        return LNET_PID_LUSTRE;
}

int ptlrpc_ni_init(void)
{
        int rc;
        lnet_pid_t pid;

        pid = ptl_get_pid();
        CDEBUG(D_NET, "My pid is: %x\n", pid);

        /* We're not passing any limits yet... */
        rc = LNetNIInit(pid);
        if (rc < 0) {
                CDEBUG(D_NET, "ptlrpc: Can't init network interface: rc = %d\n",
                       rc);
                return rc;
        }

        rc = percpu_ref_init(&ptlrpc_pending, ptlrpc_release, 0, GFP_KERNEL);
        if (rc) {
                CERROR("ptlrpc: Can't init percpu refcount: rc = %d\n", rc);
                return rc;
        }
        /* CAVEAT EMPTOR: how we process portals events is _radically_
         * different depending on...
         */
        /* kernel LNet calls our master callback whenever there is a new
         * event; because we are guaranteed to get every event via callback,
         * we just set the EQ size to 0 to avoid the overhead of serializing
         * enqueue/dequeue operations in LNet. */
        ptlrpc_handler = ptlrpc_master_callback;
        return 0;
}

int ptlrpc_init_portals(void)
{
        int rc = ptlrpc_ni_init();

        if (rc != 0) {
                CERROR("network initialisation failed\n");
                return rc;
        }
        rc = ptlrpcd_addref();
        if (rc == 0)
                return 0;

        CERROR("rpcd initialisation failed\n");
        ptlrpc_ni_fini();
        return rc;
}

void ptlrpc_exit_portals(void)
{
        ptlrpcd_decref();
        ptlrpc_ni_fini();
}