/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own set where other users might add
 * requests when they don't want to wait for their completion.
 * PtlRPCD will take care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never significantly block (esp. on RPCs!) within such
 * a completion handler, or a deadlock might occur where ptlrpcd enters some
 * callback that attempts to send another RPC and waits for it to return,
 * during which time ptlrpcd is completely blocked; so e.g. if an import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
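/*
 * Illustrative sketch (not part of the original file): a reply callback of
 * the kind described above. It runs directly in ptlrpcd context, so it only
 * inspects the result and records it; it never sends further RPCs or sleeps.
 * The signature follows the ptlrpc_interpterer_t convention used by ptlrpc
 * interpret callbacks; "example_async_args" is a hypothetical per-request
 * argument structure.
 */
#if 0   /* example only, not built */
struct example_async_args {
        int     *eaa_result;
};

static int example_interpret_reply(const struct lu_env *env,
                                   struct ptlrpc_request *req,
                                   void *args, int rc)
{
        struct example_async_args *eaa = args;

        /* Runs in ptlrpcd context: must not block, esp. not on RPCs. */
        *eaa->eaa_result = rc;
        return 0;
}
#endif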
#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
struct ptlrpcd {
        int                     pd_size;
        int                     pd_index;
        int                     pd_nthreads;
        struct ptlrpcd_ctl      pd_thread_rcv;
        struct ptlrpcd_ctl      pd_threads[0];
};
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");

static struct ptlrpcd *ptlrpcds;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = req->rq_set;

        LASSERT(set != NULL);
        wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

        switch (policy) {
        case PDL_POLICY_SAME:
                idx = smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
                /* Before CPU partition patches are available, process it the
                 * same as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Fall through to PDL_POLICY_ROUND until the CPU
                 * partition patches are available. */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }

        return &ptlrpcds->pd_threads[idx];
}
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        struct list_head *tmp, *pos;
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;

        list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(pos, struct ptlrpc_request,
                                   rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
        }

        spin_lock(&new->set_new_req_lock);
        list_splice_init(&set->set_requests, &new->set_new_requests);
        i = atomic_read(&set->set_remaining);
        count = atomic_add_return(i, &new->set_new_count);
        atomic_set(&set->set_remaining, 0);
        spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                wake_up(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners.
                 *      But to guarantee that the async RPCs are processed
                 *      ASAP, we have no better choice. It may be fixed in
                 *      the future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}
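/*
 * Illustrative usage sketch (not part of the original file): a caller builds
 * a set of new requests and hands the whole set over to ptlrpcd instead of
 * waiting in ptlrpc_set_wait(). ptlrpc_prep_set(), ptlrpc_set_add_req() and
 * ptlrpc_set_destroy() are the standard set helpers; build_one_req() is a
 * hypothetical request constructor. Whether the caller destroys the emptied
 * set immediately, as shown, depends on its own lifecycle.
 */
#if 0   /* example only, not built */
static int example_submit_set_async(int nreq)
{
        struct ptlrpc_request_set *set;
        int i;

        set = ptlrpc_prep_set();
        if (set == NULL)
                return -ENOMEM;

        for (i = 0; i < nreq; i++)
                ptlrpc_set_add_req(set, build_one_req(i));

        /* All requests are still RQ_PHASE_NEW; ptlrpcd takes them over
         * and the original set is left empty. */
        ptlrpcd_add_rqset(set);
        ptlrpc_set_destroy(set);
        return 0;
}
#endif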
/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        spin_lock(&src->set_new_req_lock);
        if (likely(!list_empty(&src->set_new_requests))) {
                list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        req->rq_set = des;
                }
                list_splice_init(&src->set_new_requests,
                                 &des->set_requests);
                rc = atomic_read(&src->set_new_count);
                atomic_add(rc, &des->set_remaining);
                atomic_set(&src->set_new_count, 0);
        }
        spin_unlock(&src->set_new_req_lock);
        return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid double
                 * linked. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);
                wake_up(&req->rq_set->set_waitq);
                return;
        } else {
                spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
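/*
 * Illustrative usage sketch (not part of the original file): fire-and-forget
 * submission of a single prepared request, tying together the interpret
 * callback from the example near the top of this file. CLASSERT and
 * ptlrpc_req_async_args() are the usual helpers for stashing per-request
 * arguments; example_send_async() is a hypothetical name. The -1 index is
 * ignored by PDL_POLICY_ROUND.
 */
#if 0   /* example only, not built */
static void example_send_async(struct ptlrpc_request *req, int *result)
{
        struct example_async_args *eaa;

        CLASSERT(sizeof(*eaa) <= sizeof(req->rq_async_args));
        eaa = ptlrpc_req_async_args(req);
        eaa->eaa_result = result;
        req->rq_interpret_reply = example_interpret_reply;

        /* Queue on a ptlrpcd thread; the caller does not wait. */
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
}
#endif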
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (atomic_read(&set->set_new_count)) {
                spin_lock(&set->set_new_req_lock);
                if (likely(!list_empty(&set->set_new_requests))) {
                        list_splice_init(&set->set_new_requests,
                                         &set->set_requests);
                        atomic_add(atomic_read(&set->set_new_count),
                                   &set->set_remaining);
                        atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to make
         * sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        /* NB: ptlrpc_check_set has already moved completed requests to the
         * head of set::set_requests */
        list_for_each_safe(pos, tmp, &set->set_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                if (req->rq_phase != RQ_PHASE_COMPLETE)
                        break;

                list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = atomic_read(&set->set_new_count);

                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                spin_unlock(&partner->pc_lock);

                                if (atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE, "transfer %d"
                                                       " async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
        }

        RETURN(rc);
}
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_context ses = { 0 };
        struct lu_env env = { .le_ses = &ses };
        int rc, exit = 0;
        ENTRY;

#if defined(CONFIG_SMP)
        if (test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < num_possible_cpus()) {
                        while (!cpu_online(index)) {
                                if (++index >= num_possible_cpus())
                                        index = 0;
                        }
                        set_cpus_allowed_ptr(current,
                                        cpumask_of_node(cpu_to_node(index)));
                }
        }
#endif
        /* Both client and server (MDT/OST) may use the environment. */
        rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD | LCT_DT_THREAD |
                                          LCT_CL_THREAD | LCT_REMEMBER |
                                          LCT_NOREF);
        if (rc == 0) {
                rc = lu_context_init(env.le_ses,
                                     LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
                if (rc != 0)
                        lu_context_fini(&env.le_ctx);
        }
        complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                lu_context_enter(env.le_ses);
                l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);
                lu_context_exit(env.le_ses);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        lu_context_fini(env.le_ses);

        complete(&pc->pc_finishing);

        RETURN(0);
}
/* XXX: We want multiple CPU cores to share the async RPC load. So we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores. So we bind ptlrpcd threads
 *      to specific CPU cores. But binding all ptlrpcd threads may cause
 *      response delays when some CPU core(s) are busy with other loads.
 *
 *      For example: "ls -l", where some async RPCs for statahead are assigned
 *      to ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may be quite
 *      busy with other non-ptlrpcd work, like "ls -l" itself (we want the
 *      "ls -l" thread, the statahead thread, and the ptlrpcd thread to run
 *      in parallel). In such a case, the statahead async RPCs cannot be
 *      processed in time, which is unexpected. If ptlrpcd_0 could be
 *      re-scheduled on another CPU core, it might do better, but that breaks
 *      the former data transfer policy.
 *
 *      So we should not blindly avoid cross-CPU data transfer. Instead we
 *      make a compromise: divide the ptlrpcd thread pool into two parts. One
 *      part is for bound mode: each ptlrpcd thread in this part is bound to
 *      some CPU core. The other part is for free mode: all the ptlrpcd
 *      threads in this part can be scheduled on any CPU core. We specify
 *      some partnership between bound mode ptlrpcd thread(s) and free mode
 *      ptlrpcd thread(s), and the async RPC load within the partners is
 *      shared.
 *
 *      This can partly avoid cross-CPU data transfer (if the bound mode
 *      ptlrpcd thread can be scheduled in time), and tries to guarantee that
 *      the async RPCs are processed ASAP (as long as the free mode ptlrpcd
 *      thread can be scheduled in time).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to use
 *      a <free, bound> pair. In the future, we can specify more complex
 *      partnerships based on the patches for CPU partitions. But before such
 *      patches are available, we prefer to use the simplest one.
 */
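/*
 * Illustrative layout (not part of the original file): assuming 4 ptlrpcd
 * threads and the default PDB_POLICY_PAIR, the pool is organized as
 * <free, bound> pairs in which the odd-indexed thread is CPU-bound and
 * partnered with the preceding free thread:
 *
 *      ptlrpcd_0 (free)  <-->  ptlrpcd_1 (bound)
 *      ptlrpcd_2 (free)  <-->  ptlrpcd_3 (bound)
 *
 * When one partner's set is empty, ptlrpcd_check() lets it steal new
 * requests from the other via ptlrpcd_steal_rqset().
 */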
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
#if defined(CONFIG_NUMA)
        cpumask_t mask;
#endif
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
        {
                int i;

                mask = *cpumask_of_node(cpu_to_node(index));
                for (i = max; i < num_online_cpus(); i++)
                        cpu_clear(i, mask);
                pc->pc_npartners = cpus_weight(mask) - 1;
                set_bit(LIOD_BIND, &pc->pc_flags);
        }
#else
                pc->pc_npartners = 2;
#endif
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n",
                       ptlrpcd_bind_policy);
                rc = -EINVAL;
        }
        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        set_bit(LIOD_BIND, &pc->pc_flags);
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
                        {
                                struct ptlrpcd_ctl *ppc;
                                int i, pidx;

                                /* partners are cores in the same NUMA node.
                                 * setup partnership only with ptlrpcd threads
                                 * that are already initialized
                                 */
                                for (pidx = 0, i = 0; i < index; i++) {
                                        if (cpu_isset(i, mask)) {
                                                ppc = &ptlrpcds->pd_threads[i];
                                                pc->pc_partners[pidx++] = ppc;
                                                ppc->pc_partners[ppc->
                                                          pc_npartners++] = pc;
                                        }
                                }
                                /* adjust number of partners to the number
                                 * of partnerships really set up */
                                pc->pc_npartners = pidx;
                        }
#else
                                if (index & 0x1)
                                        set_bit(LIOD_BIND, &pc->pc_flags);
                                if (index > 0) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                    &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                        pc_partners[0] = pc;
                                        }
                                }
#endif
                                break;
                        }
                }
        }

        RETURN(rc);
}
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
        strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);

        /*
         * So far only "client" ptlrpcd uses an environment. In the future,
         * ptlrpcd thread (or a thread-set) has to be given an argument,
         * describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out_set, rc);

        {
                struct task_struct *task;

                if (index >= 0) {
                        rc = ptlrpcd_bind(index, max);
                        if (rc < 0)
                                GOTO(out_env, rc);
                }

                task = kthread_run(ptlrpcd, pc, pc->pc_name);
                if (IS_ERR(task))
                        GOTO(out_env, rc = PTR_ERR(task));

                wait_for_completion(&pc->pc_starting);
        }
        RETURN(0);
out_env:
        lu_context_fini(&pc->pc_env.le_ctx);

out_set:
        if (pc->pc_set != NULL) {
                struct ptlrpc_request_set *set = pc->pc_set;

                spin_lock(&pc->pc_lock);
                pc->pc_set = NULL;
                spin_unlock(&pc->pc_lock);
                ptlrpc_set_destroy(set);
        }
        clear_bit(LIOD_BIND, &pc->pc_flags);
out:
        clear_bit(LIOD_START, &pc->pc_flags);
        RETURN(rc);
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                EXIT;
                return;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        wake_up(&pc->pc_set->set_waitq);

        EXIT;
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        wait_for_completion(&pc->pc_finishing);
        lu_context_fini(&pc->pc_env.le_ctx);

        spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);
        clear_bit(LIOD_BIND, &pc->pc_flags);

out:
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
        EXIT;
}
static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_free(&ptlrpcds->pd_threads[i]);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }

        EXIT;
}
static int ptlrpcd_init(void)
{
        int nthreads = num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process any
         *      non-recovery async RPC to improve overall async RPC
         *      efficiency.
         *
         *      But there are some issues with async I/O RPCs and async
         *      non-I/O RPCs being processed in the same set under some cases.
         *      The ptlrpcd may be blocked by some async I/O RPC(s), which can
         *      then prevent other async non-I/O RPC(s) from being processed
         *      in time.
         *
         *      Maybe we should distinguish blocked async RPCs from non-blocked
         *      async RPCs, and process them in different ptlrpcd sets to avoid
         *      unnecessary dependency. But how to distribute the async RPC
         *      load among all the ptlrpc daemons then becomes another
         *      trouble. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name,
                                   &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                for (j = 0; j <= i; j++)
                        ptlrpcd_free(&ptlrpcds->pd_threads[j]);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }

        RETURN(rc);
}
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1) {
                rc = ptlrpcd_init();
                if (rc < 0)
                        ptlrpcd_users--;
        }
        mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
        mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
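/*
 * Illustrative usage sketch (not part of the original file): a module that
 * submits async RPCs takes a ptlrpcd reference for its lifetime. The first
 * ptlrpcd_addref() spawns the thread pool via ptlrpcd_init(); the matching
 * ptlrpcd_decref() of the last user tears it down via ptlrpcd_fini().
 * example_setup()/example_cleanup() are hypothetical names.
 */
#if 0   /* example only, not built */
static int example_setup(void)
{
        int rc;

        rc = ptlrpcd_addref();  /* first user starts the ptlrpcd pool */
        if (rc < 0)
                return rc;
        /* ... queue async RPCs with ptlrpcd_add_req() ... */
        return 0;
}

static void example_cleanup(void)
{
        ptlrpcd_decref();       /* last user stops the ptlrpcd pool */
}
#endif

/** @} ptlrpcd */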