/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set, to which other
 * threads may add requests when they do not want to wait for completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!) inside
 * such a completion handler, or a deadlock can occur: ptlrpcd enters a
 * callback that sends another RPC and waits for it to return, and while it
 * waits ptlrpcd is completely blocked, so if, e.g., an import fails,
 * recovery cannot progress because connection requests are also handled
 * by ptlrpcd.
 *
 * @{
 */
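
/*
 * A minimal usage sketch (illustrative only; error handling is elided and
 * the callback name "my_interpret" is hypothetical).  A caller that does not
 * want to wait for an RPC hands it to ptlrpcd; the reply is delivered via
 * the request's interpret callback, which runs in ptlrpcd context and
 * therefore must not send another RPC and wait for it:
 *
 *      static int my_interpret(const struct lu_env *env,
 *                              struct ptlrpc_request *req,
 *                              void *args, int rc)
 *      {
 *              return rc;      (no blocking work here)
 *      }
 *
 *      req->rq_interpret_reply = my_interpret;
 *      ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */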
#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
struct ptlrpcd {
        int                pd_size;
        int                pd_index;
        int                pd_nthreads;
        struct ptlrpcd_ctl pd_thread_rcv;
        struct ptlrpcd_ctl pd_threads[0];
};
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");

static struct ptlrpcd *ptlrpcds;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

        switch (policy) {
        case PDL_POLICY_SAME:
                idx = smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
                /* Before the CPU partition patches are available, process it
                 * the same way as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Fall through to PDL_POLICY_ROUND until the CPU
                 * partition patches are available. */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }

        return &ptlrpcds->pd_threads[idx];
}
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests in the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        struct list_head *tmp, *pos;
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;

        list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(pos, struct ptlrpc_request,
                                   rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
        }

        spin_lock(&new->set_new_req_lock);
        list_splice_init(&set->set_requests, &new->set_new_requests);
        i = atomic_read(&set->set_remaining);
        count = atomic_add_return(i, &new->set_new_count);
        atomic_set(&set->set_remaining, 0);
        spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                wake_up(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners,
                 *      but to guarantee that async RPCs are processed ASAP
                 *      we have no better choice. This may be fixed in the
                 *      future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);
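
/*
 * Usage sketch for ptlrpcd_add_rqset() (illustrative only; assumes the
 * caller filled the set with RQ_PHASE_NEW requests through the standard set
 * API): after the handoff the original set is empty and ptlrpcd owns the
 * requests, so the caller can destroy its set without waiting:
 *
 *      set = ptlrpc_prep_set();
 *      ptlrpc_set_add_req(set, req);           (repeat per request)
 *      ptlrpcd_add_rqset(set);
 *      ptlrpc_set_destroy(set);
 */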
/**
 * Return the number of transferred RPCs.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        spin_lock(&src->set_new_req_lock);
        if (likely(!list_empty(&src->set_new_requests))) {
                list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        req->rq_set = des;
                }
                list_splice_init(&src->set_new_requests,
                                 &des->set_requests);
                rc = atomic_read(&src->set_new_count);
                atomic_add(rc, &des->set_remaining);
                atomic_set(&src->set_new_count, 0);
        }
        spin_unlock(&src->set_new_req_lock);
        return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_reqmsg)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid
                 * linking the request twice. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);
                wake_up(&req->rq_set->set_waitq);
                return;
        } else {
                spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
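
/*
 * Illustrative policy choices for ptlrpcd_add_req() (a sketch, not a
 * recommendation): PDL_POLICY_SAME queues on the thread indexed by the
 * current CPU, PDL_POLICY_PREFERRED uses "idx" as a placement hint, and
 * PDL_POLICY_ROUND simply spreads the load:
 *
 *      ptlrpcd_add_req(req, PDL_POLICY_SAME, -1);
 *      ptlrpcd_add_req(req, PDL_POLICY_PREFERRED, cpu);
 *      ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */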
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (atomic_read(&set->set_new_count)) {
                spin_lock(&set->set_new_req_lock);
                if (likely(!list_empty(&set->set_new_requests))) {
                        list_splice_init(&set->set_new_requests,
                                         &set->set_requests);
                        atomic_add(atomic_read(&set->set_new_count),
                                   &set->set_remaining);
                        atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to
         * make sure that the env keys the requests depend on actually exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        if (!list_empty(&set->set_requests)) {
                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                list_for_each_safe(pos, tmp, &set->set_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = atomic_read(&set->set_new_count);

                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                spin_unlock(&partner->pc_lock);

                                if (atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE, "transfer %d"
                                                       " async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
        }

        RETURN(rc);
}
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the RPCs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_context ses = { 0 };
        struct lu_env env = { .le_ses = &ses };
        int rc, exit = 0;
        ENTRY;

        unshare_fs_struct();
#if defined(CONFIG_SMP)
        if (test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < num_possible_cpus()) {
                        while (!cpu_online(index)) {
                                if (++index >= num_possible_cpus())
                                        index = 0;
                        }
                        set_cpus_allowed_ptr(current,
                                        cpumask_of_node(cpu_to_node(index)));
                }
        }
#endif
        /* Both client and server (MDT/OST) may use the environment. */
        rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD | LCT_DT_THREAD |
                                          LCT_CL_THREAD | LCT_REMEMBER |
                                          LCT_NOREF);
        if (rc == 0) {
                rc = lu_context_init(env.le_ses,
                                     LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
                if (rc != 0)
                        lu_context_fini(&env.le_ctx);
        }
        complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                lu_context_enter(env.le_ses);
                l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);
                lu_context_exit(env.le_ses);

                /*
                 * Abort inflight RPCs for the forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new RPCs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        lu_context_fini(env.le_ses);

        complete(&pc->pc_finishing);

        RETURN(0);
}
/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind ptlrpcd threads
 *      to specific CPU cores. But binding all ptlrpcd threads may cause
 *      response delays when the chosen CPU core(s) are busy with other load.
 *
 *      For example, for "ls -l" some async statahead RPCs are assigned to
 *      ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may be quite
 *      busy with non-ptlrpcd work, such as the "ls -l" thread itself (we
 *      want the "ls -l" thread, the statahead thread, and the ptlrpcd
 *      thread to run in parallel). In that case the statahead async RPCs
 *      are not processed in time, which is not what we want. If ptlrpcd_0
 *      could be rescheduled on another CPU core it might do better, but
 *      that breaks the data transfer policy above.
 *
 *      So we should not avoid cross-CPU data transfer blindly. Instead we
 *      make a compromise: divide the ptlrpcd thread pool into two parts.
 *      One part is for bound mode: each ptlrpcd thread in this part is
 *      bound to some CPU core. The other part is for free mode: those
 *      ptlrpcd threads can be scheduled on any CPU core. We specify a
 *      partnership between bound mode ptlrpcd thread(s) and free mode
 *      ptlrpcd thread(s), and the async RPC load is shared among the
 *      partners.
 *
 *      This partly avoids cross-CPU data transfer (as long as the bound
 *      mode ptlrpcd thread can be scheduled in time), and tries to
 *      guarantee that async RPCs are processed ASAP (as long as the free
 *      mode ptlrpcd thread can be scheduled in time).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to
 *      use a <free, bound> pair. In the future we can specify a more
 *      complex partnership based on the CPU partition patches, but until
 *      those patches are available we prefer the simplest scheme.
 */
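
/*
 * For example (an illustration of PDB_POLICY_PAIR, assuming nthreads == 4):
 * the pool is split into <free, bound> pairs, where the odd-indexed thread
 * of each pair has LIOD_BIND set and the two partners share each other's
 * new-request queues via the work stealing in ptlrpcd_check():
 *
 *      ptlrpcd_0 (free)  <-->  ptlrpcd_1 (bound)
 *      ptlrpcd_2 (free)  <-->  ptlrpcd_3 (bound)
 */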
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
#if defined(CONFIG_NUMA)
        cpumask_t mask;
#endif
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
        {
                int i;

                mask = *cpumask_of_node(cpu_to_node(index));
                for (i = max; i < num_online_cpus(); i++)
                        cpu_clear(i, mask);
                pc->pc_npartners = cpus_weight(mask) - 1;
                set_bit(LIOD_BIND, &pc->pc_flags);
        }
#else
                LASSERT(max >= 3);
                pc->pc_npartners = 2;
#endif
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n",
                       ptlrpcd_bind_policy);
                rc = -EINVAL;
        }

        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        set_bit(LIOD_BIND, &pc->pc_flags);
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
                        {
                                struct ptlrpcd_ctl *ppc;
                                int i, pidx;

                                /* partners are cores in the same NUMA node.
                                 * setup partnership only with ptlrpcd threads
                                 * that are already initialized
                                 */
                                for (pidx = 0, i = 0; i < index; i++) {
                                        if (cpu_isset(i, mask)) {
                                                ppc = &ptlrpcds->pd_threads[i];
                                                pc->pc_partners[pidx++] = ppc;
                                                ppc->pc_partners[ppc->
                                                          pc_npartners++] = pc;
                                        }
                                }
                                /* adjust number of partners to the number
                                 * of partnerships really set up */
                                pc->pc_npartners = pidx;
                        }
#else
                                if (index & 0x1)
                                        set_bit(LIOD_BIND, &pc->pc_flags);
                                if (index > 0) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                        pc_partners[0] = pc;
                                        }
                                }
#endif
                                break;
                        }
                }
        }

        RETURN(rc);
}
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);
        strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);

        /*
         * So far only "client" ptlrpcd uses an environment. In the future,
         * a ptlrpcd thread (or a thread-set) has to be given an argument,
         * describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out_set, rc);

        {
                struct task_struct *task;

                if (index >= 0) {
                        rc = ptlrpcd_bind(index, max);
                        if (rc < 0)
                                GOTO(out_env, rc);
                }

                task = kthread_run(ptlrpcd, pc, pc->pc_name);
                if (IS_ERR(task))
                        GOTO(out_env, rc = PTR_ERR(task));

                wait_for_completion(&pc->pc_starting);
        }
        RETURN(0);

out_env:
        lu_context_fini(&pc->pc_env.le_ctx);

out_set:
        if (pc->pc_set != NULL) {
                struct ptlrpc_request_set *set = pc->pc_set;

                spin_lock(&pc->pc_lock);
                pc->pc_set = NULL;
                spin_unlock(&pc->pc_lock);
                ptlrpc_set_destroy(set);
        }
        clear_bit(LIOD_BIND, &pc->pc_flags);
out:
        clear_bit(LIOD_START, &pc->pc_flags);
        RETURN(rc);
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        wake_up(&pc->pc_set->set_waitq);

out:
        EXIT;
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        wait_for_completion(&pc->pc_finishing);
        lu_context_fini(&pc->pc_env.le_ctx);

        spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);
        clear_bit(LIOD_BIND, &pc->pc_flags);

out:
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
        EXIT;
}
static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_free(&ptlrpcds->pd_threads[i]);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }

        EXIT;
}
static int ptlrpcd_init(void)
{
        int nthreads = num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process
         *      any non-recovery async RPC to improve overall async RPC
         *      efficiency.
         *
         *      But there are some issues when async I/O RPCs and async
         *      non-I/O RPCs are processed in the same set in some cases:
         *      a ptlrpcd may be blocked by async I/O RPC(s), which then
         *      prevents other async non-I/O RPC(s) from being processed
         *      in time.
         *
         *      Maybe we should distinguish blocking async RPCs from
         *      non-blocking ones and process them in different ptlrpcd
         *      sets to avoid the unnecessary dependency. But how to
         *      distribute the async RPC load among all the ptlrpc daemons
         *      then becomes another problem. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                for (j = 0; j <= i; j++)
                        ptlrpcd_free(&ptlrpcds->pd_threads[j]);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }

        RETURN(rc);
}
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1) {
                rc = ptlrpcd_init();
                if (rc < 0)
                        ptlrpcd_users--;
        }
        mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
        mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
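
/*
 * Usage sketch (illustrative): callers treat the ptlrpcd pool as refcounted.
 * The first ptlrpcd_addref() starts the whole pool via ptlrpcd_init(), and
 * the last ptlrpcd_decref() tears it down again, so the calls pair like any
 * reference count:
 *
 *      rc = ptlrpcd_addref();
 *      if (rc == 0) {
 *              ... use ptlrpcd_add_req() / ptlrpcd_add_rqset() ...
 *              ptlrpcd_decref();
 *      }
 */

/** @} ptlrpcd */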