/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own set where other users might add
 * requests when they don't want to wait for their completion.
 * PtlRPCD will take care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never significantly block (esp. on RPCs!) within such
 * completion handler or a deadlock might occur where ptlrpcd enters some
 * callback that attempts to send another RPC and wait for it to return,
 * during which time ptlrpcd is completely blocked, so e.g. if import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
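/*
 * Illustrative sketch (hypothetical callback, not part of this file) of the
 * rule above: a completion callback runs in ptlrpcd context, so it must hand
 * follow-on work back to ptlrpcd instead of sending and waiting inline:
 *
 *	static int my_interpret(const struct lu_env *env,
 *				struct ptlrpc_request *req, void *args, int rc)
 *	{
 *		// OK: queue a follow-on request asynchronously
 *		ptlrpcd_add_req(next_req, PDL_POLICY_ROUND, -1);
 *		// deadlock risk: calling ptlrpc_queue_wait() from here
 *		return rc;
 *	}
 */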
#define DEBUG_SUBSYSTEM S_RPC

#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
struct ptlrpcd {
	int                pd_size;
	int                pd_index;
	int                pd_nthreads;
	struct ptlrpcd_ctl pd_thread_rcv;
	struct ptlrpcd_ctl pd_threads[0];
};
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
		"Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
		"Ptlrpcd threads binding mode.");

static struct ptlrpcd *ptlrpcds;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *rq_set = req->rq_set;

	LASSERT(rq_set != NULL);

	wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
	int idx = 0;

	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
		return &ptlrpcds->pd_thread_rcv;

	switch (policy) {
	case PDL_POLICY_SAME:
		idx = smp_processor_id() % ptlrpcds->pd_nthreads;
		break;
	case PDL_POLICY_LOCAL:
		/* Before the CPU partition patches are available, process it
		 * the same as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
		/* Fall through to PDL_POLICY_ROUND until the CPU
		 * partition patches are available. */
		index = -1;
	case PDL_POLICY_PREFERRED:
		if (index >= 0 && index < num_online_cpus()) {
			idx = index % ptlrpcds->pd_nthreads;
			break;
		}
		/* Fall through to PDL_POLICY_ROUND for bad index. */
	default:
		/* Fall through to PDL_POLICY_ROUND for unknown policy. */
	case PDL_POLICY_ROUND:
		/* We do not care whether it is strict load balance. */
		idx = ptlrpcds->pd_index + 1;
		if (idx == smp_processor_id())
			idx++;
		idx %= ptlrpcds->pd_nthreads;
		ptlrpcds->pd_index = idx;
		break;
	}

	return &ptlrpcds->pd_threads[idx];
}
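/*
 * Illustrative mapping (hypothetical calls, assuming pd_nthreads == 4):
 *
 *	ptlrpcd_select_pc(req, PDL_POLICY_SAME, -1);	 // on CPU 6 -> thread 2
 *	ptlrpcd_select_pc(req, PDL_POLICY_PREFERRED, 1); // -> thread 1
 *	ptlrpcd_select_pc(req, PDL_POLICY_ROUND, -1);	 // next in rotation
 *
 * A request whose import is not LUSTRE_IMP_FULL always goes to
 * pd_thread_rcv, so recovery traffic is never queued behind normal I/O.
 */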
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *pos;
	struct ptlrpcd_ctl *pc;
	struct ptlrpc_request_set *new;
	int count, i;

	pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
	new = pc->pc_set;

	list_for_each_safe(pos, tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(pos, struct ptlrpc_request,
				   rq_set_chain);

		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		req->rq_set = new;
		req->rq_queued_time = cfs_time_current();
	}

	spin_lock(&new->set_new_req_lock);
	list_splice_init(&set->set_requests, &new->set_new_requests);
	i = atomic_read(&set->set_remaining);
	count = atomic_add_return(i, &new->set_new_count);
	atomic_set(&set->set_remaining, 0);
	spin_unlock(&new->set_new_req_lock);
	if (count == i) {
		wake_up(&new->set_waitq);

		/* XXX: It may be unnecessary to wake up all the partners.
		 *      But to guarantee the async RPC can be processed ASAP,
		 *      we have no better choice. It may be fixed in the
		 *      future. */
		for (i = 0; i < pc->pc_npartners; i++)
			wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
	}
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);
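/*
 * Usage sketch (hypothetical caller): a thread that prepared a private set
 * but does not want to block in ptlrpc_set_wait() can hand the whole set
 * over, provided every request in it is still RQ_PHASE_NEW:
 *
 *	struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *	ptlrpc_set_add_req(set, req);
 *	ptlrpcd_add_rqset(set);		// requests are now owned by ptlrpcd
 */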
/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
			       struct ptlrpc_request_set *src)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	int rc = 0;

	spin_lock(&src->set_new_req_lock);
	if (likely(!list_empty(&src->set_new_requests))) {
		list_for_each_safe(pos, tmp, &src->set_new_requests) {
			req = list_entry(pos, struct ptlrpc_request,
					 rq_set_chain);
			req->rq_set = des;
		}
		list_splice_init(&src->set_new_requests,
				 &des->set_requests);
		rc = atomic_read(&src->set_new_count);
		atomic_add(rc, &des->set_remaining);
		atomic_set(&src->set_new_count, 0);
	}
	spin_unlock(&src->set_new_req_lock);
	return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
	struct ptlrpcd_ctl *pc;

	lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	spin_lock(&req->rq_lock);
	if (req->rq_invalid_rqset) {
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
						     back_to_sleep, NULL);

		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);
		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
	} else if (req->rq_set) {
		/* If we have a valid "rq_set", just reuse it to avoid
		 * double-linking the request. */
		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

		/* ptlrpc_check_set will decrease the count */
		atomic_inc(&req->rq_set->set_remaining);
		spin_unlock(&req->rq_lock);
		wake_up(&req->rq_set->set_waitq);
		return;
	} else {
		spin_unlock(&req->rq_lock);
	}

	pc = ptlrpcd_select_pc(req, policy, idx);

	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
		  req, pc->pc_name, pc->pc_index);

	ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
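/*
 * Usage sketch (hypothetical caller): fire-and-forget a prepared request;
 * the interpret callback, if any, runs later in ptlrpcd context and must
 * not block:
 *
 *	req->rq_interpret_reply = my_interpret;	// hypothetical callback
 *	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */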
/**
 * Take an extra reference on a request set so that a partner thread cannot
 * destroy it while we are inspecting it; dropped via ptlrpc_reqset_put().
 */
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	struct ptlrpc_request_set *set = pc->pc_set;
	int rc = 0;
	int rc2;

	if (atomic_read(&set->set_new_count)) {
		spin_lock(&set->set_new_req_lock);
		if (likely(!list_empty(&set->set_new_requests))) {
			list_splice_init(&set->set_new_requests,
					 &set->set_requests);
			atomic_add(atomic_read(&set->set_new_count),
				   &set->set_remaining);
			atomic_set(&set->set_new_count, 0);
			/*
			 * Need to calculate its timeout.
			 */
			rc = 1;
		}
		spin_unlock(&set->set_new_req_lock);
	}

	/* We should call lu_env_refill() before handling new requests to make
	 * sure that the env keys the requests depend on really exist.
	 */
	rc2 = lu_env_refill(env);
	if (rc2 != 0) {
		/*
		 * XXX This is a very awkward situation, because
		 * execution can neither continue (request
		 * interpreters assume that env is set up), nor repeat
		 * the loop (as this potentially results in a tight
		 * loop of -ENOMEM's).
		 *
		 * Fortunately, refill only ever does something when
		 * new modules are loaded, i.e., early during boot up.
		 */
		CERROR("Failure to refill session: %d\n", rc2);
		return rc;
	}

	if (atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);

	/* NB: ptlrpc_check_set has already moved completed requests to the
	 * head of set::set_requests */
	list_for_each_safe(pos, tmp, &set->set_requests) {
		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
		if (req->rq_phase != RQ_PHASE_COMPLETE)
			break;

		list_del_init(&req->rq_set_chain);
		req->rq_set = NULL;
		ptlrpc_req_finished(req);
	}

	if (rc == 0) {
		/*
		 * If new requests have been added, make sure to wake up.
		 */
		rc = atomic_read(&set->set_new_count);

		/* If we have nothing to do, check whether we can take some
		 * work from our partner threads. */
		if (rc == 0 && pc->pc_npartners > 0) {
			struct ptlrpcd_ctl *partner;
			struct ptlrpc_request_set *ps;
			int first = pc->pc_cursor;

			do {
				partner = pc->pc_partners[pc->pc_cursor++];
				if (pc->pc_cursor >= pc->pc_npartners)
					pc->pc_cursor = 0;
				if (partner == NULL)
					continue;

				spin_lock(&partner->pc_lock);
				ps = partner->pc_set;
				if (ps == NULL) {
					spin_unlock(&partner->pc_lock);
					continue;
				}

				ptlrpc_reqset_get(ps);
				spin_unlock(&partner->pc_lock);

				if (atomic_read(&ps->set_new_count)) {
					rc = ptlrpcd_steal_rqset(set, ps);
					if (rc > 0)
						CDEBUG(D_RPCTRACE,
						       "transfer %d async RPCs [%d->%d]\n",
						       rc, partner->pc_index,
						       pc->pc_index);
				}
				ptlrpc_reqset_put(ps);
			} while (rc == 0 && pc->pc_cursor != first);
		}
	}

	return rc;
}
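/*
 * Design note on the partner scan above: ptlrpc_reqset_get() is taken under
 * the partner's pc_lock before the set is touched, so a partner that is
 * concurrently shutting down cannot free its set underneath us;
 * ptlrpc_reqset_put() drops that reference once the steal attempt is done.
 */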
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
	struct ptlrpcd_ctl *pc = arg;
	struct ptlrpc_request_set *set = pc->pc_set;
	struct lu_context ses = { 0 };
	struct lu_env env = { .le_ses = &ses };
	int rc;
	int exit = 0;

#if defined(CONFIG_SMP)
	if (test_bit(LIOD_BIND, &pc->pc_flags)) {
		int index = pc->pc_index;

		if (index >= 0 && index < num_possible_cpus()) {
			while (!cpu_online(index)) {
				if (++index >= num_possible_cpus())
					index = 0;
			}
			set_cpus_allowed_ptr(current,
					cpumask_of_node(cpu_to_node(index)));
		}
	}
#endif
	/* Both client and server (MDT/OST) may use the environment. */
	rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD | LCT_DT_THREAD |
					  LCT_CL_THREAD | LCT_REMEMBER |
					  LCT_NOREF);
	if (rc == 0) {
		rc = lu_context_init(env.le_ses,
				     LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
		if (rc != 0)
			lu_context_fini(&env.le_ctx);
	}
	complete(&pc->pc_starting);

	if (rc != 0)
		return rc;

	/*
	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
	 * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
	 * there are requests in the set. New requests come in on the set's
	 * new_req_list and ptlrpcd_check() moves them into the set.
	 */
	do {
		struct l_wait_info lwi;
		int timeout;

		timeout = ptlrpc_set_next_timeout(set);
		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
				  ptlrpc_expired_set, set);

		lu_context_enter(&env.le_ctx);
		lu_context_enter(env.le_ses);
		l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
		lu_context_exit(&env.le_ctx);
		lu_context_exit(env.le_ses);

		/*
		 * Abort inflight rpcs for forced stop case.
		 */
		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
			if (test_bit(LIOD_FORCE, &pc->pc_flags))
				ptlrpc_abort_set(set);
			exit++;
		}

		/*
		 * Let's make one more loop to make sure that ptlrpcd_check()
		 * copied all raced new rpcs into the set so we can kill them.
		 */
	} while (exit < 2);

	/*
	 * Wait for inflight requests to drain.
	 */
	if (!list_empty(&set->set_requests))
		ptlrpc_set_wait(set);
	lu_context_fini(&env.le_ctx);
	lu_context_fini(env.le_ses);

	complete(&pc->pc_finishing);

	return 0;
}
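/*
 * Shutdown sketch (mirrors ptlrpcd_stop()/ptlrpcd_free() below): the
 * mainloop above exits only after it has observed LIOD_STOP and made one
 * extra pass, so a caller stops and then reaps a thread in two steps:
 *
 *	ptlrpcd_stop(pc, 0);	// set LIOD_STOP, wake the set
 *	ptlrpcd_free(pc);	// wait for pc_finishing, destroy the set
 */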
/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind each ptlrpcd
 *      thread to a specific CPU core. But binding all ptlrpcd threads may
 *      cause response delays when the bound CPU core(s) are busy with other
 *      load.
 *
 *      For example, during "ls -l" some async RPCs for statahead are
 *      assigned to ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may
 *      be quite busy with non-ptlrpcd work, such as the "ls -l" process
 *      itself (we want the "ls -l" thread, the statahead thread, and the
 *      ptlrpcd thread to run in parallel). In such a case the statahead
 *      async RPCs are not processed in time, which is undesirable. If
 *      ptlrpcd_0 could be re-scheduled on another CPU core, the result might
 *      be better, but that breaks the data transfer policy above.
 *
 *      So we should not blindly avoid cross-CPU data transfer. Instead we
 *      compromise: divide the ptlrpcd thread pool into two parts. One part
 *      is bound mode: each ptlrpcd thread in this part is bound to some CPU
 *      core. The other part is free mode: those ptlrpcd threads may be
 *      scheduled on any CPU core. We specify a partnership between bound
 *      mode ptlrpcd thread(s) and free mode ptlrpcd thread(s), and the async
 *      RPC load within a partnership is shared.
 *
 *      This partly avoids cross-CPU data transfer (as long as the bound mode
 *      ptlrpcd thread can be scheduled in time), and still tries to
 *      guarantee that async RPCs are processed ASAP (as long as the free
 *      mode ptlrpcd thread can be scheduled in time).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to use
 *      a <free, bound> pair. In the future we can specify a more complex
 *      partnership based on the CPU partition patches; until those
 *      patches are available, we prefer to use the simplest one.
 */
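/*
 * Illustrative layout (assuming 8 ptlrpcd threads and PDB_POLICY_PAIR):
 * threads are paired <free, bound> as (0,1) (2,3) (4,5) (6,7); the odd
 * member of each pair gets LIOD_BIND, and the two members are partners
 * that may drain each other's queues via ptlrpcd_steal_rqset().
 */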
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
	struct ptlrpcd_ctl *pc;
	int rc = 0;
#if defined(CONFIG_NUMA)
	cpumask_t mask;
#endif

	LASSERT(index <= max - 1);
	pc = &ptlrpcds->pd_threads[index];
	switch (ptlrpcd_bind_policy) {
	case PDB_POLICY_NONE:
		pc->pc_npartners = -1;
		break;
	case PDB_POLICY_FULL:
		pc->pc_npartners = 0;
		set_bit(LIOD_BIND, &pc->pc_flags);
		break;
	case PDB_POLICY_PAIR:
		LASSERT(max % 2 == 0);
		pc->pc_npartners = 1;
		break;
	case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
	{
		int i;

		mask = *cpumask_of_node(cpu_to_node(index));
		for (i = max; i < num_online_cpus(); i++)
			cpu_clear(i, mask);
		pc->pc_npartners = cpus_weight(mask) - 1;
		set_bit(LIOD_BIND, &pc->pc_flags);
	}
#else
		LASSERT(max >= 3);
		pc->pc_npartners = 2;
#endif
		break;
	default:
		CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
		rc = -EINVAL;
	}

	if (rc == 0 && pc->pc_npartners > 0) {
		OBD_ALLOC(pc->pc_partners,
			  sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
		if (pc->pc_partners == NULL) {
			pc->pc_npartners = 0;
			rc = -ENOMEM;
		} else {
			switch (ptlrpcd_bind_policy) {
			case PDB_POLICY_PAIR:
				if (index & 0x1) {
					set_bit(LIOD_BIND, &pc->pc_flags);
					pc->pc_partners[0] =
						&ptlrpcds->pd_threads[index - 1];
					ptlrpcds->pd_threads[index - 1].
						pc_partners[0] = pc;
				}
				break;
			case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
			{
				struct ptlrpcd_ctl *ppc;
				int i, pidx;

				/* partners are cores in the same NUMA node.
				 * setup partnership only with ptlrpcd threads
				 * that are already initialized
				 */
				for (pidx = 0, i = 0; i < index; i++) {
					if (cpu_isset(i, mask)) {
						ppc = &ptlrpcds->pd_threads[i];
						pc->pc_partners[pidx++] = ppc;
						ppc->pc_partners[
							ppc->pc_npartners++] = pc;
					}
				}
				/* adjust number of partners to the number
				 * of partnerships really set up */
				pc->pc_npartners = pidx;
			}
#else
				if (index & 0x1)
					set_bit(LIOD_BIND, &pc->pc_flags);
				if (index > 0) {
					pc->pc_partners[0] =
						&ptlrpcds->pd_threads[index - 1];
					ptlrpcds->pd_threads[index - 1].
						pc_partners[1] = pc;
					if (index == max - 1) {
						pc->pc_partners[1] =
							&ptlrpcds->pd_threads[0];
						ptlrpcds->pd_threads[0].
							pc_partners[0] = pc;
					}
				}
#endif
				break;
			}
		}
	}

	return rc;
}
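/*
 * Illustrative layout (assuming 6 threads, PDB_POLICY_NEIGHBOR, no NUMA):
 * each thread partners with its two ring neighbours, e.g. thread 2 with
 * threads 1 and 3, and thread max-1 closes the ring by also partnering
 * with thread 0; odd-indexed threads additionally get LIOD_BIND.
 */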
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
	int rc;

	/*
	 * Do not allow starting a second thread for one pc.
	 */
	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Starting second thread (%s) for same pc %p\n",
		      name, pc);
		return 0;
	}

	pc->pc_index = index;
	init_completion(&pc->pc_starting);
	init_completion(&pc->pc_finishing);
	spin_lock_init(&pc->pc_lock);
	strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
	pc->pc_set = ptlrpc_prep_set();
	if (pc->pc_set == NULL)
		GOTO(out, rc = -ENOMEM);

	/*
	 * So far only "client" ptlrpcd uses an environment. In the future,
	 * ptlrpcd thread (or a thread-set) has to be given an argument,
	 * describing its "scope".
	 */
	rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
	if (rc != 0)
		GOTO(out_set, rc);

	{
		struct task_struct *task;

		if (index >= 0) {
			rc = ptlrpcd_bind(index, max);
			if (rc < 0)
				GOTO(out_env, rc);
		}

		task = kthread_run(ptlrpcd, pc, pc->pc_name);
		if (IS_ERR(task))
			GOTO(out_env, rc = PTR_ERR(task));

		wait_for_completion(&pc->pc_starting);
	}
	return 0;

out_env:
	lu_context_fini(&pc->pc_env.le_ctx);

out_set:
	if (pc->pc_set != NULL) {
		struct ptlrpc_request_set *set = pc->pc_set;

		spin_lock(&pc->pc_lock);
		pc->pc_set = NULL;
		spin_unlock(&pc->pc_lock);
		ptlrpc_set_destroy(set);
	}
	clear_bit(LIOD_BIND, &pc->pc_flags);
out:
	clear_bit(LIOD_START, &pc->pc_flags);
	return rc;
}
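/*
 * Error unwinding in ptlrpcd_start() is strictly last-in-first-out: a
 * kthread_run() failure tears down the lu_context (out_env), then the
 * request set (out_set), and finally clears LIOD_START (out), leaving the
 * ctl block reusable by a later ptlrpcd_start() call.
 */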
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		return;
	}

	set_bit(LIOD_STOP, &pc->pc_flags);
	if (force)
		set_bit(LIOD_FORCE, &pc->pc_flags);
	wake_up(&pc->pc_set->set_waitq);
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
	struct ptlrpc_request_set *set = pc->pc_set;

	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		goto out;
	}

	wait_for_completion(&pc->pc_finishing);
	lu_context_fini(&pc->pc_env.le_ctx);

	spin_lock(&pc->pc_lock);
	pc->pc_set = NULL;
	spin_unlock(&pc->pc_lock);
	ptlrpc_set_destroy(set);

	clear_bit(LIOD_START, &pc->pc_flags);
	clear_bit(LIOD_STOP, &pc->pc_flags);
	clear_bit(LIOD_FORCE, &pc->pc_flags);
	clear_bit(LIOD_BIND, &pc->pc_flags);

out:
	if (pc->pc_npartners > 0) {
		LASSERT(pc->pc_partners != NULL);

		OBD_FREE(pc->pc_partners,
			 sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
		pc->pc_partners = NULL;
	}
	pc->pc_npartners = 0;
}
static void ptlrpcd_fini(void)
{
	int i;

	if (ptlrpcds != NULL) {
		for (i = 0; i < ptlrpcds->pd_nthreads; i++)
			ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
		for (i = 0; i < ptlrpcds->pd_nthreads; i++)
			ptlrpcd_free(&ptlrpcds->pd_threads[i]);
		ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
		ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
		OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
		ptlrpcds = NULL;
	}
}
static int ptlrpcd_init(void)
{
	int nthreads = num_online_cpus();
	char name[16];
	int size, i = -1, j, rc = 0;

	if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
		nthreads = max_ptlrpcds;
	if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
		ptlrpcd_bind_policy = PDB_POLICY_PAIR;
	else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
		nthreads &= ~1; /* make sure it is even */

	size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
	OBD_ALLOC(ptlrpcds, size);
	if (ptlrpcds == NULL)
		GOTO(out, rc = -ENOMEM);

	snprintf(name, sizeof(name), "ptlrpcd_rcv");
	set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
	rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
	if (rc < 0)
		GOTO(out, rc);

	/* XXX: We start nthreads ptlrpc daemons. Each of them can process any
	 *      non-recovery async RPC to improve overall async RPC
	 *      efficiency.
	 *
	 *      But there are some issues with async I/O RPCs and async non-I/O
	 *      RPCs processed in the same set under some cases. The ptlrpcd
	 *      may be blocked by some async I/O RPC(s), which can then prevent
	 *      other async non-I/O RPC(s) from being processed in time.
	 *
	 *      Maybe we should distinguish blocked async RPCs from non-blocked
	 *      async RPCs, and process them in different ptlrpcd sets to avoid
	 *      unnecessary dependency. But how to distribute the async RPC
	 *      load among all the ptlrpc daemons then becomes another
	 *      trouble. */
	for (i = 0; i < nthreads; i++) {
		snprintf(name, sizeof(name), "ptlrpcd_%d", i);
		rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
		if (rc < 0)
			GOTO(out, rc);
	}

	ptlrpcds->pd_size = size;
	ptlrpcds->pd_index = 0;
	ptlrpcds->pd_nthreads = nthreads;

out:
	if (rc != 0 && ptlrpcds != NULL) {
		for (j = 0; j <= i; j++)
			ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
		for (j = 0; j <= i; j++)
			ptlrpcd_free(&ptlrpcds->pd_threads[j]);
		ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
		ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
		OBD_FREE(ptlrpcds, size);
		ptlrpcds = NULL;
	}

	return rc;
}
int ptlrpcd_addref(void)
{
	int rc = 0;

	mutex_lock(&ptlrpcd_mutex);
	if (++ptlrpcd_users == 1) {
		rc = ptlrpcd_init();
		if (rc < 0)
			ptlrpcd_users--;
	}
	mutex_unlock(&ptlrpcd_mutex);
	return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);
void ptlrpcd_decref(void)
{
	mutex_lock(&ptlrpcd_mutex);
	if (--ptlrpcd_users == 0)
		ptlrpcd_fini();
	mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
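/*
 * Usage sketch (hypothetical client): the first ptlrpcd_addref() spawns the
 * whole thread pool, later callers only bump the refcount, and every caller
 * pairs it with ptlrpcd_decref() at teardown:
 *
 *	rc = ptlrpcd_addref();
 *	if (rc == 0) {
 *		ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 *		...
 *		ptlrpcd_decref();	// last ref runs ptlrpcd_fini()
 *	}
 */

/** @} ptlrpcd */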