/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own set where other users might add
 * requests when they don't want to wait for their completion.
 * PtlRPCD will take care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from the ptlrpcd context.
 * It is important never to block significantly (esp. on RPCs!) within such a
 * completion handler, or a deadlock might occur where ptlrpcd enters some
 * callback that attempts to send another RPC and waits for it to return,
 * during which time ptlrpcd is completely blocked, so e.g. if an import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
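/*
 * Illustrative sketch only (not part of the original file): a typical caller
 * fills in a request, points rq_interpret_reply at a completion handler that
 * must not block, and hands the request to ptlrpcd.  The callback name
 * "my_interpret" is hypothetical; the interpreter signature below is the
 * usual ptlrpc_interpterer_t shape for this era of the code:
 *
 *	static int my_interpret(const struct lu_env *env,
 *				struct ptlrpc_request *req,
 *				void *args, int rc)
 *	{
 *		return rc;	(runs in ptlrpcd context: must not wait
 *				 on other RPCs, or ptlrpcd can deadlock)
 *	}
 *
 *	req->rq_interpret_reply = my_interpret;
 *	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */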
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
struct ptlrpcd {
	int                pd_size;
	int                pd_index;
	int                pd_nthreads;
	struct ptlrpcd_ctl pd_thread_rcv;
	struct ptlrpcd_ctl pd_threads[0];
};
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
		"Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
		"Ptlrpcd threads binding mode.");

static struct ptlrpcd *ptlrpcds;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set = req->rq_set;

	LASSERT(set != NULL);
	wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
	int idx = 0;

	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
		return &ptlrpcds->pd_thread_rcv;

	switch (policy) {
	case PDL_POLICY_SAME:
		idx = smp_processor_id() % ptlrpcds->pd_nthreads;
		break;
	case PDL_POLICY_LOCAL:
		/* Before the CPU partition patches are available, process it
		 * the same as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
		/* Fall through to PDL_POLICY_ROUND until the CPU
		 * partition patches are available. */
		index = -1;
	case PDL_POLICY_PREFERRED:
		if (index >= 0 && index < num_online_cpus()) {
			idx = index % ptlrpcds->pd_nthreads;
			break;
		}
		/* Fall through to PDL_POLICY_ROUND for bad index. */
	default:
		/* Fall through to PDL_POLICY_ROUND for unknown policy. */
	case PDL_POLICY_ROUND:
		/* We do not care whether it is strict load balance. */
		idx = ptlrpcds->pd_index + 1;
		if (idx == smp_processor_id())
			idx++;
		idx %= ptlrpcds->pd_nthreads;
		ptlrpcds->pd_index = idx;
		break;
	}

	return &ptlrpcds->pd_threads[idx];
}
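/*
 * Summary of the selection policies above (descriptive note, not in the
 * original file):
 *
 *	PDL_POLICY_SAME      - thread indexed by the caller's current CPU
 *	PDL_POLICY_LOCAL     - currently degenerates to PDL_POLICY_ROUND
 *	PDL_POLICY_PREFERRED - thread indexed by the caller-supplied index,
 *			       if that index is a valid online CPU
 *	PDL_POLICY_ROUND     - simple round-robin over pd_threads[],
 *			       skipping the slot equal to the caller's CPU
 *
 * Requests whose import is not yet FULL always go to pd_thread_rcv.
 */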
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *pos;
	struct ptlrpcd_ctl *pc;
	struct ptlrpc_request_set *new;
	int count, i;

	pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
	new = pc->pc_set;

	list_for_each_safe(pos, tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(pos, struct ptlrpc_request,
				   rq_set_chain);

		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		req->rq_set = new;
		req->rq_queued_time = cfs_time_current();
	}

	spin_lock(&new->set_new_req_lock);
	list_splice_init(&set->set_requests, &new->set_new_requests);
	i = atomic_read(&set->set_remaining);
	count = atomic_add_return(i, &new->set_new_count);
	atomic_set(&set->set_remaining, 0);
	spin_unlock(&new->set_new_req_lock);
	if (count == i) {
		wake_up(&new->set_waitq);

		/* XXX: It may be unnecessary to wake up all the partners.
		 *      But to guarantee the async RPC can be processed ASAP,
		 *      we have no other better choice. It may be fixed in
		 *      the future. */
		for (i = 0; i < pc->pc_npartners; i++)
			wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
	}
}
/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
			       struct ptlrpc_request_set *src)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	int rc = 0;

	spin_lock(&src->set_new_req_lock);
	if (likely(!list_empty(&src->set_new_requests))) {
		list_for_each_safe(pos, tmp, &src->set_new_requests) {
			req = list_entry(pos, struct ptlrpc_request,
					 rq_set_chain);
			req->rq_set = des;
		}
		list_splice_init(&src->set_new_requests,
				 &des->set_requests);
		rc = atomic_read(&src->set_new_count);
		atomic_add(rc, &des->set_remaining);
		atomic_set(&src->set_new_count, 0);
	}
	spin_unlock(&src->set_new_req_lock);
	return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
	struct ptlrpcd_ctl *pc;

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	spin_lock(&req->rq_lock);
	if (req->rq_invalid_rqset) {
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
						     back_to_sleep, NULL);

		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);
		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
	} else if (req->rq_set) {
		/* If we have a valid "rq_set", just reuse it to avoid double
		 * linking. */
		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

		/* ptlrpc_check_set will decrease the count */
		atomic_inc(&req->rq_set->set_remaining);
		spin_unlock(&req->rq_lock);
		wake_up(&req->rq_set->set_waitq);
		return;
	} else {
		spin_unlock(&req->rq_lock);
	}

	pc = ptlrpcd_select_pc(req, policy, idx);

	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
		  req, pc->pc_name, pc->pc_index);

	ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	struct ptlrpc_request_set *set = pc->pc_set;
	int rc = 0;
	int rc2;

	if (atomic_read(&set->set_new_count)) {
		spin_lock(&set->set_new_req_lock);
		if (likely(!list_empty(&set->set_new_requests))) {
			list_splice_init(&set->set_new_requests,
					 &set->set_requests);
			atomic_add(atomic_read(&set->set_new_count),
				   &set->set_remaining);
			atomic_set(&set->set_new_count, 0);
			/*
			 * Need to calculate its timeout.
			 */
			rc = 1;
		}
		spin_unlock(&set->set_new_req_lock);
	}

	/* We should call lu_env_refill() before handling new requests to make
	 * sure that the env keys the requests depend on really exist.
	 */
	rc2 = lu_env_refill(env);
	if (rc2 != 0) {
		/*
		 * XXX This is a very awkward situation, because
		 * execution can neither continue (request
		 * interpreters assume that env is set up), nor repeat
		 * the loop (as this potentially results in a tight
		 * loop of -ENOMEM's).
		 *
		 * Fortunately, refill only ever does something when
		 * new modules are loaded, i.e., early during boot up.
		 */
		CERROR("Failure to refill session: %d\n", rc2);
		return rc;
	}

	if (atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);

	/* NB: ptlrpc_check_set has already moved completed requests to the
	 * head of set::set_requests */
	list_for_each_safe(pos, tmp, &set->set_requests) {
		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
		if (req->rq_phase != RQ_PHASE_COMPLETE)
			break;

		list_del_init(&req->rq_set_chain);
		req->rq_set = NULL;
		ptlrpc_req_finished(req);
	}

	if (rc == 0) {
		/*
		 * If new requests have been added, make sure to wake up.
		 */
		rc = atomic_read(&set->set_new_count);

		/* If we have nothing to do, check whether we can take some
		 * work from our partner threads. */
		if (rc == 0 && pc->pc_npartners > 0) {
			struct ptlrpcd_ctl *partner;
			struct ptlrpc_request_set *ps;
			int first = pc->pc_cursor;

			do {
				partner = pc->pc_partners[pc->pc_cursor++];
				if (pc->pc_cursor >= pc->pc_npartners)
					pc->pc_cursor = 0;
				if (partner == NULL)
					continue;

				spin_lock(&partner->pc_lock);
				ps = partner->pc_set;
				if (ps == NULL) {
					spin_unlock(&partner->pc_lock);
					continue;
				}

				ptlrpc_reqset_get(ps);
				spin_unlock(&partner->pc_lock);

				if (atomic_read(&ps->set_new_count)) {
					rc = ptlrpcd_steal_rqset(set, ps);
					if (rc > 0)
						CDEBUG(D_RPCTRACE,
						       "transfer %d async RPCs [%d->%d]\n",
						       rc, partner->pc_index,
						       pc->pc_index);
				}
				ptlrpc_reqset_put(ps);
			} while (rc == 0 && pc->pc_cursor != first);
		}
	}

	return rc;
}
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
	struct ptlrpcd_ctl *pc = arg;
	struct ptlrpc_request_set *set = pc->pc_set;
	struct lu_context ses = { 0 };
	struct lu_env env = { .le_ses = &ses };
	int rc, exit = 0;

#if defined(CONFIG_SMP)
	if (test_bit(LIOD_BIND, &pc->pc_flags)) {
		int index = pc->pc_index;

		if (index >= 0 && index < num_possible_cpus()) {
			while (!cpu_online(index)) {
				if (++index >= num_possible_cpus())
					index = 0;
			}
			set_cpus_allowed_ptr(current,
					cpumask_of_node(cpu_to_node(index)));
		}
	}
#endif
	/* Both client and server (MDT/OST) may use the environment. */
	rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD | LCT_DT_THREAD |
					  LCT_CL_THREAD | LCT_REMEMBER |
					  LCT_NOREF);
	if (rc == 0) {
		rc = lu_context_init(env.le_ses,
				     LCT_SESSION|LCT_REMEMBER|LCT_NOREF);
		if (rc != 0)
			lu_context_fini(&env.le_ctx);
	}
	complete(&pc->pc_starting);

	if (rc != 0)
		return rc;

	/*
	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
	 * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
	 * there are requests in the set. New requests come in on the set's
	 * new_req_list and ptlrpcd_check() moves them into the set.
	 */
	do {
		struct l_wait_info lwi;
		int timeout;

		timeout = ptlrpc_set_next_timeout(set);
		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
				  ptlrpc_expired_set, set);

		lu_context_enter(&env.le_ctx);
		lu_context_enter(env.le_ses);
		l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
		lu_context_exit(&env.le_ctx);
		lu_context_exit(env.le_ses);

		/*
		 * Abort inflight rpcs for forced stop case.
		 */
		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
			if (test_bit(LIOD_FORCE, &pc->pc_flags))
				ptlrpc_abort_set(set);
			exit++;
		}

		/*
		 * Let's make one more loop to make sure that ptlrpcd_check()
		 * copied all raced new rpcs into the set so we can kill them.
		 */
	} while (exit < 2);

	/*
	 * Wait for inflight requests to drain.
	 */
	if (!list_empty(&set->set_requests))
		ptlrpc_set_wait(set);
	lu_context_fini(&env.le_ctx);
	lu_context_fini(env.le_ses);

	complete(&pc->pc_finishing);

	return 0;
}
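/*
 * Descriptive note (not in the original file): the thread lifecycle
 * handshake above uses two completions: pc_starting, completed by ptlrpcd()
 * once its lu_env is initialized and awaited by ptlrpcd_start(); and
 * pc_finishing, completed on exit and awaited by ptlrpcd_free().
 * ptlrpcd_stop() only sets LIOD_STOP (plus LIOD_FORCE for a forced stop)
 * and wakes the set; the main loop then drains or aborts in-flight
 * requests and exits.
 */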
/* XXX: We want multiple CPU cores to share the async RPC load. So we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores. So we bind each ptlrpcd
 *      thread to a specified CPU core. But binding all ptlrpcd threads may
 *      cause response delays when some CPU core(s) are busy with other
 *      loads.
 *
 *      For example: "ls -l", some async RPCs for statahead are assigned to
 *      ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may be quite
 *      busy with other non-ptlrpcd work, like "ls -l" itself (we want the
 *      "ls -l" thread, the statahead thread, and the ptlrpcd thread to run
 *      in parallel). In such a case the statahead async RPCs cannot be
 *      processed in time, which is undesirable. If ptlrpcd_0 could be
 *      re-scheduled on another CPU core, it might be better, but that
 *      breaks the data transfer policy above.
 *
 *      So we should not blindly avoid cross-CPU data transfer. Instead we
 *      make a compromise: divide the ptlrpcd thread pool into two parts.
 *      One part is for bound mode; each ptlrpcd thread in this part is
 *      bound to some CPU core. The other part is for free mode; all the
 *      ptlrpcd threads in this part can be scheduled on any CPU core. We
 *      specify some partnership between bound mode ptlrpcd thread(s) and
 *      free mode ptlrpcd thread(s), and the async RPC load within the
 *      partners is shared.
 *
 *      This can partly avoid cross-CPU data transfer (as long as the bound
 *      mode ptlrpcd thread can be scheduled in time), and tries to
 *      guarantee that async RPCs are processed ASAP (as long as the free
 *      mode ptlrpcd thread can be scheduled).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to
 *      use a <free, bound> pair. In the future, we can specify some more
 *      complex partnership based on the patches for CPU partitions. But
 *      before such patches are available, we prefer the simplest one.
 */
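/*
 * Illustrative note (not in the original file): with PDB_POLICY_PAIR and
 * four ptlrpcd threads, the wiring set up by ptlrpcd_bind() below pairs
 * each odd-indexed (bound) thread with the preceding even-indexed (free)
 * thread:
 *
 *	ptlrpcd_0 (free)  <->  ptlrpcd_1 (bound, LIOD_BIND set)
 *	ptlrpcd_2 (free)  <->  ptlrpcd_3 (bound, LIOD_BIND set)
 *
 * Each partner may take queued requests from the other via
 * ptlrpcd_steal_rqset() when it runs out of its own work.
 */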
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
	struct ptlrpcd_ctl *pc;
	int rc = 0;
#if defined(CONFIG_NUMA)
	cpumask_t mask;
#endif

	LASSERT(index <= max - 1);
	pc = &ptlrpcds->pd_threads[index];
	switch (ptlrpcd_bind_policy) {
	case PDB_POLICY_NONE:
		pc->pc_npartners = -1;
		break;
	case PDB_POLICY_FULL:
		pc->pc_npartners = 0;
		set_bit(LIOD_BIND, &pc->pc_flags);
		break;
	case PDB_POLICY_PAIR:
		LASSERT(max % 2 == 0);
		pc->pc_npartners = 1;
		break;
	case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
	{
		int i;

		cpumask_copy(&mask, cpumask_of_node(cpu_to_node(index)));
		for (i = max; i < num_online_cpus(); i++)
			cpumask_clear_cpu(i, &mask);
		pc->pc_npartners = cpumask_weight(&mask) - 1;
		set_bit(LIOD_BIND, &pc->pc_flags);
	}
#else
		LASSERT(max >= 3);
		pc->pc_npartners = 2;
#endif
		break;
	default:
		CERROR("unknown ptlrpcd bind policy %d\n",
		       ptlrpcd_bind_policy);
		rc = -EINVAL;
	}

	if (rc == 0 && pc->pc_npartners > 0) {
		OBD_ALLOC(pc->pc_partners,
			  sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
		if (pc->pc_partners == NULL) {
			pc->pc_npartners = 0;
			rc = -ENOMEM;
		} else {
			switch (ptlrpcd_bind_policy) {
			case PDB_POLICY_PAIR:
				if (index & 0x1) {
					set_bit(LIOD_BIND, &pc->pc_flags);
					pc->pc_partners[0] =
						&ptlrpcds->pd_threads[index - 1];
					ptlrpcds->pd_threads[index - 1].
						pc_partners[0] = pc;
				}
				break;
			case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA)
			{
				struct ptlrpcd_ctl *ppc;
				int i, pidx;

				/* partners are cores in the same NUMA node.
				 * setup partnership only with ptlrpcd threads
				 * that are already initialized
				 */
				for (pidx = 0, i = 0; i < index; i++) {
					if (cpumask_test_cpu(i, &mask)) {
						ppc = &ptlrpcds->pd_threads[i];
						pc->pc_partners[pidx++] = ppc;
						ppc->pc_partners[ppc->
							pc_npartners++] = pc;
					}
				}
				/* adjust number of partners to the number
				 * of partnerships really set up */
				pc->pc_npartners = pidx;
			}
#else
				if (index & 0x1)
					set_bit(LIOD_BIND, &pc->pc_flags);
				if (index > 0) {
					pc->pc_partners[0] =
						&ptlrpcds->pd_threads[index - 1];
					ptlrpcds->pd_threads[index - 1].
						pc_partners[1] = pc;
					if (index == max - 1) {
						pc->pc_partners[1] =
							&ptlrpcds->pd_threads[0];
						ptlrpcds->pd_threads[0].
							pc_partners[0] = pc;
					}
				}
#endif
				break;
			}
		}
	}

	return rc;
}
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
	int rc;

	/*
	 * Do not allow starting a second thread for one pc.
	 */
	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Starting second thread (%s) for same pc %p\n",
		      name, pc);
		return 0;
	}

	pc->pc_index = index;
	init_completion(&pc->pc_starting);
	init_completion(&pc->pc_finishing);
	spin_lock_init(&pc->pc_lock);
	strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
	pc->pc_set = ptlrpc_prep_set();
	if (pc->pc_set == NULL)
		GOTO(out, rc = -ENOMEM);

	/*
	 * So far only "client" ptlrpcd uses an environment. In the future,
	 * ptlrpcd thread (or a thread-set) has to be given an argument,
	 * describing its "scope".
	 */
	rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
	if (rc != 0)
		GOTO(out_set, rc);

	{
		struct task_struct *task;

		if (index >= 0) {
			rc = ptlrpcd_bind(index, max);
			if (rc < 0)
				GOTO(out_env, rc);
		}

		task = kthread_run(ptlrpcd, pc, pc->pc_name);
		if (IS_ERR(task))
			GOTO(out_env, rc = PTR_ERR(task));

		wait_for_completion(&pc->pc_starting);
	}
	return 0;

out_env:
	lu_context_fini(&pc->pc_env.le_ctx);
out_set:
	if (pc->pc_set != NULL) {
		struct ptlrpc_request_set *set = pc->pc_set;

		spin_lock(&pc->pc_lock);
		pc->pc_set = NULL;
		spin_unlock(&pc->pc_lock);
		ptlrpc_set_destroy(set);
	}
	clear_bit(LIOD_BIND, &pc->pc_flags);
out:
	clear_bit(LIOD_START, &pc->pc_flags);
	return rc;
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		return;
	}

	set_bit(LIOD_STOP, &pc->pc_flags);
	if (force)
		set_bit(LIOD_FORCE, &pc->pc_flags);
	wake_up(&pc->pc_set->set_waitq);
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
	struct ptlrpc_request_set *set = pc->pc_set;

	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		goto out;
	}

	wait_for_completion(&pc->pc_finishing);
	lu_context_fini(&pc->pc_env.le_ctx);

	spin_lock(&pc->pc_lock);
	pc->pc_set = NULL;
	spin_unlock(&pc->pc_lock);
	ptlrpc_set_destroy(set);

	clear_bit(LIOD_START, &pc->pc_flags);
	clear_bit(LIOD_STOP, &pc->pc_flags);
	clear_bit(LIOD_FORCE, &pc->pc_flags);
	clear_bit(LIOD_BIND, &pc->pc_flags);

out:
	if (pc->pc_npartners > 0) {
		LASSERT(pc->pc_partners != NULL);

		OBD_FREE(pc->pc_partners,
			 sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
		pc->pc_partners = NULL;
	}
	pc->pc_npartners = 0;
}
static void ptlrpcd_fini(void)
{
	int i;

	if (ptlrpcds != NULL) {
		for (i = 0; i < ptlrpcds->pd_nthreads; i++)
			ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
		for (i = 0; i < ptlrpcds->pd_nthreads; i++)
			ptlrpcd_free(&ptlrpcds->pd_threads[i]);
		ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
		ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
		OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
		ptlrpcds = NULL;
	}
}
static int ptlrpcd_init(void)
{
	int nthreads = num_online_cpus();
	char name[16];
	int size, i = -1, j, rc = 0;

	if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
		nthreads = max_ptlrpcds;
	if (nthreads < 2)
		nthreads = 2;
	if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
		ptlrpcd_bind_policy = PDB_POLICY_PAIR;
	else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
		nthreads &= ~1; /* make sure it is even */

	size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
	OBD_ALLOC(ptlrpcds, size);
	if (ptlrpcds == NULL)
		GOTO(out, rc = -ENOMEM);

	snprintf(name, 15, "ptlrpcd_rcv");
	set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
	rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
	if (rc < 0)
		GOTO(out, rc);

	/* XXX: We start nthreads ptlrpc daemons. Each of them can process
	 *      any non-recovery async RPC to improve overall async RPC
	 *      efficiency.
	 *
	 *      But there are some issues with async I/O RPCs and async
	 *      non-I/O RPCs processed in the same set under some cases. The
	 *      ptlrpcd may be blocked by some async I/O RPC(s), and then
	 *      other async non-I/O RPC(s) cannot be processed in time.
	 *
	 *      Maybe we should distinguish blocked async RPCs from
	 *      non-blocked async RPCs, and process them in different ptlrpcd
	 *      sets to avoid unnecessary dependency. But how to distribute
	 *      the async RPC load among all the ptlrpc daemons becomes
	 *      another trouble. */
	for (i = 0; i < nthreads; i++) {
		snprintf(name, 15, "ptlrpcd_%d", i);
		rc = ptlrpcd_start(i, nthreads, name,
				   &ptlrpcds->pd_threads[i]);
		if (rc < 0)
			GOTO(out, rc);
	}

	ptlrpcds->pd_size = size;
	ptlrpcds->pd_index = 0;
	ptlrpcds->pd_nthreads = nthreads;

out:
	if (rc != 0 && ptlrpcds != NULL) {
		for (j = 0; j <= i; j++)
			ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
		for (j = 0; j <= i; j++)
			ptlrpcd_free(&ptlrpcds->pd_threads[j]);
		ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
		ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
		OBD_FREE(ptlrpcds, size);
		ptlrpcds = NULL;
	}

	return rc;
}
int ptlrpcd_addref(void)
{
	int rc = 0;

	mutex_lock(&ptlrpcd_mutex);
	if (++ptlrpcd_users == 1) {
		rc = ptlrpcd_init();
		if (rc < 0)
			ptlrpcd_users--;
	}
	mutex_unlock(&ptlrpcd_mutex);
	return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
	mutex_lock(&ptlrpcd_mutex);
	if (--ptlrpcd_users == 0)
		ptlrpcd_fini();
	mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
/** @} ptlrpcd */
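/*
 * Illustrative note (not in the original file): a typical consumer holds a
 * reference for the lifetime of its device, taken in setup and dropped in
 * cleanup:
 *
 *	rc = ptlrpcd_addref();		(first user starts the daemons)
 *	if (rc < 0)
 *		return rc;
 *	...
 *	ptlrpcd_decref();		(last user tears them down)
 */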