/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own set where other users might add
 * requests when they don't want to wait for their completion.
 * PtlRPCD will take care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never significantly block (esp. on RPCs!) within such
 * completion handlers, or a deadlock might occur where ptlrpcd enters some
 * callback that attempts to send another RPC and wait for it to return,
 * during which time ptlrpcd is completely blocked, so e.g. if an import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
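
/*
 * A minimal usage sketch (illustration only, not part of this file): a
 * caller fires an RPC asynchronously by attaching a reply interpreter and
 * queueing the request on ptlrpcd. The my_interpret()/my_send_async()
 * names are hypothetical; rq_interpret_reply and ptlrpcd_add_req() are the
 * real hooks described above. Because the interpreter runs in ptlrpcd
 * context, it must not block -- in particular, it must not send another
 * RPC and wait for its completion.
 */
#if 0 /* illustration only */
static int my_interpret(const struct lu_env *env,
                        struct ptlrpc_request *req, void *args, int rc)
{
        /* Quick, non-blocking post-processing of the reply only. */
        return rc;
}

static void my_send_async(struct ptlrpc_request *req)
{
        req->rq_interpret_reply = my_interpret;
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        /* No waiting here: ptlrpcd sends the request and invokes
         * my_interpret() once the reply has been processed. */
}
#endif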
#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif

#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>    /* for obd_zombie */
#include <obd_support.h>  /* for OBD_FAIL_CHECK */
#include <cl_object.h>    /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
/* Per-module bookkeeping for the pool of ptlrpcd threads; the fields are
 * referenced below as ptlrpcds->pd_*. */
struct ptlrpcd {
        int                pd_size;
        int                pd_index;
        int                pd_nthreads;
        struct ptlrpcd_ctl pd_thread_rcv;
        struct ptlrpcd_ctl pd_threads[0];
};
#ifdef __KERNEL__
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");
#endif
static struct ptlrpcd *ptlrpcds;

cfs_mutex_t ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

#ifdef __KERNEL__
        switch (policy) {
        case PDL_POLICY_SAME:
                idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
                /* Before the CPU partition patches are available, process
                 * it the same as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Fall through to PDL_POLICY_ROUND until the CPU
                 * partition patches are available. */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < cfs_num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == cfs_smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }
#endif /* __KERNEL__ */

        return &ptlrpcds->pd_threads[idx];
}
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        cfs_list_t *tmp, *pos;
#ifdef __KERNEL__
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;
#endif

        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(pos, struct ptlrpc_request,
                                       rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
#else
                cfs_list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
                cfs_atomic_dec(&set->set_remaining);
#endif
        }

#ifdef __KERNEL__
        cfs_spin_lock(&new->set_new_req_lock);
        cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
        i = cfs_atomic_read(&set->set_remaining);
        count = cfs_atomic_add_return(i, &new->set_new_count);
        cfs_atomic_set(&set->set_remaining, 0);
        cfs_spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                cfs_waitq_signal(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners,
                 *      but we have no better choice if we want the async
                 *      RPCs processed ASAP. This may be fixed in the
                 *      future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        cfs_waitq_signal(&pc->pc_partners[i]->
                                         pc_set->set_waitq);
        }
#endif
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);
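
/*
 * Usage sketch (illustration only): a caller that has already prepared a
 * private set of NEW requests can hand the whole set over to ptlrpcd
 * instead of waiting on it. my_queue_all() and the reqs[] array are
 * hypothetical; ptlrpc_prep_set(), ptlrpc_set_add_req() and
 * ptlrpc_set_destroy() are the existing set primitives.
 */
#if 0 /* illustration only */
static int my_queue_all(struct ptlrpc_request **reqs, int nr)
{
        struct ptlrpc_request_set *set = ptlrpc_prep_set();
        int i;

        if (set == NULL)
                return -ENOMEM;
        for (i = 0; i < nr; i++)
                ptlrpc_set_add_req(set, reqs[i]); /* still RQ_PHASE_NEW */
        ptlrpcd_add_rqset(set);   /* ptlrpcd owns the requests now */
        ptlrpc_set_destroy(set);  /* the set itself is empty again */
        return 0;
}
#endif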
#ifdef __KERNEL__
/**
 * Move requests from the "new" queue of \a src over to \a des.
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        cfs_spin_lock(&src->set_new_req_lock);
        if (likely(!cfs_list_empty(&src->set_new_requests))) {
                cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        req->rq_set = des;
                }
                cfs_list_splice_init(&src->set_new_requests,
                                     &des->set_new_requests);
                rc = cfs_atomic_read(&src->set_new_count);
                cfs_atomic_add(rc, &des->set_remaining);
                cfs_atomic_set(&src->set_new_count, 0);
        }
        cfs_spin_unlock(&src->set_new_req_lock);
        return rc;
}
#endif
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        cfs_spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                cfs_spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid double
                 * linkage. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                cfs_atomic_inc(&req->rq_set->set_remaining);
                cfs_spin_unlock(&req->rq_lock);
                cfs_waitq_signal(&req->rq_set->set_waitq);
                return;
        } else {
                cfs_spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        cfs_atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (cfs_atomic_read(&set->set_new_count)) {
                cfs_spin_lock(&set->set_new_req_lock);
                if (likely(!cfs_list_empty(&set->set_new_requests))) {
                        cfs_list_splice_init(&set->set_new_requests,
                                             &set->set_requests);
                        cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
                                       &set->set_remaining);
                        cfs_atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                cfs_spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to
         * make sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (cfs_atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        if (!cfs_list_empty(&set->set_requests)) {
                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        cfs_list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = cfs_atomic_read(&set->set_new_count);

#ifdef __KERNEL__
                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                cfs_spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        cfs_spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                cfs_spin_unlock(&partner->pc_lock);

                                if (cfs_atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE,
                                                       "transfer %d async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
#endif
        }

        RETURN(rc);
}
#ifdef __KERNEL__
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the RPCs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        cfs_daemonize_ctxt(pc->pc_name);
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
        if (cfs_test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < cfs_num_possible_cpus()) {
                        while (!cpu_online(index)) {
                                if (++index >= cfs_num_possible_cpus())
                                        index = 0;
                        }
                        cfs_set_cpus_allowed(cfs_current(),
                                     node_to_cpumask(cpu_to_node(index)));
                }
        }
#endif
        /*
         * XXX So far only "client" ptlrpcd uses an environment. In the
         * future, the ptlrpcd thread (or a thread-set) has to be given
         * an argument, describing its "scope".
         */
        rc = lu_context_init(&env.le_ctx,
                             LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        cfs_complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight RPCs for the forced stop case.
                 */
                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new RPCs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!cfs_list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        cfs_complete(&pc->pc_finishing);

        cfs_clear_bit(LIOD_START, &pc->pc_flags);
        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
        return 0;
}
/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind ptlrpcd threads
 *      to specific CPU cores. But binding all ptlrpcd threads may cause
 *      response delays when some CPU core(s) are busy with other load.
 *
 *      For example, during "ls -l", some async RPCs for statahead are
 *      assigned to ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0
 *      may be quite busy with non-ptlrpcd work, such as "ls -l" itself (we
 *      want the "ls -l" thread, the statahead thread, and the ptlrpcd
 *      thread to run in parallel). In that case the statahead async RPCs
 *      cannot be processed in time, which is unexpected. It might be
 *      better if ptlrpcd_0 could be re-scheduled on another CPU core, but
 *      that breaks the original data transfer policy.
 *
 *      So we should not blindly avoid cross-CPU data transfer. Instead we
 *      compromise: divide the ptlrpcd thread pool into two parts. One part
 *      is bound mode, where each ptlrpcd thread is bound to some CPU core;
 *      the other part is free mode, where the ptlrpcd threads can be
 *      scheduled on any CPU core. We then specify a partnership between
 *      bound mode ptlrpcd thread(s) and free mode ptlrpcd thread(s), and
 *      the async RPC load is shared among the partners.
 *
 *      This partly avoids cross-CPU data transfer (as long as the bound
 *      mode ptlrpcd thread can be scheduled in time), while still trying
 *      to guarantee that async RPCs are processed ASAP (as long as the
 *      free mode ptlrpcd thread can be scheduled in time).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to
 *      use a <free, bound> pair. In the future, we can specify a more
 *      complex partnership based on the CPU partition patches; until those
 *      patches are available, we prefer the simplest one.
 */
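
/*
 * Illustration (assumed layout, not normative): with PDB_POLICY_PAIR and
 * four ptlrpcd threads, odd-indexed threads get LIOD_BIND and each
 * <free, bound> pair shares one load:
 *
 *      ptlrpcd_0 (free) <---> ptlrpcd_1 (bound near CPU 1's node)
 *      ptlrpcd_2 (free) <---> ptlrpcd_3 (bound near CPU 3's node)
 *
 * A free partner can steal queued requests from its bound partner (see
 * ptlrpcd_steal_rqset() via ptlrpcd_check()) when the bound one cannot
 * run in time.
 */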
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
        struct ptlrpcd_ctl *ppc;
        int node, i, pidx;
        cpumask_t mask;
#endif
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
                node = cpu_to_node(index);
                mask = node_to_cpumask(node);
                for (i = max; i < cfs_num_online_cpus(); i++)
                        cpu_clear(i, mask);
                pc->pc_npartners = cpus_weight(mask) - 1;
                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
#else
                pc->pc_npartners = 2;
#endif
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n",
                       ptlrpcd_bind_policy);
                rc = -EINVAL;
        }

        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                                        pc->pc_partners[0] =
                                                &ptlrpcds->pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
#if defined(CONFIG_NUMA) && defined(HAVE_NODE_TO_CPUMASK)
                                /* Partners are cores in the same NUMA node.
                                 * Set up partnership only with ptlrpcd
                                 * threads that are already initialized. */
                                for (pidx = 0, i = 0; i < index; i++) {
                                        if (cpu_isset(i, mask)) {
                                                ppc = &ptlrpcds->pd_threads[i];
                                                pc->pc_partners[pidx++] = ppc;
                                                ppc->pc_partners[ppc->
                                                          pc_npartners++] = pc;
                                        }
                                }
                                /* Adjust the number of partners to the
                                 * number of partnerships really set up. */
                                pc->pc_npartners = pidx;
#else
                                if (index & 0x1)
                                        cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                                if (index > 0) {
                                        pc->pc_partners[0] =
                                                &ptlrpcds->pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                        &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                        pc_partners[0] = pc;
                                        }
                                }
#endif
                                break;
                        }
                }
        }

        RETURN(rc);
}
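
/*
 * Illustration (assumed, non-NUMA PDB_POLICY_NEIGHBOR with max = 4): the
 * partnerships built above form a ring in which every thread shares load
 * with its two neighbours, and odd-indexed threads are bound:
 *
 *      ptlrpcd_0 <-> ptlrpcd_1 <-> ptlrpcd_2 <-> ptlrpcd_3 <-> ptlrpcd_0
 */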
#else /* !__KERNEL__ */

/**
 * In liblustre we do not have separate threads, so this function
 * is called from time to time all across common code to see
 * if something needs to be processed on the ptlrpcd set.
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int                 rc = 0;

        /* Single threaded!! */
        pc->pc_recurred++;

        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /* XXX: send replay requests. */
                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                }
        }

        pc->pc_recurred--;
        return rc;
}

int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
                cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
}

#endif
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        cfs_init_completion(&pc->pc_starting);
        cfs_init_completion(&pc->pc_finishing);
        cfs_spin_lock_init(&pc->pc_lock);
        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * So far only "client" ptlrpcd uses an environment. In the future,
         * the ptlrpcd thread (or a thread-set) has to be given an argument,
         * describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out, rc);

#ifdef __KERNEL__
        if (index >= 0) {
                rc = ptlrpcd_bind(index, max);
                if (rc < 0)
                        GOTO(out, rc);
        }

        rc = cfs_create_thread(ptlrpcd, pc, 0);
        if (rc < 0)
                GOTO(out, rc);

        rc = 0;
        cfs_wait_for_completion(&pc->pc_starting);
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc) {
#ifdef __KERNEL__
                if (pc->pc_set != NULL) {
                        struct ptlrpc_request_set *set = pc->pc_set;

                        cfs_spin_lock(&pc->pc_lock);
                        pc->pc_set = NULL;
                        cfs_spin_unlock(&pc->pc_lock);
                        ptlrpc_set_destroy(set);
                }
                lu_context_fini(&pc->pc_env.le_ctx);
                cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
#else
                SET_BUT_UNUSED(pc);
#endif
                cfs_clear_bit(LIOD_START, &pc->pc_flags);
        }
        RETURN(rc);
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        cfs_wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);

        cfs_spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        cfs_spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

out:
#ifdef __KERNEL__
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
#endif
        EXIT;
}
static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }

        EXIT;
}
static int ptlrpcd_init(void)
{
        int nthreads = cfs_num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

#ifdef __KERNEL__
        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */
#else
        nthreads = 1;
#endif

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        cfs_set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process
         *      any non-recovery async RPC, to improve overall async RPC
         *      efficiency.
         *
         *      But there are some issues with async I/O RPCs and async
         *      non-I/O RPCs processed in the same set in some cases: the
         *      ptlrpcd may be blocked by some async I/O RPC(s), and then
         *      other async non-I/O RPC(s) cannot be processed in time.
         *
         *      Maybe we should distinguish blocked async RPCs from
         *      non-blocked async RPCs, and process them in different
         *      ptlrpcd sets to avoid the unnecessary dependency. But how
         *      to distribute the async RPC load among all the ptlrpc
         *      daemons then becomes another trouble. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name,
                                   &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }

        RETURN(rc);
}
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        cfs_mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1)
                rc = ptlrpcd_init();
        cfs_mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}

void ptlrpcd_decref(void)
{
        cfs_mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        cfs_mutex_unlock(&ptlrpcd_mutex);
}
/** @} ptlrpcd */
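
/*
 * Usage sketch (hypothetical caller, illustration only): a client obd
 * holds a reference on the ptlrpcd pool for its lifetime; the first
 * reference starts the threads and the last one stops them.
 */
#if 0 /* illustration only */
static int my_client_setup(void)
{
        int rc = ptlrpcd_addref();      /* first user runs ptlrpcd_init() */

        if (rc != 0)
                return rc;
        /* ... use ptlrpcd_add_req() while the reference is held ... */
        return 0;
}

static void my_client_cleanup(void)
{
        ptlrpcd_decref();               /* last user runs ptlrpcd_fini() */
}
#endif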