/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set where other threads
 * may add requests when they do not want to wait for their completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never significantly block (esp. on RPCs!) within such
 * completion handlers, or a deadlock might occur where ptlrpcd enters a
 * callback that attempts to send another RPC and waits for it to return,
 * during which time ptlrpcd is completely blocked; so e.g. if an import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/kthread.h>
#include <libcfs/libcfs.h>
#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
/* One of these per CPT. */
struct ptlrpcd {
        int                     pd_size;      /* allocation size, for OBD_FREE */
        int                     pd_index;     /* round-robin cursor for ptlrpcd_select_pc */
        int                     pd_cpt;       /* CPT the threads are bound to */
        int                     pd_nthreads;  /* number of threads in pd_threads[] */
        int                     pd_groupsize; /* partner group size */
        struct ptlrpcd_ctl      pd_threads[0];
};
/*
 * max_ptlrpcds is obsolete, but retained to ensure that the kernel
 * module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_per_cpt_max.
 */
static int max_ptlrpcds;
module_param(max_ptlrpcds, int, 0644);
MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");
/*
 * ptlrpcd_bind_policy is obsolete, but retained to ensure that
 * the kernel module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_partner_group_size.
 */
static int ptlrpcd_bind_policy;
module_param(ptlrpcd_bind_policy, int, 0644);
MODULE_PARM_DESC(ptlrpcd_bind_policy,
                 "Ptlrpcd threads binding mode (obsolete).");
/*
 * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
 * per CPT.
 */
static int ptlrpcd_per_cpt_max;
module_param(ptlrpcd_per_cpt_max, int, 0644);
MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
                 "Max ptlrpcd thread count to be started per cpt.");
/*
 * ptlrpcd_partner_group_size: The desired number of threads in each
 * ptlrpcd partner thread group. Default is 2, corresponding to the
 * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
 * a CPT partners of each other.
 */
static int ptlrpcd_partner_group_size;
module_param(ptlrpcd_partner_group_size, int, 0644);
MODULE_PARM_DESC(ptlrpcd_partner_group_size,
                 "Number of ptlrpcd threads in a partner group.");
/*
 * ptlrpcd_cpts: A CPT string describing the CPU partitions that
 * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
 * a subset of all CPTs.
 *
 * For example:
 *
 * ptlrpcd_cpts=2
 *   run ptlrpcd threads only on CPT 2.
 *
 * ptlrpcd_cpts=[0-3]
 *   run ptlrpcd threads on CPTs 0, 1, 2, and 3.
 *
 * ptlrpcd_cpts=[0-3,5,7]
 *   run ptlrpcd threads on CPTs 0, 1, 2, 3, 5, and 7.
 */
static char *ptlrpcd_cpts;
module_param(ptlrpcd_cpts, charp, 0644);
MODULE_PARM_DESC(ptlrpcd_cpts,
                 "CPU partitions ptlrpcd threads should run in");
/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
static int *ptlrpcds_cpt_idx;

/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
static int ptlrpcds_num;
static struct ptlrpcd **ptlrpcds;

/*
 * In addition to the regular thread pool above, there is a single
 * global recovery thread. Recovery isn't critical for performance,
 * and doesn't block, but must always be able to proceed, and it is
 * possible that all normal ptlrpcd threads are blocked. Hence the
 * need for a dedicated thread.
 */
static struct ptlrpcd_ctl ptlrpcd_rcv;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = req->rq_set;

        LASSERT(set != NULL);
        wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req)
{
        struct ptlrpcd *pd;
        int cpt;
        int idx;

        /* Requests not in the FULL import state (e.g. replay or recovery
         * RPCs) are handled by the dedicated recovery thread. */
        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcd_rcv;

        cpt = cfs_cpt_current(cfs_cpt_table, 1);
        if (ptlrpcds_cpt_idx == NULL)
                idx = cpt;
        else
                idx = ptlrpcds_cpt_idx[cpt];
        pd = ptlrpcds[idx];

        /* We do not care whether it is strict load balance. */
        idx = pd->pd_index;
        if (++idx == pd->pd_nthreads)
                idx = 0;
        pd->pd_index = idx;

        return &pd->pd_threads[idx];
}
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        struct list_head *tmp, *pos;
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL);
        new = pc->pc_set;

        list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(pos, struct ptlrpc_request,
                                   rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
        }

        spin_lock(&new->set_new_req_lock);
        list_splice_init(&set->set_requests, &new->set_new_requests);
        i = atomic_read(&set->set_remaining);
        count = atomic_add_return(i, &new->set_new_count);
        atomic_set(&set->set_remaining, 0);
        spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                wake_up(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners.
                 *      But to guarantee that the async RPCs are processed
                 *      ASAP, we have no better choice. This may be fixed
                 *      in the future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}
/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        spin_lock(&src->set_new_req_lock);
        if (likely(!list_empty(&src->set_new_requests))) {
                list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = list_entry(pos, struct ptlrpc_request,
                                         rq_set_chain);
                        req->rq_set = des;
                }
                list_splice_init(&src->set_new_requests,
                                 &des->set_requests);
                rc = atomic_read(&src->set_new_count);
                atomic_add(rc, &des->set_remaining);
                atomic_set(&src->set_new_count, 0);
        }
        spin_unlock(&src->set_new_req_lock);
        return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
        struct ptlrpcd_ctl *pc;

        if (req->rq_reqmsg)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid double
                 * linking. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                atomic_inc(&req->rq_set->set_remaining);
                spin_unlock(&req->rq_lock);
                wake_up(&req->rq_set->set_waitq);
                return;
        } else {
                spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
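/*
 * Illustrative caller pattern (a sketch, not code from this file). An
 * async sender typically packs a request, sets an interpret callback,
 * and hands the request to ptlrpcd instead of waiting on it:
 *
 *      req->rq_interpret_reply = foo_interpret;   (hypothetical callback)
 *      ptlrpcd_add_req(req);                      (queue and return)
 *
 * foo_interpret() then runs in ptlrpcd context when the reply arrives,
 * and must not block on further RPCs (see the deadlock warning in the
 * header comment of this file).
 */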
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (atomic_read(&set->set_new_count)) {
                spin_lock(&set->set_new_req_lock);
                if (likely(!list_empty(&set->set_new_requests))) {
                        list_splice_init(&set->set_new_requests,
                                         &set->set_requests);
                        atomic_add(atomic_read(&set->set_new_count),
                                   &set->set_remaining);
                        atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to make
         * sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        /* NB: ptlrpc_check_set has already moved completed requests to the
         * head of set::set_requests */
        list_for_each_safe(pos, tmp, &set->set_requests) {
                req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
                if (req->rq_phase != RQ_PHASE_COMPLETE)
                        break;

                list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpc_req_finished(req);
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = atomic_read(&set->set_new_count);

                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                spin_unlock(&partner->pc_lock);

                                if (atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE,
                                                       "transfer %d async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
        }

        RETURN(rc);
}
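/*
 * Note (derived from the loop above): the stealing scan starts at
 * pc_cursor rather than partner 0 and stops after one full circuit, so
 * repeated idle checks spread the stealing load round-robin across the
 * partner group instead of always draining the same neighbour first.
 */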
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set;
        struct lu_context ses = { 0 };
        struct lu_env env = { .le_ses = &ses };
        int rc = 0;
        int exit = 0;
        ENTRY;

        if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
                CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);

        /*
         * Allocate the request set after the thread has been bound
         * above. This is safe because no requests will be queued
         * until all ptlrpcd threads have confirmed that they have
         * successfully started.
         */
        set = ptlrpc_prep_set();
        if (set == NULL)
                GOTO(failed, rc = -ENOMEM);
        spin_lock(&pc->pc_lock);
        pc->pc_set = set;
        spin_unlock(&pc->pc_lock);

        /* Both client and server (MDT/OST) may use the environment. */
        rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD |
                                          LCT_DT_THREAD |
                                          LCT_CL_THREAD |
                                          LCT_REMEMBER |
                                          LCT_NOREF);
        if (rc != 0)
                GOTO(failed, rc);
        rc = lu_context_init(env.le_ses, LCT_SESSION |
                                         LCT_REMEMBER |
                                         LCT_NOREF);
        if (rc != 0) {
                lu_context_fini(&env.le_ctx);
                GOTO(failed, rc);
        }

        complete(&pc->pc_starting);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                lu_context_enter(env.le_ses);
                l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);
                lu_context_exit(env.le_ses);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        lu_context_fini(env.le_ses);

        complete(&pc->pc_finishing);

        return 0;

failed:
        pc->pc_error = rc;
        complete(&pc->pc_starting);
        RETURN(rc);
}
static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
{
        pc->pc_index = index;
        pc->pc_cpt = cpt;
        init_completion(&pc->pc_starting);
        init_completion(&pc->pc_finishing);
        spin_lock_init(&pc->pc_lock);

        if (index < 0) {
                /* Recovery thread. */
                snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
        } else {
                /* Regular thread. */
                snprintf(pc->pc_name, sizeof(pc->pc_name),
                         "ptlrpcd_%02d_%02d", cpt, index);
        }
}
/* XXX: We want multiple CPU cores to share the async RPC load. So we
 *      start many ptlrpcd threads. We also want to reduce the ptlrpcd
 *      overhead caused by data transfer across CPU cores. So we bind
 *      all ptlrpcd threads to a CPT, in the expectation that CPTs
 *      will be defined in a way that matches these boundaries. Within
 *      a CPT a ptlrpcd thread can be scheduled on any available core.
 *
 *      Each ptlrpcd thread has its own request queue. This can cause
 *      response delay if the thread is already busy. To help with
 *      this we define partner threads: these are other threads bound
 *      to the same CPT which will check for work in each other's
 *      request queues if they have no work to do.
 *
 *      The desired number of partner threads can be tuned by setting
 *      ptlrpcd_partner_group_size. The default is to create pairs of
 *      partner threads.
 */
static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
{
        struct ptlrpcd_ctl *pc;
        struct ptlrpcd_ctl **ppc;
        int first;
        int i;
        int rc = 0;
        ENTRY;

        LASSERT(index >= 0 && index < pd->pd_nthreads);
        pc = &pd->pd_threads[index];
        pc->pc_npartners = pd->pd_groupsize - 1;

        if (pc->pc_npartners <= 0)
                GOTO(out, rc);

        OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_table, pc->pc_cpt,
                      sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
        if (pc->pc_partners == NULL) {
                pc->pc_npartners = 0;
                GOTO(out, rc = -ENOMEM);
        }

        first = index - index % pd->pd_groupsize;
        ppc = pc->pc_partners;
        for (i = first; i < first + pd->pd_groupsize; i++) {
                if (i != index)
                        *ppc++ = &pd->pd_threads[i];
        }
out:
        RETURN(rc);
}
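/*
 * Worked example (derived from the code above): with pd_groupsize = 2
 * and pd_nthreads = 4, the groups are {0,1} and {2,3}; thread 2 gets
 * first = 2 - 2 % 2 = 2, so its only partner is thread 3. With
 * pd_groupsize = pd_nthreads (ptlrpcd_partner_group_size < 0), every
 * thread in the CPT partners with every other.
 */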
int ptlrpcd_start(struct ptlrpcd_ctl *pc)
{
        struct task_struct *task;
        int rc = 0;
        ENTRY;

        /*
         * Do not allow starting a second thread for one pc.
         */
        if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      pc->pc_name, pc);
                RETURN(0);
        }

        /*
         * So far only "client" ptlrpcd uses an environment. In the future,
         * ptlrpcd thread (or a thread-set) has to be given an argument,
         * describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out, rc);

        task = kthread_run(ptlrpcd, pc, pc->pc_name);
        if (IS_ERR(task))
                GOTO(out_set, rc = PTR_ERR(task));

        wait_for_completion(&pc->pc_starting);
        rc = pc->pc_error;
        if (rc != 0)
                GOTO(out_set, rc);

        RETURN(0);

out_set:
        if (pc->pc_set != NULL) {
                struct ptlrpc_request_set *set = pc->pc_set;

                spin_lock(&pc->pc_lock);
                pc->pc_set = NULL;
                spin_unlock(&pc->pc_lock);
                ptlrpc_set_destroy(set);
        }
        lu_context_fini(&pc->pc_env.le_ctx);
out:
        clear_bit(LIOD_START, &pc->pc_flags);
        RETURN(rc);
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                return;
        }

        set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                set_bit(LIOD_FORCE, &pc->pc_flags);
        wake_up(&pc->pc_set->set_waitq);
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        wait_for_completion(&pc->pc_finishing);
        lu_context_fini(&pc->pc_env.le_ctx);

        spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

        clear_bit(LIOD_START, &pc->pc_flags);
        clear_bit(LIOD_STOP, &pc->pc_flags);
        clear_bit(LIOD_FORCE, &pc->pc_flags);

out:
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
        EXIT;
}
static void ptlrpcd_fini(void)
{
        int i;
        int j;
        int ncpts;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds_num; i++) {
                        if (ptlrpcds[i] == NULL)
                                break;
                        /* Stop every thread in the CPT before freeing any
                         * of them, since partners peek into each other's
                         * pc_set. */
                        for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
                                ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
                        for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
                                ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
                        OBD_FREE(ptlrpcds[i], ptlrpcds[i]->pd_size);
                        ptlrpcds[i] = NULL;
                }
                OBD_FREE(ptlrpcds, sizeof(ptlrpcds[0]) * ptlrpcds_num);
                ptlrpcds = NULL;
        }
        ptlrpcds_num = 0;

        ptlrpcd_stop(&ptlrpcd_rcv, 0);
        ptlrpcd_free(&ptlrpcd_rcv);

        if (ptlrpcds_cpt_idx != NULL) {
                ncpts = cfs_cpt_number(cfs_cpt_table);
                OBD_FREE(ptlrpcds_cpt_idx, ncpts * sizeof(ptlrpcds_cpt_idx[0]));
                ptlrpcds_cpt_idx = NULL;
        }

        EXIT;
}
static int ptlrpcd_init(void)
{
        int nthreads;
        int groupsize;
        int size;
        int i;
        int j;
        int rc = 0;
        struct cfs_cpt_table *cptable;
        __u32 *cpts = NULL;
        int ncpts;
        int cpt;
        struct ptlrpcd *pd;
        ENTRY;

        /*
         * Determine the CPTs that ptlrpcd threads will run on.
         */
        cptable = cfs_cpt_table;
        ncpts = cfs_cpt_number(cptable);
        if (ptlrpcd_cpts != NULL) {
                struct cfs_expr_list *el;

                size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
                OBD_ALLOC(ptlrpcds_cpt_idx, size);
                if (ptlrpcds_cpt_idx == NULL)
                        GOTO(out, rc = -ENOMEM);

                rc = cfs_expr_list_parse(ptlrpcd_cpts,
                                         strlen(ptlrpcd_cpts),
                                         0, ncpts - 1, &el);
                if (rc != 0) {
                        CERROR("%s: invalid CPT pattern string: %s\n",
                               "ptlrpcd_cpts", ptlrpcd_cpts);
                        GOTO(out, rc = -EINVAL);
                }

                rc = cfs_expr_list_values(el, ncpts, &cpts);
                cfs_expr_list_free(el);
                if (rc <= 0) {
                        CERROR("%s: failed to parse CPT array %s: %d\n",
                               "ptlrpcd_cpts", ptlrpcd_cpts, rc);
                        if (rc == 0)
                                rc = -EINVAL;
                        GOTO(out, rc);
                }

                /*
                 * Create the cpt-to-index map. When there is no match
                 * in the cpt table, pick a cpt at random. This could
                 * be changed to take the topology of the system into
                 * account.
                 */
                for (cpt = 0; cpt < ncpts; cpt++) {
                        for (i = 0; i < rc; i++)
                                if (cpts[i] == cpt)
                                        break;
                        if (i >= rc)
                                i = cpt % rc;
                        ptlrpcds_cpt_idx[cpt] = i;
                }

                /* cpts[] is still used below to map partition indexes to
                 * CPT numbers; it is freed at out. */
                ncpts = rc;
        }
        ptlrpcds_num = ncpts;

        size = ncpts * sizeof(ptlrpcds[0]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * The max_ptlrpcds parameter is obsolete, but do something
         * sane if it has been tuned, and complain if
         * ptlrpcd_per_cpt_max has also been tuned.
         */
        if (max_ptlrpcds != 0) {
                CWARN("max_ptlrpcds is obsolete.\n");
                if (ptlrpcd_per_cpt_max == 0) {
                        ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
                        /* Round up if there is a remainder. */
                        if (max_ptlrpcds % ncpts != 0)
                                ptlrpcd_per_cpt_max++;
                        CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
                              ptlrpcd_per_cpt_max);
                } else {
                        CWARN("ptlrpcd_per_cpt_max is also set!\n");
                }
        }
        /*
         * The ptlrpcd_bind_policy parameter is obsolete, but do
         * something sane if it has been tuned, and complain if
         * ptlrpcd_partner_group_size is also tuned.
         */
        if (ptlrpcd_bind_policy != 0) {
                CWARN("ptlrpcd_bind_policy is obsolete.\n");
                if (ptlrpcd_partner_group_size == 0) {
                        switch (ptlrpcd_bind_policy) {
                        case 1: /* PDB_POLICY_NONE */
                        case 2: /* PDB_POLICY_FULL */
                                ptlrpcd_partner_group_size = 1;
                                break;
                        case 3: /* PDB_POLICY_PAIR */
                                ptlrpcd_partner_group_size = 2;
                                break;
                        case 4: /* PDB_POLICY_NEIGHBOR */
#ifdef CONFIG_NUMA
                                ptlrpcd_partner_group_size = -1; /* CPT */
#else
                                ptlrpcd_partner_group_size = 3; /* Triplets */
#endif
                                break;
                        default: /* Illegal value, use the default. */
                                ptlrpcd_partner_group_size = 2;
                                break;
                        }
                        CWARN("Setting ptlrpcd_partner_group_size = %d\n",
                              ptlrpcd_partner_group_size);
                } else {
                        CWARN("ptlrpcd_partner_group_size is also set!\n");
                }
        }
        if (ptlrpcd_partner_group_size == 0)
                ptlrpcd_partner_group_size = 2;
        else if (ptlrpcd_partner_group_size < 0)
                ptlrpcd_partner_group_size = -1;
        else if (ptlrpcd_per_cpt_max > 0 &&
                 ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
                ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;
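        /*
         * Note on the clamping above: e.g. ptlrpcd_per_cpt_max=1 with the
         * default group size of 2 would otherwise ask for a 2-thread group
         * in a 1-thread CPT, so the group size is clamped to 1 (no
         * partners). Any negative value is normalized to -1, meaning the
         * whole CPT forms one partner group.
         */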
        /*
         * Start the recovery thread first.
         */
        set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
        ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
        rc = ptlrpcd_start(&ptlrpcd_rcv);
        if (rc < 0)
                GOTO(out, rc);

        for (i = 0; i < ncpts; i++) {
                if (cpts == NULL)
                        cpt = i;
                else
                        cpt = cpts[i];

                nthreads = cfs_cpt_weight(cptable, cpt);
                if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
                        nthreads = ptlrpcd_per_cpt_max;
                if (nthreads < 2)
                        nthreads = 2;

                if (ptlrpcd_partner_group_size <= 0) {
                        groupsize = nthreads;
                } else if (nthreads <= ptlrpcd_partner_group_size) {
                        groupsize = nthreads;
                } else {
                        groupsize = ptlrpcd_partner_group_size;
                        if (nthreads % groupsize != 0)
                                nthreads += groupsize - (nthreads % groupsize);
                }
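                /*
                 * Rounding example: with 5 usable cores in the CPT and
                 * groupsize 2, nthreads is rounded up from 5 to 6 so the
                 * threads divide into three full partner pairs.
                 */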
                size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
                OBD_CPT_ALLOC(pd, cptable, cpt, size);
                if (pd == NULL)
                        GOTO(out, rc = -ENOMEM);
                pd->pd_size = size;
                pd->pd_index = i;
                pd->pd_cpt = cpt;
                pd->pd_nthreads = nthreads;
                pd->pd_groupsize = groupsize;
                ptlrpcds[i] = pd;

                /*
                 * The ptlrpcd threads in a partner group can access
                 * each other's struct ptlrpcd_ctl, so these must be
                 * initialized before any thread is started.
                 */
                for (j = 0; j < nthreads; j++) {
                        ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
                        rc = ptlrpcd_partners(pd, j);
                        if (rc < 0)
                                GOTO(out, rc);
                }
                /* XXX: We start nthreads ptlrpc daemons on this cpt.
                 *      Each of them can process any non-recovery
                 *      async RPC to improve overall async RPC
                 *      efficiency.
                 *
                 *      But there are some issues with async I/O RPCs
                 *      and async non-I/O RPCs processed in the same
                 *      set under some cases. The ptlrpcd may be
                 *      blocked by some async I/O RPC(s), which then
                 *      prevents other async non-I/O RPC(s) from being
                 *      processed in time.
                 *
                 *      Maybe we should distinguish blocked async RPCs
                 *      from non-blocked async RPCs, and process them
                 *      in different ptlrpcd sets to avoid unnecessary
                 *      dependency. But how to distribute the async RPC
                 *      load among all the ptlrpc daemons becomes
                 *      another trouble.
                 */
                for (j = 0; j < nthreads; j++) {
                        rc = ptlrpcd_start(&pd->pd_threads[j]);
                        if (rc < 0)
                                GOTO(out, rc);
                }
        }
out:
        if (cpts != NULL)
                cfs_expr_list_values_free(cpts, ncpts);
        if (rc != 0)
                ptlrpcd_fini();

        RETURN(rc);
}
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        mutex_lock(&ptlrpcd_mutex);
        if (++ptlrpcd_users == 1) {
                rc = ptlrpcd_init();
                if (rc < 0)
                        ptlrpcd_users--;
        }
        mutex_unlock(&ptlrpcd_mutex);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);
void ptlrpcd_decref(void)
{
        mutex_lock(&ptlrpcd_mutex);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
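/*
 * Usage note (derived from the two functions above): ptlrpcd is
 * reference-counted. The first ptlrpcd_addref() spawns the recovery
 * thread and the per-CPT pools via ptlrpcd_init(); the matching last
 * ptlrpcd_decref() tears everything down again. Callers pair them,
 * e.g. (illustrative only):
 *
 *      rc = ptlrpcd_addref();
 *      if (rc == 0) {
 *              ... queue async requests with ptlrpcd_add_req() ...
 *              ptlrpcd_decref();
 *      }
 */

/** @} ptlrpcd */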