/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */
/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set, to which other
 * threads may add requests when they do not want to wait for their
 * completion.
 * PtlRPCD takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important to never significantly block (esp. on RPCs!) within such
 * completion handler or a deadlock might occur where ptlrpcd enters some
 * callback that attempts to send another RPC and wait for it to return,
 * during which time ptlrpcd is completely blocked, so e.g. if import
 * fails, recovery cannot progress because connection requests are also
 * sent by ptlrpcd.
 *
 * @{
 */
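/*
 * Illustrative sketch (not compiled): the typical way a caller hands an
 * RPC to ptlrpcd instead of waiting for it synchronously.  Request
 * preparation is elided; my_async_interpret() and my_prepped_request()
 * are hypothetical names, and only rq_interpret_reply plus
 * ptlrpcd_add_req() are the ptlrpcd-relevant parts.
 *
 *      static int my_async_interpret(const struct lu_env *env,
 *                                    struct ptlrpc_request *req,
 *                                    void *args, int rc)
 *      {
 *              // Runs in ptlrpcd context: must not block on other RPCs.
 *              return rc;
 *      }
 *
 *      void my_send_async(void)
 *      {
 *              struct ptlrpc_request *req = my_prepped_request();
 *
 *              req->rq_interpret_reply = my_async_interpret;
 *              ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 *              // Do not wait here; the callback fires on completion.
 *      }
 */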
#define DEBUG_SUBSYSTEM S_RPC

#ifdef __KERNEL__
# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif

#include <lustre_net.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <obd_class.h>   /* for obd_zombie */
#include <obd_support.h> /* for OBD_FAIL_CHECK */
#include <cl_object.h>   /* cl_env_{get,put}() */
#include <lprocfs_status.h>

#include "ptlrpc_internal.h"
struct ptlrpcd {
        int                pd_size;
        int                pd_index;
        int                pd_nthreads;
        struct ptlrpcd_ctl pd_thread_rcv;
        struct ptlrpcd_ctl pd_threads[0];
};
static int max_ptlrpcds;
CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
                "Max ptlrpcd thread count to be started.");

static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
                "Ptlrpcd threads binding mode.");

static struct ptlrpcd *ptlrpcds;

cfs_semaphore_t ptlrpcd_sem;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *rq_set = req->rq_set;

        LASSERT(rq_set != NULL);

        cfs_waitq_signal(&rq_set->set_waitq);
}
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
{
        int idx = 0;

        if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
                return &ptlrpcds->pd_thread_rcv;

#ifdef __KERNEL__
        switch (policy) {
        case PDL_POLICY_SAME:
                idx = cfs_smp_processor_id() % ptlrpcds->pd_nthreads;
                break;
        case PDL_POLICY_LOCAL:
                /* Before the CPU partition patches are available, process
                 * it the same as "PDL_POLICY_ROUND". */
# ifdef CFS_CPU_MODE_NUMA
# warning "fix this code to use new CPU partition APIs"
# endif
                /* Fall through to PDL_POLICY_ROUND until the CPU
                 * partition patches are available. */
                index = -1;
        case PDL_POLICY_PREFERRED:
                if (index >= 0 && index < cfs_num_online_cpus()) {
                        idx = index % ptlrpcds->pd_nthreads;
                        break;
                }
                /* Fall through to PDL_POLICY_ROUND for bad index. */
        default:
                /* Fall through to PDL_POLICY_ROUND for unknown policy. */
        case PDL_POLICY_ROUND:
                /* We do not care whether it is strict load balance. */
                idx = ptlrpcds->pd_index + 1;
                if (idx == cfs_smp_processor_id())
                        idx++;
                idx %= ptlrpcds->pd_nthreads;
                ptlrpcds->pd_index = idx;
                break;
        }
#endif /* __KERNEL__ */

        return &ptlrpcds->pd_threads[idx];
}
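/*
 * Worked example for PDL_POLICY_ROUND above (illustrative numbers): with
 * pd_nthreads == 4 and pd_index == 2, the next pick is idx = 3; if the
 * caller happens to run on CPU 3, idx is bumped to 4 and wraps to 0.
 * Consecutive requests thus spread across all threads while avoiding the
 * caller's own CPU when possible.
 */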
/**
 * Move all requests from an existing request set to the ptlrpcd queue.
 * All requests from the set must be in phase RQ_PHASE_NEW.
 */
void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
{
        cfs_list_t *tmp, *pos;
#ifdef __KERNEL__
        struct ptlrpcd_ctl *pc;
        struct ptlrpc_request_set *new;
        int count, i;

        pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
        new = pc->pc_set;
#endif

        cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        cfs_list_entry(pos, struct ptlrpc_request,
                                       rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_NEW);
#ifdef __KERNEL__
                req->rq_set = new;
                req->rq_queued_time = cfs_time_current();
#else
                cfs_list_del_init(&req->rq_set_chain);
                req->rq_set = NULL;
                ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
                cfs_atomic_dec(&set->set_remaining);
#endif
        }

#ifdef __KERNEL__
        cfs_spin_lock(&new->set_new_req_lock);
        cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
        i = cfs_atomic_read(&set->set_remaining);
        count = cfs_atomic_add_return(i, &new->set_new_count);
        cfs_atomic_set(&set->set_remaining, 0);
        cfs_spin_unlock(&new->set_new_req_lock);
        if (count == i) {
                cfs_waitq_signal(&new->set_waitq);

                /* XXX: It may be unnecessary to wake up all the partners.
                 *      But to guarantee that the async RPCs are processed
                 *      ASAP, we have no better choice for now. */
                for (i = 0; i < pc->pc_npartners; i++)
                        cfs_waitq_signal(&pc->pc_partners[i]->pc_set->
                                         set_waitq);
        }
#endif
}
EXPORT_SYMBOL(ptlrpcd_add_rqset);
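/*
 * Illustrative sketch (not compiled): draining a locally prepared set
 * into ptlrpcd.  my_fill_set() is a hypothetical helper that adds
 * RQ_PHASE_NEW requests to the set.
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *      if (set != NULL) {
 *              my_fill_set(set);
 *              ptlrpcd_add_rqset(set);   // the set is left empty
 *              ptlrpc_set_destroy(set);
 *      }
 */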
/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                               struct ptlrpc_request_set *src)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        int rc = 0;

        cfs_spin_lock(&src->set_new_req_lock);
        if (likely(!cfs_list_empty(&src->set_new_requests))) {
                cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        req->rq_set = des;
                }
                cfs_list_splice_init(&src->set_new_requests,
                                     &des->set_requests);
                rc = cfs_atomic_read(&src->set_new_count);
                cfs_atomic_add(rc, &des->set_remaining);
                cfs_atomic_set(&src->set_new_count, 0);
        }
        cfs_spin_unlock(&src->set_new_req_lock);
        return rc;
}
/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
{
        struct ptlrpcd_ctl *pc;

        cfs_spin_lock(&req->rq_lock);
        if (req->rq_invalid_rqset) {
                struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                     back_to_sleep, NULL);

                req->rq_invalid_rqset = 0;
                cfs_spin_unlock(&req->rq_lock);
                l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
        } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid
                 * double linking. */
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

                /* ptlrpc_check_set will decrease the count */
                cfs_atomic_inc(&req->rq_set->set_remaining);
                cfs_spin_unlock(&req->rq_lock);
                cfs_waitq_signal(&req->rq_set->set_waitq);
                return;
        } else {
                cfs_spin_unlock(&req->rq_lock);
        }

        pc = ptlrpcd_select_pc(req, policy, idx);

        DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
                  req, pc->pc_name, pc->pc_index);

        ptlrpc_set_add_new_req(pc, req);
}
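/*
 * Illustrative sketch (not compiled): policy selection from a caller's
 * point of view.  An I/O path that already knows a target index can ask
 * for a preferred thread; everything else can fall back to round-robin.
 * my_req and my_target_index are hypothetical names.
 *
 *      if (my_target_index >= 0)
 *              ptlrpcd_add_req(my_req, PDL_POLICY_PREFERRED,
 *                              my_target_index);
 *      else
 *              ptlrpcd_add_req(my_req, PDL_POLICY_ROUND, -1);
 */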
static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
        cfs_atomic_inc(&set->set_refcount);
}
/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
        cfs_list_t *tmp, *pos;
        struct ptlrpc_request *req;
        struct ptlrpc_request_set *set = pc->pc_set;
        int rc = 0;
        int rc2;
        ENTRY;

        if (cfs_atomic_read(&set->set_new_count)) {
                cfs_spin_lock(&set->set_new_req_lock);
                if (likely(!cfs_list_empty(&set->set_new_requests))) {
                        cfs_list_splice_init(&set->set_new_requests,
                                             &set->set_requests);
                        cfs_atomic_add(cfs_atomic_read(&set->set_new_count),
                                       &set->set_remaining);
                        cfs_atomic_set(&set->set_new_count, 0);
                        /*
                         * Need to calculate its timeout.
                         */
                        rc = 1;
                }
                cfs_spin_unlock(&set->set_new_req_lock);
        }

        /* We should call lu_env_refill() before handling new requests to
         * make sure that the env keys the requests depend on really exist.
         */
        rc2 = lu_env_refill(env);
        if (rc2 != 0) {
                /*
                 * XXX This is a very awkward situation, because
                 * execution can neither continue (request
                 * interpreters assume that env is set up), nor repeat
                 * the loop (as this potentially results in a tight
                 * loop of -ENOMEM's).
                 *
                 * Fortunately, refill only ever does something when
                 * new modules are loaded, i.e., early during boot up.
                 */
                CERROR("Failure to refill session: %d\n", rc2);
                RETURN(rc);
        }

        if (cfs_atomic_read(&set->set_remaining))
                rc |= ptlrpc_check_set(env, set);

        if (!cfs_list_empty(&set->set_requests)) {
                /*
                 * XXX: our set never completes, so we prune the completed
                 * reqs after each iteration. boy could this be smarter.
                 */
                cfs_list_for_each_safe(pos, tmp, &set->set_requests) {
                        req = cfs_list_entry(pos, struct ptlrpc_request,
                                             rq_set_chain);
                        if (req->rq_phase != RQ_PHASE_COMPLETE)
                                continue;

                        cfs_list_del_init(&req->rq_set_chain);
                        req->rq_set = NULL;
                        ptlrpc_req_finished(req);
                }
        }

        if (rc == 0) {
                /*
                 * If new requests have been added, make sure to wake up.
                 */
                rc = cfs_atomic_read(&set->set_new_count);

#ifdef __KERNEL__
                /* If we have nothing to do, check whether we can take some
                 * work from our partner threads. */
                if (rc == 0 && pc->pc_npartners > 0) {
                        struct ptlrpcd_ctl *partner;
                        struct ptlrpc_request_set *ps;
                        int first = pc->pc_cursor;

                        do {
                                partner = pc->pc_partners[pc->pc_cursor++];
                                if (pc->pc_cursor >= pc->pc_npartners)
                                        pc->pc_cursor = 0;
                                if (partner == NULL)
                                        continue;

                                cfs_spin_lock(&partner->pc_lock);
                                ps = partner->pc_set;
                                if (ps == NULL) {
                                        cfs_spin_unlock(&partner->pc_lock);
                                        continue;
                                }

                                ptlrpc_reqset_get(ps);
                                cfs_spin_unlock(&partner->pc_lock);

                                if (cfs_atomic_read(&ps->set_new_count)) {
                                        rc = ptlrpcd_steal_rqset(set, ps);
                                        if (rc > 0)
                                                CDEBUG(D_RPCTRACE, "transfer %d"
                                                       " async RPCs [%d->%d]\n",
                                                       rc, partner->pc_index,
                                                       pc->pc_index);
                                }
                                ptlrpc_reqset_put(ps);
                        } while (rc == 0 && pc->pc_cursor != first);
                }
#endif
        }

        RETURN(rc);
}
#ifdef __KERNEL__
/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 */
static int ptlrpcd(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        struct ptlrpc_request_set *set = pc->pc_set;
        struct lu_env env = { .le_ses = NULL };
        int rc, exit = 0;
        ENTRY;

        cfs_daemonize_ctxt(pc->pc_name);
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
        if (cfs_test_bit(LIOD_BIND, &pc->pc_flags)) {
                int index = pc->pc_index;

                if (index >= 0 && index < cfs_num_possible_cpus()) {
                        /* If the CPU is offline, bind to the next online
                         * one instead. */
                        while (!cfs_cpu_online(index)) {
                                if (++index >= cfs_num_possible_cpus())
                                        index = 0;
                        }
                        cfs_set_cpus_allowed(cfs_current(),
                                        node_to_cpumask(cpu_to_node(index)));
                }
        }
#endif
        /*
         * XXX So far only "client" ptlrpcd uses an environment. In
         * the future, ptlrpcd thread (or a thread-set) has to be given
         * an argument, describing its "scope".
         */
        rc = lu_context_init(&env.le_ctx,
                             LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
        cfs_complete(&pc->pc_starting);

        if (rc != 0)
                RETURN(rc);

        /*
         * This mainloop strongly resembles ptlrpc_set_wait() except that our
         * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
         * there are requests in the set. New requests come in on the set's
         * new_req_list and ptlrpcd_check() moves them into the set.
         */
        do {
                struct l_wait_info lwi;
                int timeout;

                timeout = ptlrpc_set_next_timeout(set);
                lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
                                  ptlrpc_expired_set, set);

                lu_context_enter(&env.le_ctx);
                l_wait_event(set->set_waitq,
                             ptlrpcd_check(&env, pc), &lwi);
                lu_context_exit(&env.le_ctx);

                /*
                 * Abort inflight rpcs for forced stop case.
                 */
                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
                                ptlrpc_abort_set(set);
                        exit++;
                }

                /*
                 * Let's make one more loop to make sure that ptlrpcd_check()
                 * copied all raced new rpcs into the set so we can kill them.
                 */
        } while (exit < 2);

        /*
         * Wait for inflight requests to drain.
         */
        if (!cfs_list_empty(&set->set_requests))
                ptlrpc_set_wait(set);
        lu_context_fini(&env.le_ctx);
        cfs_complete(&pc->pc_finishing);

        cfs_clear_bit(LIOD_START, &pc->pc_flags);
        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
        return 0;
}
/* XXX: We want multiple CPU cores to share the async RPC load, so we start
 *      many ptlrpcd threads. We also want to reduce the ptlrpcd overhead
 *      caused by data transfer across CPU cores, so we bind each ptlrpcd
 *      thread to a specific CPU core. But binding all ptlrpcd threads may
 *      cause response delays when the bound CPU core(s) are busy with
 *      other load.
 *
 *      For example: during "ls -l", some async RPCs for statahead are
 *      assigned to ptlrpcd_0, which is bound to CPU_0, but CPU_0 may be
 *      quite busy with non-ptlrpcd work, such as the "ls -l" thread itself
 *      (we want the "ls -l" thread, the statahead thread, and the ptlrpcd
 *      thread to run in parallel). In that case the statahead async RPCs
 *      cannot be processed in time, which is unexpected. If ptlrpcd_0
 *      could be re-scheduled on another CPU core, the result might be
 *      better, but that breaks the data transfer policy above.
 *
 *      So we should not blindly avoid the cross-CPU data transfer; we make
 *      a compromise instead: divide the ptlrpcd thread pool into two
 *      parts. One part is bound mode, where each ptlrpcd thread is bound
 *      to some CPU core; the other part is free mode, where the ptlrpcd
 *      threads can be scheduled on any CPU core. We specify a partnership
 *      between bound mode ptlrpcd thread(s) and free mode ptlrpcd
 *      thread(s), and the async RPC load is shared among the partners.
 *
 *      This partly avoids cross-CPU data transfer (as long as the bound
 *      mode ptlrpcd thread can be scheduled in time) while still trying
 *      to guarantee that the async RPCs are processed ASAP (as long as
 *      the free mode ptlrpcd thread can be scheduled in time).
 *
 *      As for how to specify the partnership between bound mode ptlrpcd
 *      thread(s) and free mode ptlrpcd thread(s), the simplest way is to
 *      use a <free, bound> pair. In the future, we can specify a more
 *      complex partnership based on the CPU partition patches; until such
 *      patches are available, we prefer to use the simplest one.
 */
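/*
 * Worked example of PDB_POLICY_PAIR with 4 ptlrpcd threads (illustrative;
 * see ptlrpcd_bind() below): odd-indexed threads are bound, even-indexed
 * threads stay free, and each <free, bound> pair shares its queued RPCs
 * through the work stealing in ptlrpcd_check():
 *
 *      ptlrpcd_0 (free)  <=>  ptlrpcd_1 (bound)
 *      ptlrpcd_2 (free)  <=>  ptlrpcd_3 (bound)
 */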
# ifdef CFS_CPU_MODE_NUMA
# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
# endif
static int ptlrpcd_bind(int index, int max)
{
        struct ptlrpcd_ctl *pc;
        int rc = 0;
        ENTRY;

        LASSERT(index <= max - 1);
        pc = &ptlrpcds->pd_threads[index];
        switch (ptlrpcd_bind_policy) {
        case PDB_POLICY_NONE:
                pc->pc_npartners = -1;
                break;
        case PDB_POLICY_FULL:
                pc->pc_npartners = 0;
                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
                break;
        case PDB_POLICY_PAIR:
                LASSERT(max % 2 == 0);
                pc->pc_npartners = 1;
                break;
        case PDB_POLICY_NEIGHBOR:
                LASSERT(max >= 3);
                pc->pc_npartners = 2;
                break;
        default:
                CERROR("unknown ptlrpcd bind policy %d\n",
                       ptlrpcd_bind_policy);
                rc = -EINVAL;
        }

        if (rc == 0 && pc->pc_npartners > 0) {
                OBD_ALLOC(pc->pc_partners,
                          sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                if (pc->pc_partners == NULL) {
                        pc->pc_npartners = 0;
                        rc = -ENOMEM;
                } else {
                        if (index & 0x1)
                                cfs_set_bit(LIOD_BIND, &pc->pc_flags);

                        switch (ptlrpcd_bind_policy) {
                        case PDB_POLICY_PAIR:
                                if (index & 0x1) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[0] = pc;
                                }
                                break;
                        case PDB_POLICY_NEIGHBOR:
                                if (index > 0) {
                                        pc->pc_partners[0] = &ptlrpcds->
                                                pd_threads[index - 1];
                                        ptlrpcds->pd_threads[index - 1].
                                                pc_partners[1] = pc;
                                        if (index == max - 1) {
                                                pc->pc_partners[1] =
                                                        &ptlrpcds->pd_threads[0];
                                                ptlrpcds->pd_threads[0].
                                                        pc_partners[0] = pc;
                                        }
                                }
                                break;
                        }
                }
        }

        RETURN(rc);
}
#else /* !__KERNEL__ */

/**
 * In liblustre we do not have separate threads, so this function
 * is called from time to time all across common code to see
 * if something needs to be processed on the ptlrpcd set.
 */
int ptlrpcd_check_async_rpcs(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;
        int rc = 0;

        /* Single threaded!! */
        pc->pc_recurred++;
        if (pc->pc_recurred == 1) {
                rc = lu_env_refill(&pc->pc_env);
                if (rc == 0) {
                        lu_context_enter(&pc->pc_env.le_ctx);
                        rc = ptlrpcd_check(&pc->pc_env, pc);
                        if (!rc)
                                ptlrpc_expired_set(pc->pc_set);
                        /*
                         * XXX: send replay requests.
                         */
                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                rc = ptlrpcd_check(&pc->pc_env, pc);
                        lu_context_exit(&pc->pc_env.le_ctx);
                }
        }
        pc->pc_recurred--;
        return rc;
}

int ptlrpcd_idle(void *arg)
{
        struct ptlrpcd_ctl *pc = arg;

        return (cfs_atomic_read(&pc->pc_set->set_new_count) == 0 &&
                cfs_atomic_read(&pc->pc_set->set_remaining) == 0);
}

#endif /* __KERNEL__ */
int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
        int rc;
        int env = 0;
        ENTRY;

        /* Do not allow starting a second thread for one pc. */
        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Starting second thread (%s) for same pc %p\n",
                      name, pc);
                RETURN(0);
        }

        pc->pc_index = index;
        cfs_init_completion(&pc->pc_starting);
        cfs_init_completion(&pc->pc_finishing);
        cfs_spin_lock_init(&pc->pc_lock);
        strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
        pc->pc_set = ptlrpc_prep_set();
        if (pc->pc_set == NULL)
                GOTO(out, rc = -ENOMEM);
        /*
         * So far only "client" ptlrpcd uses an environment. In the future,
         * ptlrpcd thread (or a thread-set) has to be given an argument,
         * describing its "scope".
         */
        rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
        if (rc != 0)
                GOTO(out, rc);

        env = 1;
#ifdef __KERNEL__
        if (index >= 0) {
                rc = ptlrpcd_bind(index, max);
                if (rc < 0)
                        GOTO(out, rc);
        }

        rc = cfs_create_thread(ptlrpcd, pc, 0);
        if (rc < 0)
                GOTO(out, rc);

        rc = 0;
        cfs_wait_for_completion(&pc->pc_starting);
#else
        pc->pc_wait_callback =
                liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
                                                 &ptlrpcd_check_async_rpcs, pc);
        pc->pc_idle_callback =
                liblustre_register_idle_callback("ptlrpcd_check_idle_rpcs",
                                                 &ptlrpcd_idle, pc);
#endif
out:
        if (rc) {
#ifdef __KERNEL__
                if (pc->pc_set != NULL) {
                        struct ptlrpc_request_set *set = pc->pc_set;

                        cfs_spin_lock(&pc->pc_lock);
                        pc->pc_set = NULL;
                        cfs_spin_unlock(&pc->pc_lock);
                        ptlrpc_set_destroy(set);
                }
                if (env != 0)
                        lu_context_fini(&pc->pc_env.le_ctx);
                cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
#endif
                cfs_clear_bit(LIOD_START, &pc->pc_flags);
        }
        RETURN(rc);
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        ENTRY;

        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
                CWARN("Thread for pc %p was not started\n", pc);
                goto out;
        }

        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
        if (force)
                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
        cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
        cfs_wait_for_completion(&pc->pc_finishing);
#else
        liblustre_deregister_wait_callback(pc->pc_wait_callback);
        liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
        lu_context_fini(&pc->pc_env.le_ctx);

        cfs_spin_lock(&pc->pc_lock);
        pc->pc_set = NULL;
        cfs_spin_unlock(&pc->pc_lock);
        ptlrpc_set_destroy(set);

out:
#ifdef __KERNEL__
        if (pc->pc_npartners > 0) {
                LASSERT(pc->pc_partners != NULL);

                OBD_FREE(pc->pc_partners,
                         sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
                pc->pc_partners = NULL;
        }
        pc->pc_npartners = 0;
#endif
        EXIT;
}
static void ptlrpcd_fini(void)
{
        int i;
        ENTRY;

        if (ptlrpcds != NULL) {
                for (i = 0; i < ptlrpcds->pd_nthreads; i++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
                ptlrpcds = NULL;
        }
        EXIT;
}
static int ptlrpcd_init(void)
{
        int nthreads = cfs_num_online_cpus();
        char name[16];
        int size, i = -1, j, rc = 0;
        ENTRY;

#ifdef __KERNEL__
        if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
                nthreads = max_ptlrpcds;
        if (nthreads < 2)
                nthreads = 2;
        if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
                ptlrpcd_bind_policy = PDB_POLICY_PAIR;
        else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
                nthreads &= ~1; /* make sure it is even */
#else
        nthreads = 1;
#endif

        size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
        OBD_ALLOC(ptlrpcds, size);
        if (ptlrpcds == NULL)
                GOTO(out, rc = -ENOMEM);

        snprintf(name, 15, "ptlrpcd_rcv");
        cfs_set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
        rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
        if (rc < 0)
                GOTO(out, rc);

        /* XXX: We start nthreads ptlrpc daemons. Each of them can process
         *      any non-recovery async RPC to improve overall async RPC
         *      efficiency.
         *
         *      But there are some issues with async I/O RPCs and async
         *      non-I/O RPCs processed in the same set under some cases.
         *      The ptlrpcd may be blocked by some async I/O RPC(s), and
         *      then other async non-I/O RPC(s) cannot be processed in
         *      time.
         *
         *      Maybe we should distinguish blocked async RPCs from
         *      non-blocked async RPCs, and process them in different
         *      ptlrpcd sets to avoid the unnecessary dependency. But how
         *      to distribute the async RPC load among all the ptlrpc
         *      daemons then becomes another problem. */
        for (i = 0; i < nthreads; i++) {
                snprintf(name, 15, "ptlrpcd_%d", i);
                rc = ptlrpcd_start(i, nthreads, name,
                                   &ptlrpcds->pd_threads[i]);
                if (rc < 0)
                        GOTO(out, rc);
        }

        ptlrpcds->pd_size = size;
        ptlrpcds->pd_index = 0;
        ptlrpcds->pd_nthreads = nthreads;

out:
        if (rc != 0 && ptlrpcds != NULL) {
                for (j = 0; j <= i; j++)
                        ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
                ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
                OBD_FREE(ptlrpcds, size);
                ptlrpcds = NULL;
        }
        RETURN(rc);
}
int ptlrpcd_addref(void)
{
        int rc = 0;
        ENTRY;

        cfs_mutex_down(&ptlrpcd_sem);
        if (++ptlrpcd_users == 1)
                rc = ptlrpcd_init();
        cfs_mutex_up(&ptlrpcd_sem);
        RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
        cfs_mutex_down(&ptlrpcd_sem);
        if (--ptlrpcd_users == 0)
                ptlrpcd_fini();
        cfs_mutex_up(&ptlrpcd_sem);
}
EXPORT_SYMBOL(ptlrpcd_decref);
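/*
 * Illustrative sketch (not compiled): a client takes a reference on the
 * ptlrpcd pool for as long as it may queue async RPCs.  The function
 * names my_client_setup()/my_client_cleanup() are hypothetical.
 *
 *      int my_client_setup(void)
 *      {
 *              return ptlrpcd_addref();  // first user starts the pool
 *      }
 *
 *      void my_client_cleanup(void)
 *      {
 *              ptlrpcd_decref();         // last user tears it down
 *      }
 */

/** @} ptlrpcd */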