X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fptlrpcd.c;h=5b5ef375b87034e5f901a6e1f87a32ec816cba92;hp=7b56097bd1b09a1c80aeb21ab8e350cd50249e86;hb=917655fc2938b90a9c246dd2d58408c42aa1658d;hpb=e35286d8008085c04593db74dbf051a29d97a7bb

diff --git a/lustre/ptlrpc/ptlrpcd.c b/lustre/ptlrpc/ptlrpcd.c
index 7b56097..5b5ef37 100644
--- a/lustre/ptlrpc/ptlrpcd.c
+++ b/lustre/ptlrpc/ptlrpcd.c
@@ -1,243 +1,982 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
 *
- * Copyright (C) 2001-2003 Cluster File Systems, Inc.
- * Author Peter Braam
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
 *
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
 *
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
 *
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2017, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
 *
+ * lustre/ptlrpc/ptlrpcd.c
+ */
+
+/** \defgroup ptlrpcd PortalRPC daemon
+ *
+ * ptlrpcd is a special thread with its own set where other users might add
+ * requests when they don't want to wait for their completion.
+ * PtlRPCD will take care of sending such requests and then processing their
+ * replies and calling completion callbacks as necessary.
+ * The callbacks are called directly from ptlrpcd context.
+ * It is important never to block significantly (esp. on RPCs!) within such
+ * a completion handler, or a deadlock might occur: ptlrpcd enters a
+ * callback that attempts to send another RPC and waits for it to return,
+ * during which time ptlrpcd is completely blocked, so if, e.g., an import
+ * fails, recovery cannot progress because connection requests are also
+ * sent by ptlrpcd.
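+ *
+ * A typical caller therefore attaches an interpret callback and queues the
+ * request instead of waiting on it.  A minimal sketch (the caller and its
+ * "my_done_cb" are hypothetical, not part of this file; error handling
+ * elided):
+ *
+ * \code
+ *	req->rq_interpret_reply = my_done_cb;	// runs in ptlrpcd context
+ *	ptlrpcd_add_req(req);			// queue it; return at once
+ * \endcode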
+ *
+ * @{ */

 #define DEBUG_SUBSYSTEM S_RPC

-#ifdef __KERNEL__
-# include
-# include
-# include
-# include
-# include
-# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-# include
-# include
-# else
-# include
-# endif
-#else /* __KERNEL__ */
-# include
-#endif
+#include
+#include
+#include
+#include
+#include /* for obd_zombie */
+#include /* for OBD_FAIL_CHECK */
+#include /* cl_env_{get,put}() */
+#include
+
+#include "ptlrpc_internal.h"
+
+/* One of these per CPT. */
+struct ptlrpcd {
+	int pd_size;
+	int pd_index;
+	int pd_cpt;
+	int pd_cursor;
+	int pd_nthreads;
+	int pd_groupsize;
+	struct ptlrpcd_ctl pd_threads[0];
+};
+
+/*
+ * max_ptlrpcds is obsolete, but retained to ensure that the kernel
+ * module will load on a system where it has been tuned.
+ * A value other than 0 implies it was tuned, in which case the value
+ * is used to derive a setting for ptlrpcd_per_cpt_max.
+ */
+static int max_ptlrpcds;
+module_param(max_ptlrpcds, int, 0644);
+MODULE_PARM_DESC(max_ptlrpcds,
+		 "Max ptlrpcd thread count to be started (obsolete).");
+
+/*
+ * ptlrpcd_bind_policy is obsolete, but retained to ensure that
+ * the kernel module will load on a system where it has been tuned.
+ * A value other than 0 implies it was tuned, in which case the value
+ * is used to derive a setting for ptlrpcd_partner_group_size.
+ */
+static int ptlrpcd_bind_policy;
+module_param(ptlrpcd_bind_policy, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_bind_policy,
+		 "Ptlrpcd threads binding mode (obsolete).");
+
+/*
+ * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
+ * in a CPT.
+ */
+static int ptlrpcd_per_cpt_max;
+module_param(ptlrpcd_per_cpt_max, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
+		 "Max ptlrpcd thread count to be started per CPT.");
+
+/*
+ * ptlrpcd_partner_group_size: The desired number of threads in each
+ * ptlrpcd partner thread group.  Default is 2, corresponding to the
+ * old PDB_POLICY_PAIR.  A negative value makes all ptlrpcd threads in
+ * a CPT partners of each other.
+ */
+static int ptlrpcd_partner_group_size;
+module_param(ptlrpcd_partner_group_size, int, 0644);
+MODULE_PARM_DESC(ptlrpcd_partner_group_size,
+		 "Number of ptlrpcd threads in a partner group.");
+
+/*
+ * ptlrpcd_cpts: A CPT string describing the CPU partitions that
+ * ptlrpcd threads should run on.  Used to make ptlrpcd threads run on
+ * a subset of all CPTs.
+ *
+ * ptlrpcd_cpts=2
+ * ptlrpcd_cpts=[2]
+ *   run ptlrpcd threads only on CPT 2.
+ *
+ * ptlrpcd_cpts=0-3
+ * ptlrpcd_cpts=[0-3]
+ *   run ptlrpcd threads on CPTs 0, 1, 2, and 3.
+ *
+ * ptlrpcd_cpts=[0-3,5,7]
+ *   run ptlrpcd threads on CPTs 0, 1, 2, 3, 5, and 7.
+ */
+static char *ptlrpcd_cpts;
+module_param(ptlrpcd_cpts, charp, 0644);
+MODULE_PARM_DESC(ptlrpcd_cpts,
+		 "CPU partitions ptlrpcd threads should run in");
+
+/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
+static int *ptlrpcds_cpt_idx;
+
+/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
+static int ptlrpcds_num;
+static struct ptlrpcd **ptlrpcds;
+
+/*
+ * In addition to the regular thread pool above, there is a single
+ * global recovery thread.  Recovery isn't critical for performance,
+ * and doesn't block, but it must always be able to proceed, and it is
+ * possible that all normal ptlrpcd threads are blocked.  Hence the
+ * need for a dedicated thread.
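+ * The recovery thread is also the destination for any request whose
+ * import is not in the FULL state (see ptlrpcd_select_pc() below).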
+ */
+static struct ptlrpcd_ctl ptlrpcd_rcv;

-#include
-#include
+struct mutex ptlrpcd_mutex;
+static int ptlrpcd_users = 0;

-#ifndef __CYGWIN__
-# include
-# include
-#else
-# include
-#endif
+void ptlrpcd_wake(struct ptlrpc_request *req)
+{
+	struct ptlrpc_request_set *set = req->rq_set;

-#include
-#include /* for OBD_FAIL_CHECK */
-#include
-
-#define LIOD_STOP 0
-static struct ptlrpcd_ctl {
-	unsigned long pc_flags;
-	spinlock_t pc_lock;
-	struct completion pc_starting;
-	struct completion pc_finishing;
-	struct list_head pc_req_list;
-	wait_queue_head_t pc_waitq;
-	struct ptlrpc_request_set *pc_set;
-} ptlrpcd_pc;
-
-static DECLARE_MUTEX(ptlrpcd_sem);
-static int ptlrpcd_users = 0;
+	LASSERT(set != NULL);
+	wake_up(&set->set_waitq);
+}
+EXPORT_SYMBOL(ptlrpcd_wake);

-void ptlrpcd_wake(void)
+static struct ptlrpcd_ctl *
+ptlrpcd_select_pc(struct ptlrpc_request *req)
 {
-	struct ptlrpcd_ctl *pc = &ptlrpcd_pc;
-	wake_up(&pc->pc_waitq);
+	struct ptlrpcd *pd;
+	int cpt;
+	int idx;
+
+	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
+		return &ptlrpcd_rcv;
+
+	cpt = cfs_cpt_current(cfs_cpt_table, 1);
+	if (ptlrpcds_cpt_idx == NULL)
+		idx = cpt;
+	else
+		idx = ptlrpcds_cpt_idx[cpt];
+	pd = ptlrpcds[idx];
+
+	/* We do not care whether it is strict load balance. */
+	idx = pd->pd_cursor;
+	if (++idx == pd->pd_nthreads)
+		idx = 0;
+	pd->pd_cursor = idx;
+
+	return &pd->pd_threads[idx];
 }

-void ptlrpcd_add_req(struct ptlrpc_request *req)
+/**
+ * Move all requests from an existing request set to the ptlrpcd queue.
+ * All requests from the set must be in phase RQ_PHASE_NEW.
+ */
+void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
 {
-	struct ptlrpcd_ctl *pc = &ptlrpcd_pc;
+	struct list_head *tmp, *pos;
+	struct ptlrpcd_ctl *pc;
+	struct ptlrpc_request_set *new;
+	int count, i;
+
+	pc = ptlrpcd_select_pc(NULL);
+	new = pc->pc_set;
+
+	list_for_each_safe(pos, tmp, &set->set_requests) {
+		struct ptlrpc_request *req =
+			list_entry(pos, struct ptlrpc_request,
+				   rq_set_chain);
+
+		LASSERT(req->rq_phase == RQ_PHASE_NEW);
+		req->rq_set = new;
+		req->rq_queued_time = ktime_get_seconds();
+	}
+
+	spin_lock(&new->set_new_req_lock);
+	list_splice_init(&set->set_requests, &new->set_new_requests);
+	i = atomic_read(&set->set_remaining);
+	count = atomic_add_return(i, &new->set_new_count);
+	atomic_set(&set->set_remaining, 0);
+	spin_unlock(&new->set_new_req_lock);
+	if (count == i) {
+		wake_up(&new->set_waitq);
+
+		/*
+		 * XXX: It may be unnecessary to wake up all the partners,
+		 * but to guarantee that the async RPCs are processed ASAP
+		 * we have no better choice.  This may be fixed in the
+		 * future.
+		 */
+		for (i = 0; i < pc->pc_npartners; i++)
+			wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+	}
+}

-	ptlrpc_set_add_new_req(pc->pc_set, req);
-	ptlrpcd_wake();
+/**
+ * Return the number of RPCs transferred.
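+ *
+ * Called from ptlrpcd_check() when a ptlrpcd thread finds no work of its
+ * own and polls its partners' sets instead.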
+ */
+static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
+			       struct ptlrpc_request_set *src)
+{
+	struct list_head *tmp, *pos;
+	struct ptlrpc_request *req;
+	int rc = 0;
+
+	spin_lock(&src->set_new_req_lock);
+	if (likely(!list_empty(&src->set_new_requests))) {
+		list_for_each_safe(pos, tmp, &src->set_new_requests) {
+			req = list_entry(pos, struct ptlrpc_request,
+					 rq_set_chain);
+			req->rq_set = des;
+		}
+		list_splice_init(&src->set_new_requests,
+				 &des->set_requests);
+		rc = atomic_read(&src->set_new_count);
+		atomic_add(rc, &des->set_remaining);
+		atomic_set(&src->set_new_count, 0);
+	}
+	spin_unlock(&src->set_new_req_lock);
+	return rc;
 }

-static int ptlrpcd_check(struct ptlrpcd_ctl *pc)
+/**
+ * Requests that are added to the ptlrpcd queue are sent via
+ * ptlrpcd_check->ptlrpc_check_set().
+ */
+void ptlrpcd_add_req(struct ptlrpc_request *req)
 {
-	struct list_head *tmp, *pos;
-	struct ptlrpc_request *req;
-	unsigned long flags;
-	int rc = 0;
-	ENTRY;
+	struct ptlrpcd_ctl *pc;
+
+	if (req->rq_reqmsg)
+		lustre_msg_set_jobid(req->rq_reqmsg, NULL);
+
+	spin_lock(&req->rq_lock);
+	if (req->rq_invalid_rqset) {
+		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
+						     back_to_sleep, NULL);
+
+		req->rq_invalid_rqset = 0;
+		spin_unlock(&req->rq_lock);
+		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+	} else if (req->rq_set) {
+		/*
+		 * If we have a valid "rq_set", just reuse it to avoid
+		 * linking the request twice.
+		 */
+		LASSERT(req->rq_phase == RQ_PHASE_NEW);
+		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
+
+		/* ptlrpc_check_set will decrease the count */
+		atomic_inc(&req->rq_set->set_remaining);
+		spin_unlock(&req->rq_lock);
+		wake_up(&req->rq_set->set_waitq);
+		return;
+	} else {
+		spin_unlock(&req->rq_lock);
+	}
+
+	pc = ptlrpcd_select_pc(req);
+
+	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s+%d]",
+		  req, pc->pc_name, pc->pc_index);
+
+	ptlrpc_set_add_new_req(pc, req);
+}
+EXPORT_SYMBOL(ptlrpcd_add_req);

-	if (test_bit(LIOD_STOP, &pc->pc_flags))
-		RETURN(1);
+static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
+{
+	atomic_inc(&set->set_refcount);
+}

-	spin_lock_irqsave(&pc->pc_set->set_new_req_lock, flags);
-	list_for_each_safe(pos, tmp, &pc->pc_set->set_new_requests) {
-		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
-		list_del_init(&req->rq_set_chain);
-		ptlrpc_set_add_req(pc->pc_set, req);
-		rc = 1; /* need to calculate its timeout */
-	}
-	spin_unlock_irqrestore(&pc->pc_set->set_new_req_lock, flags);
+/**
+ * Check if there is more work to do on ptlrpcd set.
+ * Returns 1 if yes.
+ */
+static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
+{
+	struct list_head *tmp, *pos;
+	struct ptlrpc_request *req;
+	struct ptlrpc_request_set *set = pc->pc_set;
+	int rc = 0;
+	int rc2;
+
+	ENTRY;
+
+	if (atomic_read(&set->set_new_count)) {
+		spin_lock(&set->set_new_req_lock);
+		if (likely(!list_empty(&set->set_new_requests))) {
+			list_splice_init(&set->set_new_requests,
+					 &set->set_requests);
+			atomic_add(atomic_read(&set->set_new_count),
+				   &set->set_remaining);
+			atomic_set(&set->set_new_count, 0);
+			/*
+			 * Need to calculate its timeout.
+			 */
+			rc = 1;
+		}
+		spin_unlock(&set->set_new_req_lock);
+	}
+
+	/*
+	 * We should call lu_env_refill() before handling new requests to
+	 * make sure that the env keys the requests depend on really exist.
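+	 * (An env "key" here is a per-context value slot that a module
+	 * registers via lu_context_key; lu_env_refill() allocates slots
+	 * for keys registered after this context was created.)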
+	 */
+	rc2 = lu_env_refill(env);
+	if (rc2 != 0) {
+		/*
+		 * XXX This is a very awkward situation, because
+		 * execution can neither continue (request
+		 * interpreters assume that env is set up), nor repeat
+		 * the loop (as this potentially results in a tight
+		 * loop of -ENOMEM's).
+		 *
+		 * Fortunately, refill only ever does something when
+		 * new modules are loaded, i.e., early during boot up.
+		 */
+		CERROR("Failure to refill session: %d\n", rc2);
+		RETURN(rc);
+	}
+
+	if (atomic_read(&set->set_remaining))
+		rc |= ptlrpc_check_set(env, set);
+
+	/*
+	 * NB: ptlrpc_check_set() has already moved completed requests to
+	 * the head of set::set_requests.
+	 */
+	list_for_each_safe(pos, tmp, &set->set_requests) {
+		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
+		if (req->rq_phase != RQ_PHASE_COMPLETE)
+			break;
+
+		list_del_init(&req->rq_set_chain);
+		req->rq_set = NULL;
+		ptlrpc_req_finished(req);
+	}
+
+	if (rc == 0) {
+		/*
+		 * If new requests have been added, make sure to wake up.
+		 */
+		rc = atomic_read(&set->set_new_count);
+
+		/*
+		 * If we have nothing to do, check whether we can take some
+		 * work from our partner threads.
+		 */
+		if (rc == 0 && pc->pc_npartners > 0) {
+			struct ptlrpcd_ctl *partner;
+			struct ptlrpc_request_set *ps;
+			int first = pc->pc_cursor;
+
+			do {
+				partner = pc->pc_partners[pc->pc_cursor++];
+				if (pc->pc_cursor >= pc->pc_npartners)
+					pc->pc_cursor = 0;
+				if (partner == NULL)
+					continue;
+
+				spin_lock(&partner->pc_lock);
+				ps = partner->pc_set;
+				if (ps == NULL) {
+					spin_unlock(&partner->pc_lock);
+					continue;
+				}
+
+				ptlrpc_reqset_get(ps);
+				spin_unlock(&partner->pc_lock);
+
+				if (atomic_read(&ps->set_new_count)) {
+					rc = ptlrpcd_steal_rqset(set, ps);
+					if (rc > 0)
+						CDEBUG(D_RPCTRACE,
+						       "transfer %d async RPCs [%d->%d]\n",
+						       rc, partner->pc_index,
+						       pc->pc_index);
+				}
+				ptlrpc_reqset_put(ps);
+			} while (rc == 0 && pc->pc_cursor != first);
+		}
+	}
+
+	RETURN(rc || test_bit(LIOD_STOP, &pc->pc_flags));
+}

-	if (pc->pc_set->set_remaining) {
-		rc = rc | ptlrpc_check_set(pc->pc_set);
+/**
+ * Main ptlrpcd thread.
+ * ptlrpc's code paths like to execute in process context, so we have this
+ * thread which spins on a set which contains the rpcs and sends them.
+ */
+static int ptlrpcd(void *arg)
+{
+	struct ptlrpcd_ctl *pc = arg;
+	struct ptlrpc_request_set *set;
+	struct lu_context ses = { 0 };
+	struct lu_env env = { .le_ses = &ses };
+	int rc = 0;
+	int exit = 0;
+
+	ENTRY;
+
+	unshare_fs_struct();
+
+	if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
+		CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);
+
+	/*
+	 * Allocate the request set after the thread has been bound
+	 * above.  This is safe because no requests will be queued
+	 * until all ptlrpcd threads have confirmed that they have
+	 * successfully started.
+	 */
+	set = ptlrpc_prep_set();
+	if (set == NULL)
+		GOTO(failed, rc = -ENOMEM);
+	spin_lock(&pc->pc_lock);
+	pc->pc_set = set;
+	spin_unlock(&pc->pc_lock);
+
+	/* Both client and server (MDT/OST) may use the environment. */
+	rc = lu_context_init(&env.le_ctx, LCT_MD_THREAD |
+					  LCT_DT_THREAD |
+					  LCT_CL_THREAD |
+					  LCT_REMEMBER |
+					  LCT_NOREF);
+	if (rc != 0)
+		GOTO(failed, rc);
+	rc = lu_context_init(env.le_ses, LCT_SESSION |
+					 LCT_REMEMBER |
+					 LCT_NOREF);
+	if (rc != 0) {
+		lu_context_fini(&env.le_ctx);
+		GOTO(failed, rc);
+	}
+
+	complete(&pc->pc_starting);
+
+	/*
+	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
+	 * set never completes.  ptlrpcd_check() calls ptlrpc_check_set() when
+	 * there are requests in the set.
+	 * New requests come in on the set's new_req_list and ptlrpcd_check()
+	 * moves them into the set.
+	 */
+	do {
+		struct l_wait_info lwi;
+		time64_t timeout;
+
+		timeout = ptlrpc_set_next_timeout(set);
+		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout),
+				  ptlrpc_expired_set, set);
+
+		lu_context_enter(&env.le_ctx);
+		lu_context_enter(env.le_ses);
+		l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
+		lu_context_exit(&env.le_ctx);
+		lu_context_exit(env.le_ses);
+
+		/*
+		 * Abort in-flight RPCs in the forced-stop case.
+		 */
+		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
+			if (test_bit(LIOD_FORCE, &pc->pc_flags))
+				ptlrpc_abort_set(set);
+			exit++;
+		}
+
+		/*
+		 * Make one more pass to be sure that ptlrpcd_check()
+		 * copied any racing new RPCs into the set so we can
+		 * kill them.
+		 */
+	} while (exit < 2);
+
+	/*
+	 * Wait for inflight requests to drain.
+	 */
+	if (!list_empty(&set->set_requests))
+		ptlrpc_set_wait(&env, set);
+	lu_context_fini(&env.le_ctx);
+	lu_context_fini(env.le_ses);
+
+	complete(&pc->pc_finishing);
+
+	return 0;
+
+failed:
+	pc->pc_error = rc;
+	complete(&pc->pc_starting);
+	RETURN(rc);
+}

+static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
+{
+	ENTRY;
+
+	pc->pc_index = index;
+	pc->pc_cpt = cpt;
+	init_completion(&pc->pc_starting);
+	init_completion(&pc->pc_finishing);
+	spin_lock_init(&pc->pc_lock);
+
+	if (index < 0) {
+		/* Recovery thread. */
+		snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
+	} else {
+		/* Regular thread. */
+		snprintf(pc->pc_name, sizeof(pc->pc_name),
+			 "ptlrpcd_%02d_%02d", cpt, index);
+	}
+
+	EXIT;
+}

+/* XXX: We want multiple CPU cores to share the async RPC load.  So we
+ *      start many ptlrpcd threads.  We also want to reduce the ptlrpcd
+ *      overhead caused by data transfer across CPU cores.  So we bind
+ *      all ptlrpcd threads to a CPT, in the expectation that CPTs
+ *      will be defined in a way that matches these boundaries.  Within
+ *      a CPT a ptlrpcd thread can be scheduled on any available core.
+ *
+ *      Each ptlrpcd thread has its own request queue.  This can cause
+ *      response delay if the thread is already busy.  To help with
+ *      this we define partner threads: these are other threads bound
+ *      to the same CPT which will check for work in each other's
+ *      request queues if they have no work to do.
+ *
+ *      The desired number of partner threads can be tuned by setting
+ *      ptlrpcd_partner_group_size.  The default is to create pairs of
+ *      partner threads.
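+ *
+ *      For example (illustrative numbers only): with 8 CPUs in a CPT
+ *      and the default group size of 2, 8 threads are started and
+ *      split into 4 partner pairs; thread 5 has thread 4 as its only
+ *      partner, and polls thread 4's queue whenever its own is empty.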
+ */
+static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
+{
+	struct ptlrpcd_ctl *pc;
+	struct ptlrpcd_ctl **ppc;
+	int first;
+	int i;
+	int rc = 0;
+
+	ENTRY;
+
+	LASSERT(index >= 0 && index < pd->pd_nthreads);
+	pc = &pd->pd_threads[index];
+	pc->pc_npartners = pd->pd_groupsize - 1;
+
+	if (pc->pc_npartners <= 0)
+		GOTO(out, rc);
+
+	OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_table, pc->pc_cpt,
+		      sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
+	if (pc->pc_partners == NULL) {
+		pc->pc_npartners = 0;
+		GOTO(out, rc = -ENOMEM);
+	}
+
+	first = index - index % pd->pd_groupsize;
+	ppc = pc->pc_partners;
+	for (i = first; i < first + pd->pd_groupsize; i++) {
+		if (i != index)
+			*ppc++ = &pd->pd_threads[i];
+	}
+out:
+	RETURN(rc);
+}

+int ptlrpcd_start(struct ptlrpcd_ctl *pc)
+{
+	struct task_struct *task;
+	int rc = 0;
+
+	ENTRY;
+
+	/*
+	 * Do not allow starting a second thread for one pc.
+	 */
+	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+		CWARN("Starting second thread (%s) for same pc %p\n",
+		      pc->pc_name, pc);
+		RETURN(0);
+	}
+
+	task = kthread_run(ptlrpcd, pc, pc->pc_name);
+	if (IS_ERR(task))
+		GOTO(out_set, rc = PTR_ERR(task));
+
+	wait_for_completion(&pc->pc_starting);
+	rc = pc->pc_error;
+	if (rc != 0)
+		GOTO(out_set, rc);
+
+	RETURN(0);
+
+out_set:
+	if (pc->pc_set != NULL) {
+		struct ptlrpc_request_set *set = pc->pc_set;
+
+		spin_lock(&pc->pc_lock);
+		pc->pc_set = NULL;
+		spin_unlock(&pc->pc_lock);
+		ptlrpc_set_destroy(set);
+	}
+	clear_bit(LIOD_START, &pc->pc_flags);
+	RETURN(rc);
+}

-#ifdef __KERNEL__
-/* ptlrpc's code paths like to execute in process context, so we have this
- * thread which spins on a set which contains the io rpcs. llite specifies
- * ptlrpcd's set when it pushes pages down into the oscs */
-static int ptlrpcd(void *arg)
+void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
 {
-	struct ptlrpcd_ctl *pc = arg;
-	unsigned long flags;
-	ENTRY;
-
-	kportal_daemonize("ptlrpcd");
-
-	SIGNAL_MASK_LOCK(current, flags);
-	sigfillset(&current->blocked);
-	RECALC_SIGPENDING;
-	SIGNAL_MASK_UNLOCK(current, flags);
-
-	complete(&pc->pc_starting);
-
-	/* this mainloop strongly resembles ptlrpc_set_wait except
-	 * that our set never completes.  ptlrpcd_check calls ptlrpc_check_set
-	 * when there are requests in the set. new requests come in
-	 * on the set's new_req_list and ptlrpcd_check moves them into
-	 * the set.
- */
-	while (1) {
-		wait_queue_t set_wait;
-		struct l_wait_info lwi;
-		int timeout;
-
-		timeout = ptlrpc_set_next_timeout(pc->pc_set) * HZ;
-		lwi = LWI_TIMEOUT(timeout, ptlrpc_expired_set, pc->pc_set);
-
-		/* ala the pinger, wait on pc's waitqueue and the set's */
-		init_waitqueue_entry(&set_wait, current);
-		add_wait_queue(&pc->pc_set->set_waitq, &set_wait);
-		l_wait_event(pc->pc_waitq, ptlrpcd_check(pc), &lwi);
-		remove_wait_queue(&pc->pc_set->set_waitq, &set_wait);
-
-		if (test_bit(LIOD_STOP, &pc->pc_flags))
-			break;
-	}
-	/* XXX should be making sure we don't have anything in flight */
-	complete(&pc->pc_finishing);
-	return 0;
+	ENTRY;
+
+	if (!test_bit(LIOD_START, &pc->pc_flags)) {
+		CWARN("Thread for pc %p was not started\n", pc);
+		goto out;
+	}
+
+	set_bit(LIOD_STOP, &pc->pc_flags);
+	if (force)
+		set_bit(LIOD_FORCE, &pc->pc_flags);
+	wake_up(&pc->pc_set->set_waitq);
+
+out:
+	EXIT;
 }

-#else
-static int ptlrpcd_recurred = 0;
-static void *ptlrpcd_callback;
-int ptlrpcd_check_async_rpcs(void *arg)
+void ptlrpcd_free(struct ptlrpcd_ctl *pc)
 {
-	struct ptlrpcd_ctl *pc = arg;
-	int rc = 0;
+	struct ptlrpc_request_set *set = pc->pc_set;
+
+	ENTRY;
+
+	if (!test_bit(LIOD_START, &pc->pc_flags)) {
+		CWARN("Thread for pc %p was not started\n", pc);
+		goto out;
+	}

-	/* single threaded!! */
-	ptlrpcd_recurred++;
+	wait_for_completion(&pc->pc_finishing);

-	if (ptlrpcd_recurred == 1)
-		rc = ptlrpcd_check(pc);
+	spin_lock(&pc->pc_lock);
+	pc->pc_set = NULL;
+	spin_unlock(&pc->pc_lock);
+	ptlrpc_set_destroy(set);

-	ptlrpcd_recurred--;
-	return rc;
+	clear_bit(LIOD_START, &pc->pc_flags);
+	clear_bit(LIOD_STOP, &pc->pc_flags);
+	clear_bit(LIOD_FORCE, &pc->pc_flags);
+
+out:
+	if (pc->pc_npartners > 0) {
+		LASSERT(pc->pc_partners != NULL);
+
+		OBD_FREE(pc->pc_partners,
+			 sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
+		pc->pc_partners = NULL;
+	}
+	pc->pc_npartners = 0;
+	pc->pc_error = 0;
+	EXIT;
 }
-#endif

-int ptlrpcd_addref(void)
+static void ptlrpcd_fini(void)
+{
+	int i;
+	int j;
+	int ncpts;
+
+	ENTRY;
+
+	if (ptlrpcds != NULL) {
+		for (i = 0; i < ptlrpcds_num; i++) {
+			if (ptlrpcds[i] == NULL)
+				break;
+			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
+				ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
+			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
+				ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
+			OBD_FREE(ptlrpcds[i], ptlrpcds[i]->pd_size);
+			ptlrpcds[i] = NULL;
+		}
+		OBD_FREE(ptlrpcds, sizeof(ptlrpcds[0]) * ptlrpcds_num);
+	}
+	ptlrpcds_num = 0;
+
+	ptlrpcd_stop(&ptlrpcd_rcv, 0);
+	ptlrpcd_free(&ptlrpcd_rcv);
+
+	if (ptlrpcds_cpt_idx != NULL) {
+		ncpts = cfs_cpt_number(cfs_cpt_table);
+		OBD_FREE(ptlrpcds_cpt_idx, ncpts * sizeof(ptlrpcds_cpt_idx[0]));
+		ptlrpcds_cpt_idx = NULL;
+	}
+
+	EXIT;
+}
+
+static int ptlrpcd_init(void)
 {
-	struct ptlrpcd_ctl *pc = &ptlrpcd_pc;
-	int rc = 0;
-	ENTRY;
-
-	down(&ptlrpcd_sem);
-	if (++ptlrpcd_users != 1)
-		GOTO(out, rc);
-
-	memset(pc, 0, sizeof(*pc));
-	init_completion(&pc->pc_starting);
-	init_completion(&pc->pc_finishing);
-	init_waitqueue_head(&pc->pc_waitq);
-	pc->pc_flags = 0;
-	spin_lock_init(&pc->pc_lock);
-	INIT_LIST_HEAD(&pc->pc_req_list);
-
-	pc->pc_set = ptlrpc_prep_set();
-	if (pc->pc_set == NULL)
-		GOTO(out, rc = -ENOMEM);
-
-#ifdef __KERNEL__
-	if (kernel_thread(ptlrpcd, pc, 0) < 0) {
-		ptlrpc_set_destroy(pc->pc_set);
-		GOTO(out, rc = -ECHILD);
-	}
-
-	wait_for_completion(&pc->pc_starting);
+	int nthreads;
+	int groupsize;
+	int size;
+	int i;
+	int j;
+	int rc = 0;
+	struct cfs_cpt_table *cptable;
+	__u32 *cpts = NULL;
+	int ncpts;
+	int cpt;
+	struct ptlrpcd *pd;
+
+	ENTRY;
+
+	/*
+	 * Determine the CPTs that ptlrpcd threads will run on.
+	 */
+	cptable = cfs_cpt_table;
+	ncpts = cfs_cpt_number(cptable);
+	if (ptlrpcd_cpts != NULL) {
+		struct cfs_expr_list *el;
+
+		size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
+		OBD_ALLOC(ptlrpcds_cpt_idx, size);
+		if (ptlrpcds_cpt_idx == NULL)
+			GOTO(out, rc = -ENOMEM);
+
+		rc = cfs_expr_list_parse(ptlrpcd_cpts,
+					 strlen(ptlrpcd_cpts),
+					 0, ncpts - 1, &el);
+		if (rc != 0) {
+			CERROR("%s: invalid CPT pattern string: %s",
+			       "ptlrpcd_cpts", ptlrpcd_cpts);
+			GOTO(out, rc = -EINVAL);
+		}
+
+		rc = cfs_expr_list_values(el, ncpts, &cpts);
+		cfs_expr_list_free(el);
+		if (rc <= 0) {
+			CERROR("%s: failed to parse CPT array %s: %d\n",
+			       "ptlrpcd_cpts", ptlrpcd_cpts, rc);
+			if (rc == 0)
+				rc = -EINVAL;
+			GOTO(out, rc);
+		}
+
+		/*
+		 * Create the cpt-to-index map.  When there is no match
+		 * in the cpt table, map the cpt onto one of the given
+		 * entries (cpt modulo the entry count).  This could be
+		 * changed to take the topology of the system into
+		 * account.
+		 */
+		for (cpt = 0; cpt < ncpts; cpt++) {
+			for (i = 0; i < rc; i++)
+				if (cpts[i] == cpt)
+					break;
+			if (i >= rc)
+				i = cpt % rc;
+			ptlrpcds_cpt_idx[cpt] = i;
+		}
+
+		cfs_expr_list_values_free(cpts, rc);
+		ncpts = rc;
+	}
+	ptlrpcds_num = ncpts;
+
+	size = ncpts * sizeof(ptlrpcds[0]);
+	OBD_ALLOC(ptlrpcds, size);
+	if (ptlrpcds == NULL)
+		GOTO(out, rc = -ENOMEM);
+
+	/*
+	 * The max_ptlrpcds parameter is obsolete, but do something
+	 * sane if it has been tuned, and complain if
+	 * ptlrpcd_per_cpt_max has also been tuned.
+	 */
+	if (max_ptlrpcds != 0) {
+		CWARN("max_ptlrpcds is obsolete.\n");
+		if (ptlrpcd_per_cpt_max == 0) {
+			ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
+			/* Round up if there is a remainder. */
+			if (max_ptlrpcds % ncpts != 0)
+				ptlrpcd_per_cpt_max++;
+			CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
+			      ptlrpcd_per_cpt_max);
+		} else {
+			CWARN("ptlrpcd_per_cpt_max is also set!\n");
+		}
+	}
+
+	/*
+	 * The ptlrpcd_bind_policy parameter is obsolete, but do
+	 * something sane if it has been tuned, and complain if
+	 * ptlrpcd_partner_group_size is also tuned.
+	 */
+	if (ptlrpcd_bind_policy != 0) {
+		CWARN("ptlrpcd_bind_policy is obsolete.\n");
+		if (ptlrpcd_partner_group_size == 0) {
+			switch (ptlrpcd_bind_policy) {
+			case 1: /* PDB_POLICY_NONE */
+			case 2: /* PDB_POLICY_FULL */
+				ptlrpcd_partner_group_size = 1;
+				break;
+			case 3: /* PDB_POLICY_PAIR */
+				ptlrpcd_partner_group_size = 2;
+				break;
+			case 4: /* PDB_POLICY_NEIGHBOR */
+#ifdef CONFIG_NUMA
+				ptlrpcd_partner_group_size = -1; /* CPT */
 #else
-	ptlrpcd_callback =
-		liblustre_register_wait_callback(&ptlrpcd_check_async_rpcs, pc);
+				ptlrpcd_partner_group_size = 3; /* Triplets */
 #endif
+				break;
+			default: /* Illegal value, use the default. */
+				ptlrpcd_partner_group_size = 2;
+				break;
+			}
+			CWARN("Setting ptlrpcd_partner_group_size = %d\n",
+			      ptlrpcd_partner_group_size);
+		} else {
+			CWARN("ptlrpcd_partner_group_size is also set!\n");
+		}
+	}
+
+	if (ptlrpcd_partner_group_size == 0)
+		ptlrpcd_partner_group_size = 2;
+	else if (ptlrpcd_partner_group_size < 0)
+		ptlrpcd_partner_group_size = -1;
+	else if (ptlrpcd_per_cpt_max > 0 &&
+		 ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
+		ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;
+
+	/*
+	 * Start the recovery thread first.
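+	 * (It is the fallback destination used by ptlrpcd_select_pc() for
+	 * requests whose import is not in the FULL state.)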
+	 */
+	set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
+	ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
+	rc = ptlrpcd_start(&ptlrpcd_rcv);
+	if (rc < 0)
+		GOTO(out, rc);
+
+	for (i = 0; i < ncpts; i++) {
+		if (cpts == NULL)
+			cpt = i;
+		else
+			cpt = cpts[i];
+
+		nthreads = cfs_cpt_weight(cptable, cpt);
+		if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
+			nthreads = ptlrpcd_per_cpt_max;
+		if (nthreads < 2)
+			nthreads = 2;
+
+		if (ptlrpcd_partner_group_size <= 0) {
+			groupsize = nthreads;
+		} else if (nthreads <= ptlrpcd_partner_group_size) {
+			groupsize = nthreads;
+		} else {
+			groupsize = ptlrpcd_partner_group_size;
+			if (nthreads % groupsize != 0)
+				nthreads += groupsize - (nthreads % groupsize);
+		}
+
+		size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
+		OBD_CPT_ALLOC(pd, cptable, cpt, size);
+
+		if (!pd)
+			GOTO(out, rc = -ENOMEM);
+		pd->pd_size = size;
+		pd->pd_index = i;
+		pd->pd_cpt = cpt;
+		pd->pd_cursor = 0;
+		pd->pd_nthreads = nthreads;
+		pd->pd_groupsize = groupsize;
+		ptlrpcds[i] = pd;
+
+		/*
+		 * The ptlrpcd threads in a partner group can access
+		 * each other's struct ptlrpcd_ctl, so these must be
+		 * initialized before any thread is started.
+		 */
+		for (j = 0; j < nthreads; j++) {
+			ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
+			rc = ptlrpcd_partners(pd, j);
+			if (rc < 0)
+				GOTO(out, rc);
+		}
+
+		/* XXX: We start nthreads ptlrpc daemons on this cpt.
+		 *      Each of them can process any non-recovery
+		 *      async RPC, to improve overall async RPC
+		 *      efficiency.
+		 *
+		 *      But there are some issues when async I/O RPCs
+		 *      and async non-I/O RPCs are processed in the
+		 *      same set in some cases: the ptlrpcd may be
+		 *      blocked by some async I/O RPC(s), preventing
+		 *      other async non-I/O RPC(s) from being processed
+		 *      in time.
+		 *
+		 *      Maybe we should distinguish blocked async RPCs
+		 *      from non-blocked async RPCs, and process them
+		 *      in different ptlrpcd sets to avoid unnecessary
+		 *      dependency.  But how to distribute the async
+		 *      RPC load among all the ptlrpc daemons then
+		 *      becomes another problem.
+		 */
+		for (j = 0; j < nthreads; j++) {
+			rc = ptlrpcd_start(&pd->pd_threads[j]);
+			if (rc < 0)
+				GOTO(out, rc);
+		}
+	}
 out:
-	up(&ptlrpcd_sem);
-	RETURN(rc);
+	if (rc != 0)
+		ptlrpcd_fini();
+
+	RETURN(rc);
 }

-void ptlrpcd_decref(void)
+int ptlrpcd_addref(void)
 {
-	struct ptlrpcd_ctl *pc = &ptlrpcd_pc;
+	int rc = 0;
+
+	ENTRY;
+
+	mutex_lock(&ptlrpcd_mutex);
+	if (++ptlrpcd_users == 1) {
+		rc = ptlrpcd_init();
+		if (rc < 0)
+			ptlrpcd_users--;
+	}
+	mutex_unlock(&ptlrpcd_mutex);
+	RETURN(rc);
+}
+EXPORT_SYMBOL(ptlrpcd_addref);

-	down(&ptlrpcd_sem);
-	if (--ptlrpcd_users == 0) {
-		set_bit(LIOD_STOP, &pc->pc_flags);
-		wake_up(&pc->pc_waitq);
-#ifdef __KERNEL__
-		wait_for_completion(&pc->pc_finishing);
-#else
-		liblustre_deregister_wait_callback(ptlrpcd_callback);
-#endif
-		ptlrpc_set_destroy(pc->pc_set);
-	}
-	up(&ptlrpcd_sem);
+void ptlrpcd_decref(void)
+{
+	mutex_lock(&ptlrpcd_mutex);
+	if (--ptlrpcd_users == 0)
+		ptlrpcd_fini();
+	mutex_unlock(&ptlrpcd_mutex);
 }
+EXPORT_SYMBOL(ptlrpcd_decref);
+/** @} ptlrpcd */
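+
+/*
+ * Usage sketch (illustrative only; the "my_client_*" functions are
+ * hypothetical and not part of this file): a client module pins the
+ * ptlrpcd pool once, queues asynchronous requests via ptlrpcd_add_req(),
+ * and drops its reference on teardown.
+ *
+ * \code
+ *	static int my_client_setup(void)
+ *	{
+ *		int rc = ptlrpcd_addref();	// first user starts the threads
+ *
+ *		if (rc != 0)
+ *			return rc;
+ *		return 0;
+ *	}
+ *
+ *	static void my_client_cleanup(void)
+ *	{
+ *		ptlrpcd_decref();		// last user stops the threads
+ *	}
+ * \endcode
+ */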