# endif
#endif /* __KERNEL__ */
+#define PTLRPC_NTHRS_MIN 2
+
/**
* The following constants determine how memory is used to buffer incoming
* service requests.
#define LDLM_MAXREPSIZE (1024)
/** Absolute limits */
-#define MDT_MIN_THREADS 2UL
#ifndef MDT_MAX_THREADS
+#define MDT_MIN_THREADS PTLRPC_NTHRS_MIN
#define MDT_MAX_THREADS 512UL
#endif
#define MDS_NBUFS (64 * cfs_num_online_cpus())
unsigned int tc_nthrs_min;
/* max number of service threads to start */
unsigned int tc_nthrs_max;
+ /* user-specified number of threads; it will be validated against
+ * the other members of this structure. */
+ unsigned int tc_nthrs_user;
/* set NUMA node affinity for service threads */
unsigned int tc_cpu_affinity;
/* Tags for lu_context associated with service thread */
#include <libcfs/list.h>
#include "ldlm_internal.h"
-#ifdef __KERNEL__
static int ldlm_num_threads;
CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
"number of DLM service threads to start");
-#endif
extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
static struct ptlrpc_service_conf conf;
struct ldlm_bl_pool *blp = NULL;
int rc = 0;
- int ldlm_min_threads = LDLM_THREADS_AUTO_MIN;
- int ldlm_max_threads = LDLM_THREADS_AUTO_MAX;
#ifdef __KERNEL__
int i;
#endif
GOTO(out, rc);
#endif
-#ifdef __KERNEL__
- if (ldlm_num_threads) {
- /* If ldlm_num_threads is set, it is the min and the max. */
- if (ldlm_num_threads > LDLM_THREADS_AUTO_MAX)
- ldlm_num_threads = LDLM_THREADS_AUTO_MAX;
- if (ldlm_num_threads < LDLM_THREADS_AUTO_MIN)
- ldlm_num_threads = LDLM_THREADS_AUTO_MIN;
- ldlm_min_threads = ldlm_max_threads = ldlm_num_threads;
- }
-#endif
memset(&conf, 0, sizeof(conf));
conf = (typeof(conf)) {
.psc_name = "ldlm_cbd",
},
.psc_thr = {
.tc_thr_name = "ldlm_cb",
- .tc_nthrs_min = ldlm_min_threads,
- .tc_nthrs_max = ldlm_max_threads,
+ .tc_nthrs_min = LDLM_THREADS_AUTO_MIN,
+ .tc_nthrs_max = LDLM_THREADS_AUTO_MAX,
+ .tc_nthrs_user = ldlm_num_threads,
.tc_ctx_tags = LCT_MD_THREAD | \
LCT_DT_THREAD,
},
},
.psc_thr = {
.tc_thr_name = "ldlm_cn",
- .tc_nthrs_min = ldlm_min_threads,
- .tc_nthrs_max = ldlm_max_threads,
+ .tc_nthrs_min = LDLM_THREADS_AUTO_MIN,
+ .tc_nthrs_max = LDLM_THREADS_AUTO_MAX,
+ .tc_nthrs_user = ldlm_num_threads,
.tc_ctx_tags = LCT_MD_THREAD | \
LCT_DT_THREAD | \
LCT_CL_THREAD,
cfs_waitq_init(&blp->blp_waitq);
cfs_atomic_set(&blp->blp_num_threads, 0);
cfs_atomic_set(&blp->blp_busy_threads, 0);
- blp->blp_min_threads = ldlm_min_threads;
- blp->blp_max_threads = ldlm_max_threads;
#ifdef __KERNEL__
+ if (ldlm_num_threads == 0) {
+ blp->blp_min_threads = LDLM_THREADS_AUTO_MIN;
+ blp->blp_max_threads = LDLM_THREADS_AUTO_MAX;
+ } else {
+ blp->blp_min_threads = blp->blp_max_threads = \
+ min_t(int, LDLM_THREADS_AUTO_MAX,
+ max_t(int, LDLM_THREADS_AUTO_MIN,
+ ldlm_num_threads));
+ }
+
for (i = 0; i < blp->blp_min_threads; i++) {
rc = ldlm_bl_thread_start(blp);
if (rc < 0)
* Initialized in mdt_mod_init().
*/
static unsigned long mdt_num_threads;
-static unsigned long mdt_min_threads;
-static unsigned long mdt_max_threads;
/* ptlrpc request handler for MDT. All handlers are
* grouped into several slices - struct mdt_opc_slice,
*/
.psc_thr = {
.tc_thr_name = LUSTRE_MDT_NAME,
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_ops = {
},
.psc_thr = {
.tc_thr_name = "mdt_rdpg",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_ops = {
},
.psc_thr = {
.tc_thr_name = "mdt_attr",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_ops = {
},
.psc_thr = {
.tc_thr_name = "mdt_mdsc",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_ops = {
},
.psc_thr = {
.tc_thr_name = "mdt_mdss",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
- .tc_ctx_tags = LCT_MD_THREAD | \
- LCT_DT_THREAD,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
+ .tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD
},
.psc_ops = {
.so_req_handler = mdt_mdss_handle,
},
.psc_thr = {
.tc_thr_name = "mdt_dtss",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
- .tc_ctx_tags = LCT_MD_THREAD | \
- LCT_DT_THREAD,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
+ .tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD
},
.psc_ops = {
.so_req_handler = mdt_dtss_handle,
},
.psc_thr = {
.tc_thr_name = "mdt_fld",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
- .tc_ctx_tags = LCT_DT_THREAD | \
- LCT_MD_THREAD,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
+ .tc_ctx_tags = LCT_DT_THREAD | LCT_MD_THREAD
},
.psc_ops = {
.so_req_handler = mdt_fld_handle,
},
.psc_thr = {
.tc_thr_name = "mdt_mds",
- .tc_nthrs_min = mdt_min_threads,
- .tc_nthrs_max = mdt_max_threads,
+ .tc_nthrs_min = MDT_MIN_THREADS,
+ .tc_nthrs_max = MDT_MAX_THREADS,
+ .tc_nthrs_user = mdt_num_threads,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_ops = {
struct lprocfs_static_vars lvars;
int rc;
- if (mdt_num_threads > 0) {
- if (mdt_num_threads > MDT_MAX_THREADS)
- mdt_num_threads = MDT_MAX_THREADS;
- if (mdt_num_threads < MDT_MIN_THREADS)
- mdt_num_threads = MDT_MIN_THREADS;
- mdt_max_threads = mdt_min_threads = mdt_num_threads;
- } else {
- mdt_max_threads = MDT_MAX_THREADS;
- mdt_min_threads = MDT_MIN_THREADS;
- }
-
lprocfs_mdt_init_vars(&lvars);
rc = class_register_type(&mdt_obd_device_ops, NULL,
lvars.module_vars, LUSTRE_MDT_NAME,
static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
{
static struct ptlrpc_service_conf svc_conf;
- struct ost_obd *ost = &obd->u.ost;
- struct lprocfs_static_vars lvars;
- int oss_min_threads;
- int oss_max_threads;
- int oss_min_create_threads;
- int oss_max_create_threads;
- int rc;
- ENTRY;
+ struct ost_obd *ost = &obd->u.ost;
+ struct lprocfs_static_vars lvars;
+ int oss_min_threads = OSS_THREADS_MIN;
+ int oss_max_threads = OSS_THREADS_MAX;
+ int rc;
+ ENTRY;
rc = cfs_cleanup_group_info();
if (rc)
cfs_mutex_init(&ost->ost_health_mutex);
- if (oss_num_threads) {
- /* If oss_num_threads is set, it is the min and the max. */
- if (oss_num_threads > OSS_THREADS_MAX)
- oss_num_threads = OSS_THREADS_MAX;
- if (oss_num_threads < OSS_THREADS_MIN)
- oss_num_threads = OSS_THREADS_MIN;
- oss_max_threads = oss_min_threads = oss_num_threads;
- } else {
+ if (oss_num_threads == 0) {
/* Base min threads on memory and cpus */
oss_min_threads =
cfs_num_online_cpus() * CFS_NUM_CACHEPAGES >>
if (oss_min_threads > OSS_THREADS_MAX / 4)
oss_min_threads = OSS_THREADS_MAX / 4;
oss_max_threads = min(OSS_THREADS_MAX, oss_min_threads * 4 + 1);
- }
+ }
svc_conf = (typeof(svc_conf)) {
.psc_name = LUSTRE_OSS_NAME,
.tc_thr_name = "ll_ost",
.tc_nthrs_min = oss_min_threads,
.tc_nthrs_max = oss_max_threads,
+ .tc_nthrs_user = oss_num_threads,
.tc_ctx_tags = LCT_DT_THREAD,
},
.psc_ops = {
GOTO(out_lprocfs, rc);
}
- if (oss_num_create_threads) {
- if (oss_num_create_threads > OSS_MAX_CREATE_THREADS)
- oss_num_create_threads = OSS_MAX_CREATE_THREADS;
- if (oss_num_create_threads < OSS_MIN_CREATE_THREADS)
- oss_num_create_threads = OSS_MIN_CREATE_THREADS;
- oss_min_create_threads = oss_max_create_threads =
- oss_num_create_threads;
- } else {
- oss_min_create_threads = OSS_MIN_CREATE_THREADS;
- oss_max_create_threads = OSS_MAX_CREATE_THREADS;
- }
-
memset(&svc_conf, 0, sizeof(svc_conf));
svc_conf = (typeof(svc_conf)) {
.psc_name = "ost_create",
},
.psc_thr = {
.tc_thr_name = "ll_ost_create",
- .tc_nthrs_min = oss_min_create_threads,
- .tc_nthrs_max = oss_max_create_threads,
+ .tc_nthrs_min = OSS_CR_THREADS_MIN,
+ .tc_nthrs_max = OSS_CR_THREADS_MAX,
+ .tc_nthrs_user = oss_num_create_threads,
.tc_ctx_tags = LCT_DT_THREAD,
},
.psc_ops = {
.tc_thr_name = "ll_ost_io",
.tc_nthrs_min = oss_min_threads,
.tc_nthrs_max = oss_max_threads,
+ .tc_nthrs_user = oss_num_threads,
.tc_cpu_affinity = 1,
.tc_ctx_tags = LCT_DT_THREAD,
},
struct ost_thread_local_cache *ost_tls(struct ptlrpc_request *r);
-#define OSS_MIN_CREATE_THREADS 2UL
-#define OSS_MAX_CREATE_THREADS 16UL
+/* threads for handling "create" request */
+#define OSS_CR_THREADS_MIN 2UL
+#define OSS_CR_THREADS_MAX 16UL
/* Quota stuff */
extern quota_interface_t *quota_interface;
cfs_waitq_signal(&svc->srv_waitq);
}
+/**
+ * Determine the effective min/max service thread counts for \a conf.
+ *
+ * If the user specified a thread count (conf->psc_thr.tc_nthrs_user != 0),
+ * clamp it into [nthrs_min, tc_nthrs_max] and use it as both the minimum
+ * and the maximum.  Otherwise use tc_nthrs_max as the maximum and, as the
+ * minimum, tc_nthrs_min raised to at least PTLRPC_NTHRS_MIN — plus one
+ * extra when a high-priority request handler is registered.
+ *
+ * \param conf  service configuration; thread settings read from psc_thr
+ * \param min_p out: minimum number of service threads to run
+ * \param max_p out: maximum number of service threads to run
+ */
+static void
+ptlrpc_server_nthreads_check(struct ptlrpc_service_conf *conf,
+ int *min_p, int *max_p)
+{
+#ifdef __KERNEL__
+ struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
+ int nthrs_min;
+ int nthrs;
+
+ /* one extra thread when the service has an HP request handler */
+ nthrs_min = PTLRPC_NTHRS_MIN + (conf->psc_ops.so_hpreq_handler != NULL);
+ nthrs_min = max_t(int, nthrs_min, tc->tc_nthrs_min);
+
+ nthrs = tc->tc_nthrs_user;
+ if (nthrs != 0) { /* validate the user-specified value */
+ nthrs = min_t(int, nthrs, tc->tc_nthrs_max);
+ nthrs = max_t(int, nthrs, nthrs_min);
+ *min_p = *max_p = nthrs;
+ return;
+ }
+
+ /*
+ * NB: we will add some common code here for estimating, for example:
+ * add a new member ptlrpc_service_thr_conf::tc_factor, and estimate
+ * threads number based on:
+ * (online_cpus * conf::tc_factor) + conf::tc_nthrs_base.
+ *
+ * So we can remove code block like estimation in ost_setup, also,
+ * we might estimate MDS threads number as well instead of using
+ * absolute number, and have more threads on fat servers to improve
+ * availability of service.
+ *
+ * Also, we will need to validate threads number here for
+ * CPT affinity service (CPU ParTition) in the future.
+ * A service can have percpt thread-pool instead of a global thread
+ * pool for each service, which means user might not always get the
+ * threads number they want even if they set it in conf::tc_nthrs_user,
+ * because we need to adjust threads number for each CPT, instead of
+ * just using (conf::tc_nthrs_user / NCPTS), to make sure each pool
+ * will be healthy.
+ */
+ *max_p = tc->tc_nthrs_max;
+ *min_p = nthrs_min;
+#else /* __KERNEL__ */
+ *max_p = *min_p = 1; /* whatever */
+#endif
+}
+
/**
* Initialize service on a given portal.
* This includes starting serving threads , allocating and posting rqbds and
service->srv_req_portal = conf->psc_buf.bc_req_portal;
service->srv_request_seq = 1; /* valid seq #s start at 1 */
service->srv_request_max_cull_seq = 0;
- service->srv_threads_min = conf->psc_thr.tc_nthrs_min;
- service->srv_threads_max = conf->psc_thr.tc_nthrs_max;
+
+ ptlrpc_server_nthreads_check(conf, &service->srv_threads_min,
+ &service->srv_threads_max);
+
service->srv_thread_name = conf->psc_thr.tc_thr_name;
service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
service->srv_cpu_affinity = !!conf->psc_thr.tc_cpu_affinity;