LU-6325 added CPT binding to the ptlrpc worker threads on
the servers. This is often desirable, especially where NUMA
latencies are high, but when NUMA latencies are low there is
little benefit, and the binding can sometimes be quite
costly. In particular, if NID-CPT hashing with routers
leaves the workload unbalanced across CPTs, it is easy to
end up in a situation where the CPUs in one CPT are maxed
out while those in other CPTs sit idle.
To address this, we add module parameters that allow
disabling the strict binding behavior, so service threads
may run on all CPUs.
This is complicated slightly because we still want separate
service partitions: the existing "no affinity" behavior
places all service threads in a single service partition,
which leaves only one queue for service wakeups. So we
separate CPU binding behavior from CPT association,
allowing us to keep multiple service partitions where
desired.
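
As a rough sketch (using the names from the patch below),
the two settings now act independently: cc_affinity decides
how many service partitions a service gets, while
tc_cpu_bind (saved as srv_cpt_bind) decides whether each
thread is pinned to the CPUs of its partition's CPT:

    /* per-CPT partitions vs. a single partition */
    ncpts = cconf->cc_affinity ? cfs_cpt_number(cptable) : 1;

    /* strict CPU binding is now optional per service */
    if (svc->srv_cpt_bind)
        rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);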
Module parameters are added to ldlm, mdt, and ost, of the
form "servicename_cpu_bind", such as "mds_rdpg_cpu_bind".
Setting them to "0" will disable the strict CPU binding
behavior for the threads in that service.
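
For example, to leave the MDS readpage service threads
unbound, the parameter can be set at module load time; since
these parameters are registered with mode 0444, they cannot
be changed at runtime. A typical modprobe configuration
entry (file location is illustrative) would be:

    options mdt mds_rdpg_cpu_bind=0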
Parameters were not added for certain minor services that
have no CPT affinity/binding behavior today. (This appears
to be because they are not expected to be performance
sensitive.)
cray-bug-id: LUS-6518
Signed-off-by: Patrick Farrell <paf@cray.com>
Change-Id: I1f6f9bb7a11da3a3eec7fc14c41d09ed27700f46
Reviewed-on: https://review.whamcloud.com/33262
Tested-by: Jenkins
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Chris Horn <hornc@cray.com>
Reviewed-by: Doug Oucharek <dougso@me.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
int srv_watchdog_factor;
/** under unregister_service */
unsigned srv_is_stopping:1;
+ /** Whether or not to restrict service threads to CPUs in this CPT */
+ unsigned srv_cpt_bind:1;
/** max # request buffers */
int srv_nrqbds_max;
/** max # request buffers in history per partition */
int srv_hist_nrqbds_cpt_max;
- /** number of CPTs this service bound on */
+ /** number of CPTs this service associated with */
int srv_ncpts;
- /** CPTs array this service bound on */
+ /** CPTs array this service associated with */
__u32 *srv_cpts;
/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
int srv_cpt_bits;
/* user-specified number of threads; it will be validated
 * against other members of this structure. */
unsigned int tc_nthrs_user;
- /* set NUMA node affinity for service threads */
- unsigned int tc_cpu_affinity;
+ /* bind service threads to only CPUs in their associated CPT */
+ unsigned int tc_cpu_bind;
/* Tags for lu_context associated with service thread */
__u32 tc_ctx_tags;
};
struct cfs_cpt_table *cc_cptable;
/* string pattern to describe CPTs for a service */
char *cc_pattern;
+ /* whether or not to have per-CPT service partitions */
+ bool cc_affinity;
};
struct ptlrpc_service_conf {
module_param(ldlm_num_threads, int, 0444);
MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");
+static unsigned int ldlm_cpu_bind = 1;
+module_param(ldlm_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(ldlm_cpu_bind,
+ "bind DLM service threads to particular CPU partitions");
+
static char *ldlm_cpts;
module_param(ldlm_cpts, charp, 0444);
MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
.tc_nthrs_base = LDLM_NTHRS_BASE,
.tc_nthrs_max = LDLM_NTHRS_MAX,
.tc_nthrs_user = ldlm_num_threads,
+ .tc_cpu_bind = ldlm_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD,
},
.psc_cpt = {
.cc_pattern = ldlm_cpts,
},
.psc_ops = {
.so_req_handler = ldlm_callback_handler,
.tc_nthrs_base = LDLM_NTHRS_BASE,
.tc_nthrs_max = LDLM_NTHRS_MAX,
.tc_nthrs_user = ldlm_num_threads,
+ .tc_cpu_bind = ldlm_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD | \
LCT_DT_THREAD | \
LCT_CL_THREAD,
},
.psc_cpt = {
.cc_pattern = ldlm_cpts,
},
.psc_ops = {
.so_req_handler = ldlm_cancel_handler,
module_param(mds_num_threads, ulong, 0444);
MODULE_PARM_DESC(mds_num_threads, "number of MDS service threads to start");
+static unsigned int mds_cpu_bind = 1;
+module_param(mds_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(mds_cpu_bind,
+ "bind MDS threads to particular CPU partitions");
+
int mds_max_io_threads = 512;
module_param(mds_max_io_threads, int, 0444);
-MODULE_PARM_DESC(mds_max_io_threads, "maximum number of MDS IO service threads");
+MODULE_PARM_DESC(mds_max_io_threads,
+ "maximum number of MDS IO service threads");
+
+static unsigned int mds_io_cpu_bind = 1;
+module_param(mds_io_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(mds_io_cpu_bind,
+ "bind MDS IO threads to particular CPU partitions");
static char *mds_io_num_cpts;
module_param(mds_io_num_cpts, charp, 0444);
MODULE_PARM_DESC(mds_rdpg_num_threads,
"number of MDS readpage service threads to start");
+static unsigned int mds_rdpg_cpu_bind = 1;
+module_param(mds_rdpg_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(mds_rdpg_cpu_bind,
+ "bind MDS readpage threads to particular CPU partitions");
+
static char *mds_rdpg_num_cpts;
module_param(mds_rdpg_num_cpts, charp, 0444);
MODULE_PARM_DESC(mds_rdpg_num_cpts,
MODULE_PARM_DESC(mds_attr_num_threads,
"number of MDS setattr service threads to start");
+static unsigned int mds_attr_cpu_bind = 1;
+module_param(mds_attr_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(mds_attr_cpu_bind,
+ "bind MDS setattr threads to particular CPU partitions");
+
static char *mds_attr_num_cpts;
module_param(mds_attr_num_cpts, charp, 0444);
MODULE_PARM_DESC(mds_attr_num_cpts,
.tc_nthrs_base = MDS_NTHRS_BASE,
.tc_nthrs_max = MDS_NTHRS_MAX,
.tc_nthrs_user = mds_num_threads,
+ .tc_cpu_bind = mds_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_cpt = {
.cc_pattern = mds_num_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = MDS_RDPG_NTHRS_BASE,
.tc_nthrs_max = MDS_RDPG_NTHRS_MAX,
.tc_nthrs_user = mds_rdpg_num_threads,
+ .tc_cpu_bind = mds_rdpg_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_cpt = {
.cc_pattern = mds_rdpg_num_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = MDS_SETA_NTHRS_BASE,
.tc_nthrs_max = MDS_SETA_NTHRS_MAX,
.tc_nthrs_user = mds_attr_num_threads,
+ .tc_cpu_bind = mds_attr_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD,
},
.psc_cpt = {
.cc_pattern = mds_attr_num_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = MDS_NTHRS_BASE,
.tc_nthrs_max = MDS_NTHRS_MAX,
.tc_nthrs_user = mds_num_threads,
+ .tc_cpu_bind = mds_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD |
LCT_DT_THREAD,
},
.psc_cpt = {
.cc_pattern = mds_num_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = OSS_NTHRS_BASE,
.tc_nthrs_max = mds_max_io_threads,
.tc_nthrs_user = mds_num_threads,
+ .tc_cpu_bind = mds_io_cpu_bind,
.tc_ctx_tags = LCT_DT_THREAD | LCT_MD_THREAD,
},
.psc_cpt = {
.cc_cptable = mdt_io_cptable,
.cc_pattern = mdt_io_cptable == NULL ?
mds_io_num_cpts : NULL,
},
.psc_ops = {
.so_thr_init = tgt_io_thread_init,
module_param(oss_num_threads, int, 0444);
MODULE_PARM_DESC(oss_num_threads, "number of OSS service threads to start");
+static unsigned int oss_cpu_bind = 1;
+module_param(oss_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(oss_cpu_bind,
+ "bind OSS service threads to particular CPU partitions");
+
static int oss_num_create_threads;
module_param(oss_num_create_threads, int, 0444);
MODULE_PARM_DESC(oss_num_create_threads, "number of OSS create threads to start");
+static unsigned int oss_create_cpu_bind = 1;
+module_param(oss_create_cpu_bind, uint, 0444);
+MODULE_PARM_DESC(oss_create_cpu_bind,
+ "bind OSS create threads to particular CPU partitions");
+
static char *oss_cpts;
module_param(oss_cpts, charp, 0444);
MODULE_PARM_DESC(oss_cpts, "CPU partitions OSS threads should run on");
.tc_nthrs_base = OSS_NTHRS_BASE,
.tc_nthrs_max = oss_max_threads,
.tc_nthrs_user = oss_num_threads,
+ .tc_cpu_bind = oss_cpu_bind,
.tc_ctx_tags = LCT_DT_THREAD,
},
.psc_cpt = {
.cc_pattern = oss_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = OSS_CR_NTHRS_BASE,
.tc_nthrs_max = OSS_CR_NTHRS_MAX,
.tc_nthrs_user = oss_num_create_threads,
+ .tc_cpu_bind = oss_create_cpu_bind,
.tc_ctx_tags = LCT_DT_THREAD,
},
.psc_cpt = {
.cc_pattern = oss_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = OSS_NTHRS_BASE,
.tc_nthrs_max = oss_max_threads,
.tc_nthrs_user = oss_num_threads,
+ .tc_cpu_bind = oss_cpu_bind,
.tc_ctx_tags = LCT_DT_THREAD,
},
.psc_cpt = {
.cc_cptable = ost_io_cptable,
.cc_pattern = ost_io_cptable == NULL ?
oss_io_cpts : NULL,
},
.psc_ops = {
.so_thr_init = tgt_io_thread_init,
.tc_nthrs_base = OSS_CR_NTHRS_BASE,
.tc_nthrs_max = OSS_CR_NTHRS_MAX,
.tc_nthrs_user = oss_num_create_threads,
+ .tc_cpu_bind = oss_create_cpu_bind,
.tc_ctx_tags = LCT_DT_THREAD,
},
.psc_cpt = {
- .cc_pattern = oss_cpts,
+ .cc_pattern = oss_cpts,
+ .cc_affinity = true,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
.tc_nthrs_base = OSS_CR_NTHRS_BASE,
.tc_nthrs_max = OSS_CR_NTHRS_MAX,
.tc_nthrs_user = oss_num_create_threads,
+ .tc_cpu_bind = oss_create_cpu_bind,
.tc_ctx_tags = LCT_MD_THREAD |
LCT_DT_THREAD,
},
.psc_cpt = {
.cc_pattern = oss_cpts,
},
.psc_ops = {
.so_req_handler = tgt_request_handle,
if (cptable == NULL)
cptable = cfs_cpt_table;
- if (!conf->psc_thr.tc_cpu_affinity) {
+ if (conf->psc_thr.tc_cpu_bind > 1) {
+ CERROR("%s: Invalid cpu bind value %d, only 1 or 0 allowed\n",
+ conf->psc_name, conf->psc_thr.tc_cpu_bind);
+ RETURN(ERR_PTR(-EINVAL));
+ }
+
+ if (!cconf->cc_affinity) {
ncpts = 1;
} else {
ncpts = cfs_cpt_number(cptable);
service->srv_cptable = cptable;
service->srv_cpts = cpts;
service->srv_ncpts = ncpts;
+ service->srv_cpt_bind = conf->psc_thr.tc_cpu_bind;
service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
service->srv_ops = conf->psc_ops;
for (i = 0; i < ncpts; i++) {
- if (!conf->psc_thr.tc_cpu_affinity)
+ if (!cconf->cc_affinity)
cpt = CFS_CPT_ANY;
else
cpt = cpts != NULL ? cpts[i] : i;
thread->t_pid = current_pid();
unshare_fs_struct();
- /* NB: we will call cfs_cpt_bind() for all threads, because we
- * might want to run lustre server only on a subset of system CPUs,
- * in that case ->scp_cpt is CFS_CPT_ANY */
- rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
- if (rc != 0) {
- CWARN("%s: failed to bind %s on CPT %d\n",
- svc->srv_name, thread->t_name, svcpt->scp_cpt);
+ if (svc->srv_cpt_bind) {
+ rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
+ if (rc != 0) {
+ CWARN("%s: failed to bind %s on CPT %d\n",
+ svc->srv_name, thread->t_name, svcpt->scp_cpt);
+ }
}
ginfo = groups_alloc(0);