* . NUMA allocators, CPU affinity threads are built over CPU partitions,
* instead of HW CPUs or HW nodes.
*
- * . By default, Lustre modules should refer to the global cfs_cpt_table,
+ * . By default, Lustre modules should refer to the global cfs_cpt_tab,
* instead of accessing HW CPUs directly, so concurrency of Lustre can be
- * configured by cpu_npartitions of the global cfs_cpt_table
+ * configured by cpu_npartitions of the global cfs_cpt_tab
*
* . If cpu_npartitions=1(all CPUs in one pool), lustre should work the
* same way as 2.2 or earlier versions
/* any CPU partition */
#define CFS_CPT_ANY (-1)
-extern struct cfs_cpt_table *cfs_cpt_table;
+extern struct cfs_cpt_table *cfs_cpt_tab;
/**
* destroy a CPU partition table
#include <libcfs/libcfs.h>
/** Global CPU partition table */
-struct cfs_cpt_table *cfs_cpt_table __read_mostly;
-EXPORT_SYMBOL(cfs_cpt_table);
+struct cfs_cpt_table *cfs_cpt_tab __read_mostly;
+EXPORT_SYMBOL(cfs_cpt_tab);
/**
* modparam for setting number of partitions
void cfs_cpu_fini(void)
{
- if (!IS_ERR_OR_NULL(cfs_cpt_table))
- cfs_cpt_table_free(cfs_cpt_table);
+ if (!IS_ERR_OR_NULL(cfs_cpt_tab))
+ cfs_cpt_table_free(cfs_cpt_tab);
#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
{
int ret;
- LASSERT(!cfs_cpt_table);
+ LASSERT(!cfs_cpt_tab);
#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
get_online_cpus();
if (*cpu_pattern) {
- cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
- if (IS_ERR(cfs_cpt_table)) {
+ cfs_cpt_tab = cfs_cpt_table_create_pattern(cpu_pattern);
+ if (IS_ERR(cfs_cpt_tab)) {
CERROR("Failed to create cptab from pattern '%s'\n",
cpu_pattern);
- ret = PTR_ERR(cfs_cpt_table);
+ ret = PTR_ERR(cfs_cpt_tab);
goto failed_alloc_table;
}
} else {
- cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
- if (IS_ERR(cfs_cpt_table)) {
+ cfs_cpt_tab = cfs_cpt_table_create(cpu_npartitions);
+ if (IS_ERR(cfs_cpt_tab)) {
CERROR("Failed to create cptab with npartitions %d\n",
cpu_npartitions);
- ret = PTR_ERR(cfs_cpt_table);
+ ret = PTR_ERR(cfs_cpt_tab);
goto failed_alloc_table;
}
}
LCONSOLE(0, "HW NUMA nodes: %d, HW CPU cores: %d, npartitions: %d\n",
num_online_nodes(), num_online_cpus(),
- cfs_cpt_number(cfs_cpt_table));
+ cfs_cpt_number(cfs_cpt_tab));
return 0;
failed_alloc_table:
put_online_cpus();
- if (!IS_ERR_OR_NULL(cfs_cpt_table))
- cfs_cpt_table_free(cfs_cpt_table);
+ if (!IS_ERR_OR_NULL(cfs_cpt_tab))
+ cfs_cpt_table_free(cfs_cpt_tab);
#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
if (write)
return -EPERM;
- LASSERT(cfs_cpt_table != NULL);
+ LASSERT(cfs_cpt_tab);
while (1) {
LIBCFS_ALLOC(buf, len);
if (buf == NULL)
return -ENOMEM;
- rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
+ rc = cfs_cpt_table_print(cfs_cpt_tab, buf, len);
if (rc >= 0)
break;
if (write)
return -EPERM;
- LASSERT(cfs_cpt_table != NULL);
+ LASSERT(cfs_cpt_tab);
while (1) {
LIBCFS_ALLOC(buf, len);
if (buf == NULL)
return -ENOMEM;
- rc = cfs_cpt_distance_print(cfs_cpt_table, buf, len);
+ rc = cfs_cpt_distance_print(cfs_cpt_tab, buf, len);
if (rc >= 0)
break;
lnet_assert_wire_constants();
- /* refer to global cfs_cpt_table for now */
+ /* refer to global cfs_cpt_tab for now */
- the_lnet.ln_cpt_table = cfs_cpt_table;
- the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
+ the_lnet.ln_cpt_table = cfs_cpt_tab;
+ the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_tab);
LASSERT(the_lnet.ln_cpt_number > 0);
if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
nbuf * (srpc_serv_is_framework(svc) ?
- 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
+ 1 : cfs_cpt_number(cfs_cpt_tab)), svc->sv_name);
return 0;
}
}
- mask = cfs_cpt_nodemask(cfs_cpt_table, CFS_CPT_ANY);
+ mask = cfs_cpt_nodemask(cfs_cpt_tab, CFS_CPT_ANY);
/* event CPT feature is disabled in libcfs level by set partition
* number to 1, we still want to set node affinity for io service */
- if (cfs_cpt_number(cfs_cpt_table) == 1 && nodes_weight(*mask) > 1) {
+ if (cfs_cpt_number(cfs_cpt_tab) == 1 && nodes_weight(*mask) > 1) {
int cpt = 0;
int i;
GOTO(out_service, rc);
}
- mask = cfs_cpt_nodemask(cfs_cpt_table, CFS_CPT_ANY);
+ mask = cfs_cpt_nodemask(cfs_cpt_tab, CFS_CPT_ANY);
/* event CPT feature is disabled in libcfs level by set partition
* number to 1, we still want to set node affinity for io service */
- if (cfs_cpt_number(cfs_cpt_table) == 1 && nodes_weight(*mask) > 1) {
+ if (cfs_cpt_number(cfs_cpt_tab) == 1 && nodes_weight(*mask) > 1) {
int cpt = 0;
int i;
int cpt;
ENTRY;
- cpt = cfs_cpt_current(cfs_cpt_table, 0);
- OBD_CPT_ALLOC(set, cfs_cpt_table, cpt, sizeof(*set));
+ cpt = cfs_cpt_current(cfs_cpt_tab, 0);
+ OBD_CPT_ALLOC(set, cfs_cpt_tab, cpt, sizeof(*set));
if (!set)
RETURN(NULL);
atomic_set(&set->set_refcount, 1);
if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
return &ptlrpcd_rcv;
- cpt = cfs_cpt_current(cfs_cpt_table, 1);
+ cpt = cfs_cpt_current(cfs_cpt_tab, 1);
if (ptlrpcds_cpt_idx == NULL)
idx = cpt;
else
unshare_fs_struct();
- if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
+ if (cfs_cpt_bind(cfs_cpt_tab, pc->pc_cpt) != 0)
CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);
/*
if (pc->pc_npartners <= 0)
GOTO(out, rc);
- OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_table, pc->pc_cpt,
+ OBD_CPT_ALLOC(pc->pc_partners, cfs_cpt_tab, pc->pc_cpt,
sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
if (pc->pc_partners == NULL) {
pc->pc_npartners = 0;
ptlrpcd_free(&ptlrpcd_rcv);
if (ptlrpcds_cpt_idx != NULL) {
- ncpts = cfs_cpt_number(cfs_cpt_table);
+ ncpts = cfs_cpt_number(cfs_cpt_tab);
OBD_FREE(ptlrpcds_cpt_idx, ncpts * sizeof(ptlrpcds_cpt_idx[0]));
ptlrpcds_cpt_idx = NULL;
}
/*
* Determine the CPTs that ptlrpcd threads will run on.
*/
- cptable = cfs_cpt_table;
+ cptable = cfs_cpt_tab;
ncpts = cfs_cpt_number(cptable);
if (ptlrpcd_cpts != NULL) {
struct cfs_expr_list *el;
#define HRT_STOPPING 1
struct ptlrpc_hr_service {
- /* CPU partition table, it's just cfs_cpt_table for now */
+ /* CPU partition table, it's just cfs_cpt_tab for now */
struct cfs_cpt_table *hr_cpt_table;
/** controller sleep waitq */
wait_queue_head_t hr_waitq;
cptable = cconf->cc_cptable;
if (cptable == NULL)
- cptable = cfs_cpt_table;
+ cptable = cfs_cpt_tab;
if (conf->psc_thr.tc_cpu_bind > 1) {
CERROR("%s: Invalid cpu bind value %d, only 1 or 0 allowed\n",
ENTRY;
memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
- ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
+ ptlrpc_hr.hr_cpt_table = cfs_cpt_tab;
ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
sizeof(*hrp));