* . NUMA allocators, CPU affinity threads are built over CPU partitions,
* instead of HW CPUs or HW nodes.
*
- * . By default, Lustre modules should refer to the global cfs_cpt_table,
+ * . By default, Lustre modules should refer to the global cfs_cpt_tab,
* instead of accessing HW CPUs directly, so concurrency of Lustre can be
- * configured by cpu_npartitions of the global cfs_cpt_table
+ * configured by cpu_npartitions of the global cfs_cpt_tab
*
 * . If cpu_npartitions=1 (all CPUs in one pool), lustre should work the
* same way as 2.2 or earlier versions
/** virtual processing unit */
struct cfs_cpu_partition {
/* CPUs mask for this partition */
- cpumask_t *cpt_cpumask;
+ cpumask_var_t cpt_cpumask;
/* nodes mask for this partition */
nodemask_t *cpt_nodemask;
/* NUMA distance between CPTs */
unsigned int *cpt_distance;
/* spread rotor for NUMA allocator */
- int cpt_spread_rotor;
+ unsigned int cpt_spread_rotor;
/* NUMA node if cpt_nodemask is empty */
int cpt_node;
};
struct cfs_cpt_table {
#ifdef CONFIG_SMP
/* spread rotor for NUMA allocator */
- int ctb_spread_rotor;
+ unsigned int ctb_spread_rotor;
/* maximum NUMA distance between all nodes in table */
unsigned int ctb_distance;
/* partitions tables */
nodemask_t ctb_nodemask;
#endif /* CONFIG_SMP */
/* all cpus in this partition table */
- cpumask_t *ctb_cpumask;
+ cpumask_var_t ctb_cpumask;
};
/* any CPU partition */
#define CFS_CPT_ANY (-1)
-extern struct cfs_cpt_table *cfs_cpt_table;
+extern struct cfs_cpt_table *cfs_cpt_tab;
/**
* destroy a CPU partition table
/**
* return cpumask of CPU partition \a cpt
*/
-cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
+cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
/**
* return nodemask of CPU partition \a cpt
*/