/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_cpu.h
 *
 * CPU partition
 *   . A CPU partition is a virtual processing unit.
 *
 *   . A CPU partition can contain 1-N cores, or 1-N NUMA nodes;
 *     in other words, a CPU partition is a pool of processors.
 *
 * CPU Partition Table (CPT)
 *   . A CPT is a set of CPU partitions.
 *
 *   . There are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP.
 *
 *   . The user can specify the total number of CPU partitions while
 *     creating a CPT; CPU partition IDs always start from 0.
 *
 *     Example: if there are 8 cores in the system, a CPT created with
 *     cpu_npartitions=4 lays them out as:
 *         core[0, 1] = partition[0], core[2, 3] = partition[1]
 *         core[4, 5] = partition[2], core[6, 7] = partition[3]
 *
 *     and with cpu_npartitions=1:
 *         core[0, 1, ... 7] = partition[0]
 *
 *   . The user can also specify CPU partitions by a string pattern.
 *
 *     Examples: cpu_partitions="0[0,1], 1[2,3]"
 *               cpu_partitions="N 0[0-3], 1[4-8]"
 *
 *     The first character "N" means the following numbers are NUMA node
 *     IDs rather than CPU IDs.
 *
 *   . NUMA allocators and CPU-affinity threads are built over CPU
 *     partitions, instead of over HW CPUs or HW nodes.
 *
 *   . By default, Lustre modules should refer to the global cfs_cpt_table
 *     instead of accessing HW CPUs directly, so the concurrency of Lustre
 *     can be configured by the cpu_npartitions parameter of the global
 *     cfs_cpt_table.
 *
 *   . If cpu_npartitions=1 (all CPUs in one pool), Lustre should work the
 *     same way as version 2.2 or earlier.
 *
 * Author: liang@whamcloud.com
 */
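/*
 * A minimal usage sketch (illustrative only; the 4-partition layout and
 * the one-NUMA-node-per-partition assignment are assumptions made for
 * this example, not a recommended configuration):
 *
 *	static int example_cpt_setup(void)
 *	{
 *		struct cfs_cpt_table *cptab;
 *		int cpt;
 *
 *		cptab = cfs_cpt_table_alloc(4);
 *		if (cptab == NULL)
 *			return -ENOMEM;
 *
 *		cfs_cpt_for_each(cpt, cptab) {
 *			if (!cfs_cpt_set_node(cptab, cpt, cpt)) {
 *				cfs_cpt_table_free(cptab);
 *				return -EINVAL;
 *			}
 *		}
 *
 *		cfs_cpt_table_free(cptab);
 *		return 0;
 *	}
 */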
#ifndef __LIBCFS_CPU_H__
#define __LIBCFS_CPU_H__

#ifndef HAVE_LIBCFS_CPT

typedef unsigned long	cpumask_t;
typedef unsigned long	nodemask_t;
struct cfs_cpt_table {
	/* # of CPU partitions */
	int		ctb_nparts;
	/* cpu mask */
	cpumask_t	ctb_mask;
	/* node mask */
	nodemask_t	ctb_nodemask;
};

#endif /* !HAVE_LIBCFS_CPT */
/* any CPU partition */
#define CFS_CPT_ANY	(-1)

extern struct cfs_cpt_table	*cfs_cpt_table;
/**
 * destroy a CPU partition table
 */
void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
/**
 * create a cfs_cpt_table with \a ncpt number of partitions
 */
struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
/**
 * print string information of \a cptab into \a buf, no more than
 * \a len bytes
 */
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
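/*
 * Sketch of dumping the partition layout of the global table (the
 * 512-byte buffer size is an arbitrary choice for this example, and a
 * positive return value is assumed to indicate success):
 *
 *	char buf[512];
 *
 *	if (cfs_cpt_table_print(cfs_cpt_table, buf, sizeof(buf)) > 0)
 *		CDEBUG(D_INFO, "%s\n", buf);
 */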
/**
 * return total number of CPU partitions in \a cptab
 */
int cfs_cpt_number(struct cfs_cpt_table *cptab);
/**
 * return number of HW cores or hyper-threads in CPU partition \a cpt
 */
int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
/**
 * is there any online CPU in CPU partition \a cpt?
 */
int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
/**
 * return cpumask of CPU partition \a cpt
 */
cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
/**
 * return nodemask of CPU partition \a cpt
 */
nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
/**
 * map the current HW processor ID to a CPU-partition ID of \a cptab;
 * if \a remap is non-zero, a CPU that is not in any partition is
 * remapped to one instead of failing
 */
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
/**
 * map HW processor ID \a cpu to a CPU-partition ID by \a cptab
 */
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
/**
 * bind current thread on a CPU partition \a cpt of \a cptab
 */
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
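/*
 * Sketch of the common binding pattern: look up the partition that owns
 * the CPU we are currently running on (passing remap=1 so an unmapped
 * CPU still resolves to a partition), then pin the current thread to
 * that partition. A zero return from cfs_cpt_bind() is assumed to mean
 * success here.
 *
 *	int cpt = cfs_cpt_current(cfs_cpt_table, 1);
 *
 *	if (cfs_cpt_bind(cfs_cpt_table, cpt) != 0)
 *		CERROR("cannot bind to CPU partition %d\n", cpt);
 */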
/**
 * add \a cpu to CPU partition \a cpt of \a cptab; return 1 on success,
 * otherwise return 0
 */
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * remove \a cpu from CPU partition \a cpt of \a cptab
 */
void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * add all CPUs in \a mask to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
			int cpt, cpumask_t *mask);
/**
 * remove all CPUs in \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
			   int cpt, cpumask_t *mask);
/**
 * add all CPUs in NUMA node \a node to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * remove all CPUs in NUMA node \a node from CPU partition \a cpt
 */
void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * add all CPUs in node mask \a mask to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
			 int cpt, nodemask_t *mask);
/**
 * remove all CPUs in node mask \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
			    int cpt, nodemask_t *mask);
/**
 * unset all CPUs for CPU partition \a cpt
 */
void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);
/**
 * convert partition ID \a cpt to a NUMA node ID; if there is more than
 * one node in the partition, it may return a different node ID each time
 */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
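/*
 * Sketch of NUMA-aware allocation over a multi-node partition: pick a
 * node with cfs_cpt_spread_node() and allocate there with the standard
 * kernel helper kmalloc_node() (assumed available in this context):
 *
 *	void *buf;
 *	int node = cfs_cpt_spread_node(cptab, cpt);
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, node);
 */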
/**
 * return number of HTs in the same core as \a cpu
 */
int cfs_cpu_ht_nsiblings(int cpu);
/**
 * iterate over all CPU partitions in \a cptab
 */
#define cfs_cpt_for_each(i, cptab)	\
	for (i = 0; i < cfs_cpt_number(cptab); i++)
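/*
 * Example: count the online partitions of the global table (sketch):
 *
 *	int i, online = 0;
 *
 *	cfs_cpt_for_each(i, cfs_cpt_table) {
 *		if (cfs_cpt_online(cfs_cpt_table, i))
 *			online++;
 *	}
 */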
#ifndef __read_mostly
# define __read_mostly
#endif

#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif
int  cfs_cpu_init(void);
void cfs_cpu_fini(void);

#endif /* __LIBCFS_CPU_H__ */