/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_cpu.h
 *
 * CPU partition
 *   . a CPU partition is a virtual processing unit
 *
 *   . a CPU partition can present 1-N cores, or 1-N NUMA nodes;
 *     in other words, a CPU partition is a pool of processors
 *
 * CPU Partition Table (CPT)
 *   . a set of CPU partitions
 *
 *   . there are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP
 *
 *   . users can specify the total number of CPU partitions while creating
 *     a CPT; CPU partition IDs always start from 0.
 *
 *     Example: if there are 8 cores on the system, creating a CPT with
 *     cpu_npartitions=4 gives:
 *		core[0, 1] = partition[0], core[2, 3] = partition[1]
 *		core[4, 5] = partition[2], core[6, 7] = partition[3]
 *
 *     while cpu_npartitions=1 gives:
 *		core[0, 1, ... 7] = partition[0]
 *
 *   . users can also specify CPU partitions by a string pattern
 *
 *     Examples: cpu_partitions="0[0,1], 1[2,3]"
 *		 cpu_partitions="N 0[0-3], 1[4-8]"
 *
 *     The first character "N" means the following numbers are NUMA IDs.
 *
 *   . NUMA allocators and CPU-affinity threads are built over CPU
 *     partitions, instead of over HW CPUs or HW nodes.
 *
 *   . by default, Lustre modules should refer to the global cfs_cpt_table
 *     instead of accessing HW CPUs directly, so the concurrency of Lustre
 *     can be configured by the cpu_npartitions of the global cfs_cpt_table.
 *
 *   . if cpu_npartitions=1 (all CPUs in one pool), Lustre should work the
 *     same way as version 2.2 or earlier. A lifecycle sketch of this API
 *     follows below.
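 *
 * Illustrative lifecycle sketch (not taken from a real caller; the
 * 4-partition layout is a hypothetical example, and the setter's 1/0
 * return convention follows the declarations below):
 *
 *	struct cfs_cpt_table *cptab;
 *
 *	cptab = cfs_cpt_table_alloc(4);
 *	if (cptab == NULL)
 *		return -ENOMEM;
 *	if (!cfs_cpt_set_cpu(cptab, 0, 0)) {
 *		cfs_cpt_table_free(cptab);
 *		return -EINVAL;
 *	}
 *	...
 *	cfs_cpt_table_free(cptab);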
 *
 * Author: liang@whamcloud.com
 */

#ifndef __LIBCFS_CPU_H__
#define __LIBCFS_CPU_H__

#ifndef HAVE_LIBCFS_CPT

#ifndef __KERNEL__
typedef struct nodemask { DECLARE_BITMAP(bits, 1); } nodemask_t;
typedef struct cpumask { DECLARE_BITMAP(bits, 1); } cpumask_t;

#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}
#endif /* __KERNEL__ */

struct cfs_cpt_table {
	/* # of CPU partitions */
	int			ctb_nparts;
	/* cpu mask */
	cpumask_t		ctb_cpumask;
	/* node mask */
	nodemask_t		ctb_nodemask;
};

#endif /* !HAVE_LIBCFS_CPT */

/* any CPU partition */
#define CFS_CPT_ANY		(-1)

extern struct cfs_cpt_table	*cfs_cpt_table;

/**
 * destroy a CPU partition table
 */
void cfs_cpt_table_free(struct cfs_cpt_table *cptab);

/**
 * create a cfs_cpt_table with \a ncpt number of partitions
 */
struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);

/**
 * print string information of cpt-table \a cptab into \a buf of at most
 * \a len bytes
 */
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);

/**
 * return total number of CPU partitions in \a cptab
 */
int
cfs_cpt_number(struct cfs_cpt_table *cptab);

/**
 * return number of HW cores or hyper-thread siblings in CPU partition \a cpt
 */
int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);

/**
 * is there any online CPU in CPU partition \a cpt
 */
int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);

/**
 * return cpumask of CPU partition \a cpt
 */
cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);

/**
 * return nodemask of CPU partition \a cpt
 */
nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);

/**
 * map the current HW processor ID to a CPU-partition ID of \a cptab
 */
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);

/**
 * map HW processor ID \a cpu to a CPU-partition ID by \a cptab
 */
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);

/**
 * bind the current thread to CPU partition \a cpt of \a cptab
 */
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);

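/*
 * Illustrative sketch of binding a service thread; the assumption here is
 * that cfs_cpt_bind() follows the usual kernel convention of returning 0
 * on success and a negative errno on failure:
 *
 *	int rc = cfs_cpt_bind(cfs_cpt_table, cpt);
 *
 *	if (rc != 0)
 *		CWARN("cannot bind thread to CPT %d: rc = %d\n", cpt, rc);
 */
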
/**
 * add \a cpu to CPU partition \a cpt of \a cptab; return 1 for success,
 * otherwise 0 is returned
 */
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);

/**
 * remove \a cpu from CPU partition \a cpt of \a cptab
 */
void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);

/**
 * add all CPUs in \a mask to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
			int cpt, cpumask_t *mask);

/**
 * remove all CPUs in \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
			   int cpt, cpumask_t *mask);

/**
 * add all CPUs in NUMA node \a node to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);

/**
 * remove all CPUs in NUMA node \a node from CPU partition \a cpt
 */
void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);

/**
 * add all CPUs in node mask \a mask to CPU partition \a cpt;
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
			 int cpt, nodemask_t *mask);

/**
 * remove all CPUs in node mask \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
			    int cpt, nodemask_t *mask);

/**
 * unset all CPUs for CPU partition \a cpt
 */
void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);

/**
 * convert partition ID \a cpt to a NUMA node ID; if there is more than one
 * node in the partition, it may return a different node ID each time.
 */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);

/**
 * return number of HTs in the same core as \a cpu
 */
int cfs_cpu_ht_nsiblings(int cpu);

/**
 * allocate \a nr_bytes of physically contiguous memory with the properties
 * of \a flags, bound to the partition id \a cpt. This function should only
 * be used when just a few pages of memory are allocated at a time.
 */
static inline void *
cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes,
	       gfp_t flags)
{
	return kmalloc_node(nr_bytes, flags,
			    cfs_cpt_spread_node(cptab, cpt));
}

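/*
 * Usage sketch: because cfs_cpt_malloc() is a thin wrapper over
 * kmalloc_node(), memory it returns is released with plain kfree();
 * the GFP_NOFS flag here is just an example choice:
 *
 *	void *buf = cfs_cpt_malloc(cptab, cpt, 4096, GFP_NOFS);
 *
 *	if (buf != NULL)
 *		kfree(buf);
 */
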
/**
 * allocate \a nr_bytes of virtually contiguous memory that is bound to the
 * partition id \a cpt.
 */
static inline void *
cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
{
	/* vzalloc_node() sets __GFP_FS by default, but no currently exported
	 * kernel entry point allows both a NUMA node specification and a
	 * custom allocation-flags mask. This may be an issue since __GFP_FS
	 * usage can cause deadlocks in our code: when memory reclaim starts
	 * within the same context as a thread doing FS operations, it can
	 * attempt conflicting FS operations.
	 */
	return vzalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
}

/**
 * allocate a single page of memory with the properties of \a flags, where
 * the page is bound to the partition id \a cpt.
 */
static inline struct page *
cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
{
	return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
}

/**
 * allocate a chunk of memory from a memory pool that is bound to the
 * partition id \a cpt, with the properties of \a flags.
 */
static inline void *
cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
			int cpt, gfp_t flags)
{
	return kmem_cache_alloc_node(cachep, flags,
				     cfs_cpt_spread_node(cptab, cpt));
}

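/*
 * Usage sketch with a slab cache; the cache name and object size are
 * hypothetical, and GFP_NOFS is just an example flag:
 *
 *	struct kmem_cache *cache;
 *	void *obj;
 *
 *	cache = kmem_cache_create("example_cache", 128, 0, 0, NULL);
 *	obj = cfs_mem_cache_cpt_alloc(cache, cfs_cpt_table, cpt, GFP_NOFS);
 *	if (obj != NULL)
 *		kmem_cache_free(cache, obj);
 *	kmem_cache_destroy(cache);
 */
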
/**
 * iterate over all CPU partitions in \a cptab
 */
#define cfs_cpt_for_each(i, cptab)	\
	for (i = 0; i < cfs_cpt_number(cptab); i++)

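/*
 * Usage sketch: walk every partition in the global table and log its
 * weight (D_INFO is just an example debug mask):
 *
 *	int i;
 *
 *	cfs_cpt_for_each(i, cfs_cpt_table)
 *		CDEBUG(D_INFO, "CPT %d: %d CPUs\n",
 *		       i, cfs_cpt_weight(cfs_cpt_table, i));
 */
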
#ifndef __read_mostly
# define __read_mostly
#endif

#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif

int  cfs_cpu_init(void);
void cfs_cpu_fini(void);

#endif /* __LIBCFS_CPU_H__ */