/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * libcfs/include/libcfs/libcfs_cpu.h
 *
 * CPU partition
 *   . a CPU partition is a virtual processing unit
 *
 *   . a CPU partition can present 1-N cores, or 1-N NUMA nodes;
 *     in other words, a CPU partition is a pool of processors
 *
 * CPU Partition Table (CPT)
 *   . a set of CPU partitions
 *
 *   . there are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP
 *
 *   . the user can specify the total number of CPU partitions when
 *     creating a CPT; CPU partition IDs always start from 0.
 *
 *     Example: if there are 8 cores in the system, creating a CPT with
 *     cpu_npartitions=4 gives:
 *		core[0, 1] = partition[0], core[2, 3] = partition[1]
 *		core[4, 5] = partition[2], core[6, 7] = partition[3]
 *
 *     while cpu_npartitions=1 gives:
 *		core[0, 1, ... 7] = partition[0]
 *
 *   . the user can also specify CPU partitions by a string pattern
 *
 *     Examples: cpu_partitions="0[0,1], 1[2,3]"
 *               cpu_partitions="N 0[0-3], 1[4-8]"
 *
 *     A leading "N" means the following numbers are NUMA node IDs.
 *
 *   . NUMA allocators and CPU-affinity threads are built over CPU
 *     partitions, instead of over HW CPUs or HW nodes.
 *
 *   . by default, Lustre modules should refer to the global cfs_cpt_tab
 *     instead of accessing HW CPUs directly, so the concurrency of Lustre
 *     can be configured by cpu_npartitions of the global cfs_cpt_tab
 *
 *   . if cpu_npartitions=1 (all CPUs in one pool), Lustre should work the
 *     same way as 2.2 or earlier versions
 *
 * Author: liang@whamcloud.com
 */

#ifndef __LIBCFS_CPU_H__
#define __LIBCFS_CPU_H__

#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/version.h>
#include <linux/vmalloc.h>

#include <libcfs/linux/linux-cpu.h>

/* any CPU partition */
#define CFS_CPT_ANY		(-1)

struct cfs_cpt_table;

#ifdef CONFIG_SMP
extern struct cfs_cpt_table *cfs_cpt_tab;
/**
 * destroy a CPU partition table
 */
void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
/**
 * create a cfs_cpt_table with \a ncpt number of partitions
 */
struct cfs_cpt_table *cfs_cpt_table_alloc(int ncpt);
/**
 * print string information of cpt-table
 */
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
/**
 * print distance information of cpt-table
 */
int cfs_cpt_distance_print(struct cfs_cpt_table *cptab, char *buf, int len);
/**
 * return total number of CPU partitions in \a cptab
 */
int cfs_cpt_number(struct cfs_cpt_table *cptab);
/**
 * return number of HW cores or hyper-threads in CPU partition \a cpt
 */
int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
/**
 * is there any online CPU in CPU partition \a cpt?
 */
int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
/**
 * return cpumask of CPU partition \a cpt
 */
cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
/**
 * return nodemask of CPU partition \a cpt
 */
nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
/**
 * map the current HW processor ID to a CPU-partition ID of \a cptab
 */
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
/**
 * map HW processor ID \a cpu to a CPU-partition ID by \a cptab
 */
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
/**
 * map HW node ID \a node to a CPU-partition ID by \a cptab
 */
int cfs_cpt_of_node(struct cfs_cpt_table *cptab, int node);
/**
 * NUMA distance between \a cpt1 and \a cpt2 in \a cptab
 */
unsigned int cfs_cpt_distance(struct cfs_cpt_table *cptab, int cpt1, int cpt2);
/**
 * bind the current thread to CPU partition \a cpt of \a cptab
 */
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
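/*
 * Example (an illustrative sketch, not part of the declarations above):
 * bind the current thread to the partition matching the CPU it is running
 * on, as reported by cfs_cpt_current() with remapping enabled.
 *
 *	int cpt = cfs_cpt_current(cfs_cpt_tab, 1);
 *
 *	cfs_cpt_bind(cfs_cpt_tab, cpt);
 */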
/**
 * add \a cpu to CPU partition \a cpt of \a cptab, return 1 on success,
 * otherwise return 0
 */
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * remove \a cpu from CPU partition \a cpt of \a cptab
 */
void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
/**
 * add all CPUs in \a mask to CPU partition \a cpt,
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt,
			const cpumask_t *mask);
/**
 * remove all CPUs in \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt,
			   const cpumask_t *mask);
/**
 * add all CPUs in NUMA node \a node to CPU partition \a cpt,
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * remove all CPUs in NUMA node \a node from CPU partition \a cpt
 */
void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);
/**
 * add all CPUs in node mask \a mask to CPU partition \a cpt,
 * return 1 if all CPUs were successfully set, otherwise return 0
 */
int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt,
			 const nodemask_t *mask);
/**
 * remove all CPUs in node mask \a mask from CPU partition \a cpt
 */
void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt,
			    const nodemask_t *mask);
/**
 * convert partition ID \a cpt to a NUMA node ID; if there is more than one
 * node in this partition, it may return a different node ID each time
 */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);

int cfs_cpu_init(void);
void cfs_cpu_fini(void);
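
/*
 * Usage sketch (illustrative only, not part of the API): build a small
 * private table by hand, print it, and free it. The partition/CPU numbers
 * and the 512-byte buffer are arbitrary choices for the example.
 *
 *	struct cfs_cpt_table *cptab;
 *	char buf[512];
 *
 *	cptab = cfs_cpt_table_alloc(2);
 *	if (cptab != NULL) {
 *		cfs_cpt_set_cpu(cptab, 0, 0);	// CPU0 -> partition 0
 *		cfs_cpt_set_cpu(cptab, 1, 1);	// CPU1 -> partition 1
 *		cfs_cpt_table_print(cptab, buf, sizeof(buf));
 *		cfs_cpt_table_free(cptab);
 *	}
 */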

#else /* !CONFIG_SMP */

#define cfs_cpt_tab ((struct cfs_cpt_table *)NULL)

static inline void cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
}

static inline struct cfs_cpt_table *cfs_cpt_table_alloc(int ncpt)
{
	return NULL;
}

static inline int cfs_cpt_table_print(struct cfs_cpt_table *cptab,
				      char *buf, int len)
{
	int rc;

	rc = snprintf(buf, len, "0\t: 0\n");
	len -= rc;
	if (len <= 0)
		return -EFBIG;

	return rc;
}

static inline int cfs_cpt_distance_print(struct cfs_cpt_table *cptab,
					 char *buf, int len)
{
	int rc;

	rc = snprintf(buf, len, "0\t: 0:1\n");
	len -= rc;
	if (len <= 0)
		return -EFBIG;

	return rc;
}

static inline cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab,
					     int cpt)
{
	return (cpumask_var_t *)cpu_online_mask;
}

static inline int cfs_cpt_number(struct cfs_cpt_table *cptab)
{
	return 1;
}

static inline int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
{
	return 1;
}

static inline nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab,
					   int cpt)
{
	return &node_online_map;
}

static inline unsigned int cfs_cpt_distance(struct cfs_cpt_table *cptab,
					    int cpt1, int cpt2)
{
	return 1;
}

static inline int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt,
				   int node)
{
	return 1;
}

static inline int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
	return 0;
}

static inline int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
	return 0;
}

static inline int cfs_cpt_of_node(struct cfs_cpt_table *cptab, int node)
{
	return 0;
}

static inline int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
	return 0;
}

static inline int cfs_cpu_init(void)
{
	return 0;
}

static inline void cfs_cpu_fini(void)
{
}

#endif /* CONFIG_SMP */

/*
 * create a workqueue whose worker threads are confined to the CPUs of
 * partition \a cpt in \a tbl
 */
static inline
struct workqueue_struct *cfs_cpt_bind_workqueue(const char *wq_name,
						struct cfs_cpt_table *tbl,
						int flags, int cpt, int nthrs)
{
	cpumask_var_t *mask = cfs_cpt_cpumask(tbl, cpt);
	struct workqueue_attrs attrs = { };
	struct workqueue_struct *wq;

	wq = alloc_workqueue(wq_name, WQ_UNBOUND | flags, nthrs);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	if (mask && alloc_cpumask_var(&attrs.cpumask, GFP_KERNEL)) {
		cpumask_copy(attrs.cpumask, *mask);
		cpus_read_lock();
		cfs_apply_workqueue_attrs(wq, &attrs);
		cpus_read_unlock();
		free_cpumask_var(attrs.cpumask);
	}

	return wq;
}
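
/*
 * Example (a hedged sketch; the name "my_wq" and all parameters are
 * illustrative): create a workqueue whose workers are confined to the CPUs
 * of partition 0 of the global table, with up to 4 active work items.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = cfs_cpt_bind_workqueue("my_wq", cfs_cpt_tab, 0, 0, 4);
 *	if (!IS_ERR(wq)) {
 *		...
 *		destroy_workqueue(wq);
 *	}
 */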

/**
 * allocate per-cpu-partition data; the returned value is an array of
 * pointers that can be indexed by partition ID.
 *	cptab != NULL: size of array is number of CPU partitions
 *	cptab == NULL: size of array is number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/**
 * destroy per-cpu-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);

#define cfs_percpt_for_each(var, i, vars)		\
	for (i = 0; i < cfs_percpt_number(vars) &&	\
		    ((var) = (vars)[i]) != NULL; i++)
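
/*
 * Example (illustrative sketch; "struct my_counter" is a hypothetical type
 * used only for illustration): allocate one counter per CPU partition of
 * the global table, then walk the pointer array with cfs_percpt_for_each().
 *
 *	struct my_counter { atomic_t mc_count; } *cnt, **cnts;
 *	int i;
 *
 *	cnts = cfs_percpt_alloc(cfs_cpt_tab, sizeof(*cnt));
 *	if (cnts != NULL) {
 *		cfs_percpt_for_each(cnt, i, cnts)
 *			atomic_set(&cnt->mc_count, 0);
 *		cfs_percpt_free(cnts);
 *	}
 */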

/*
 * percpu partition lock
 *
 * There are some use-cases like this in Lustre:
 *   . each CPU partition has its own private data which is frequently
 *     changed, and mostly by the local CPU partition
 *   . all CPU partitions share some global data, and these data are
 *     rarely changed
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 *   . each CPU partition has its own private lock
 *   . changing private data only requires taking the private lock
 *   . reading shared data only requires taking _any_ of the private locks
 *   . changing shared data requires taking _all_ private locks,
 *     which is slow and should be rare
 */
enum {
	CFS_PERCPT_LOCK_EX	= -1,	/* negative */
};

struct cfs_percpt_lock {
	/* cpu-partition-table for this lock */
	struct cfs_cpt_table	 *pcl_cptab;
	/* exclusively locked */
	unsigned int		  pcl_locked;
	/* private lock table */
	spinlock_t		**pcl_locks;
};

/* return number of private locks */
#define cfs_percpt_lock_num(pcl) cfs_cpt_number((pcl)->pcl_cptab)

/**
 * create a cpu-partition lock based on CPU partition table \a cptab;
 * lockdep class keys for the private locks can be supplied via \a keys
 */
struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
					       struct lock_class_key *keys);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);

/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);

/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);

#define CFS_PERCPT_LOCK_KEYS	256

/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
#define cfs_percpt_lock_alloc(cptab)					\
({									\
	static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS];	\
	struct cfs_percpt_lock *___lk;					\
									\
	if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS)		\
		___lk = cfs_percpt_lock_create(cptab, NULL);		\
	else								\
		___lk = cfs_percpt_lock_create(cptab, ___keys);		\
	___lk;								\
})
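
/*
 * Example (illustrative sketch): a per-partition update versus an exclusive
 * update. Locking with CFS_PERCPT_LOCK_EX takes every private lock, so it
 * serializes against all per-partition holders and suits the rare changes
 * to shared data described above.
 *
 *	struct cfs_percpt_lock *pcl = cfs_percpt_lock_alloc(cfs_cpt_tab);
 *	int cpt = cfs_cpt_current(cfs_cpt_tab, 1);
 *
 *	cfs_percpt_lock(pcl, cpt);	// touch partition-private data
 *	cfs_percpt_unlock(pcl, cpt);
 *
 *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);	// change shared data
 *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 *
 *	cfs_percpt_lock_free(pcl);
 */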

/**
 * allocate \a nr_bytes of physical memory from a contiguous region with the
 * properties of \a flags, bound to the partition id \a cpt. This function
 * should only be used when just a few pages of memory are allocated.
 */
static inline void *
cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes,
	       gfp_t flags)
{
	return kmalloc_node(nr_bytes, flags,
			    cfs_cpt_spread_node(cptab, cpt));
}

/**
 * allocate \a nr_bytes of virtually contiguous memory that is bound to the
 * partition id \a cpt.
 */
static inline void *
cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
{
	/* vzalloc_node() sets __GFP_FS by default, but no currently
	 * exported kernel entry point allows both a NUMA node specification
	 * and a custom allocation flags mask. This may be an issue since
	 * __GFP_FS usage can cause deadlocks in our code: when memory
	 * reclaim starts within the context of a thread already doing FS
	 * operations, it can attempt conflicting FS operations.
	 */
	return vzalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
}

/**
 * allocate a single page of memory with the properties of \a flags, where
 * that page is bound to the partition id \a cpt.
 */
static inline struct page *
cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
{
	return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
}

/**
 * allocate a chunk of memory from a memory pool that is bound to the
 * partition id \a cpt with the properties of \a flags.
 */
static inline void *
cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
			int cpt, gfp_t flags)
{
	return kmem_cache_alloc_node(cachep, flags,
				     cfs_cpt_spread_node(cptab, cpt));
}

/**
 * iterate over all CPU partitions in \a cptab
 */
#define cfs_cpt_for_each(i, cptab)	\
	for (i = 0; i < cfs_cpt_number(cptab); i++)
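
/*
 * Example (illustrative sketch; the 4096-byte size is arbitrary): allocate
 * a NUMA-local scratch buffer for every partition of the global table.
 *
 *	void *buf;
 *	int i;
 *
 *	cfs_cpt_for_each(i, cfs_cpt_tab) {
 *		buf = cfs_cpt_malloc(cfs_cpt_tab, i, 4096, GFP_KERNEL);
 *		...
 *	}
 */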

#endif /* __LIBCFS_CPU_H__ */