/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction.
 *
 * Author: liang@whamcloud.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/cpu.h>
#include <linux/sched.h>
#include <libcfs/libcfs_cpu.h>
#include <libcfs/libcfs.h>
/** Global CPU partition table */
struct cfs_cpt_table *cfs_cpt_tab __read_mostly;
EXPORT_SYMBOL(cfs_cpt_tab);
/**
 * modparam for setting number of partitions
 *
 *  0 : estimate best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
 */
static int cpu_npartitions;
module_param(cpu_npartitions, int, 0444);
MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
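
/*
 * Illustrative usage (not part of the original code): cpu_npartitions is
 * read-only at runtime (0444), so it is normally set at module load time,
 * e.g. with "options libcfs cpu_npartitions=4" in a modprobe configuration
 * file to request four partitions instead of the estimated value.
 */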
/**
 * modparam for setting CPU partition patterns:
 *
 * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is the CPU
 *      partition ID, the numbers inside the brackets are processor IDs
 *      (cores or HTs)
 *
 * e.g. "N 0[0,1] 1[2,3]": a leading 'N' means the numbers inside the
 *      brackets are NUMA node IDs, the number before each bracket is the
 *      CPU partition ID
 *
 * e.g. "N": shortcut expression to create CPTs from the NUMA & CPU topology
 *
 * NB: if the user specifies cpu_pattern, cpu_npartitions is ignored
 */
static char *cpu_pattern = "N";
module_param(cpu_pattern, charp, 0444);
MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
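
/*
 * Illustrative usage (not part of the original code): a line such as
 * "options libcfs cpu_pattern=\"0[0-3] 1[4-7]\"" in a modprobe configuration
 * file would create two partitions of four CPUs each, while the default
 * pattern "N" lets the table follow the NUMA topology (falling back to
 * cpu_npartitions on single-node systems).
 */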
#ifdef CONFIG_SMP

struct cfs_cpt_table *cfs_cpt_table_alloc(int ncpt)
{
	struct cfs_cpt_table *cptab;
	int i;

	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	if (!cptab)
		return NULL;

	cptab->ctb_nparts = ncpt;

	if (!zalloc_cpumask_var(&cptab->ctb_cpumask, GFP_NOFS))
		goto failed_alloc_cpumask;

	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (!cptab->ctb_nodemask)
		goto failed_alloc_nodemask;

	CFS_ALLOC_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);
	if (!cptab->ctb_cpu2cpt)
		goto failed_alloc_cpu2cpt;

	memset(cptab->ctb_cpu2cpt, -1,
	       nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

	CFS_ALLOC_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);
	if (!cptab->ctb_node2cpt)
		goto failed_alloc_node2cpt;

	memset(cptab->ctb_node2cpt, -1,
	       nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

	CFS_ALLOC_PTR_ARRAY(cptab->ctb_parts, ncpt);
	if (!cptab->ctb_parts)
		goto failed_alloc_ctb_parts;

	memset(cptab->ctb_parts, -1, ncpt * sizeof(cptab->ctb_parts[0]));

	for (i = 0; i < ncpt; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (!zalloc_cpumask_var(&part->cpt_cpumask, GFP_NOFS))
			goto failed_setting_ctb_parts;

		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
		if (!part->cpt_nodemask)
			goto failed_setting_ctb_parts;

		CFS_ALLOC_PTR_ARRAY(part->cpt_distance, cptab->ctb_nparts);
		if (!part->cpt_distance)
			goto failed_setting_ctb_parts;

		memset(part->cpt_distance, -1,
		       cptab->ctb_nparts * sizeof(part->cpt_distance[0]));
	}

	return cptab;

failed_setting_ctb_parts:
	while (i-- >= 0) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		}

		free_cpumask_var(part->cpt_cpumask);

		if (part->cpt_distance) {
			CFS_FREE_PTR_ARRAY(part->cpt_distance,
					   cptab->ctb_nparts);
		}
	}

	if (cptab->ctb_parts)
		CFS_FREE_PTR_ARRAY(cptab->ctb_parts, cptab->ctb_nparts);

failed_alloc_ctb_parts:
	if (cptab->ctb_node2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);

failed_alloc_node2cpt:
	if (cptab->ctb_cpu2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);

failed_alloc_cpu2cpt:
	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
failed_alloc_nodemask:
	free_cpumask_var(cptab->ctb_cpumask);
failed_alloc_cpumask:
	LIBCFS_FREE(cptab, sizeof(*cptab));

	return NULL;
}
EXPORT_SYMBOL(cfs_cpt_table_alloc);
void cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
	int i;

	if (cptab->ctb_cpu2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);

	if (cptab->ctb_node2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);

	for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		}

		free_cpumask_var(part->cpt_cpumask);

		if (part->cpt_distance)
			CFS_FREE_PTR_ARRAY(part->cpt_distance,
					   cptab->ctb_nparts);
	}

	if (cptab->ctb_parts)
		CFS_FREE_PTR_ARRAY(cptab->ctb_parts, cptab->ctb_nparts);

	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	free_cpumask_var(cptab->ctb_cpumask);

	LIBCFS_FREE(cptab, sizeof(*cptab));
}
EXPORT_SYMBOL(cfs_cpt_table_free);
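
/*
 * The two helpers below format the partition table for userspace-visible
 * output: cfs_cpt_table_print() emits one line per partition listing its
 * CPUs, and cfs_cpt_distance_print() emits one line per partition listing
 * the NUMA distance to every other partition.
 */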
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)

	for (i = 0; i < cptab->ctb_nparts; i++) {

		rc = snprintf(tmp, len, "%d\t:", i);

		for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
			rc = snprintf(tmp, len, " %d", j);

EXPORT_SYMBOL(cfs_cpt_table_print);

int cfs_cpt_distance_print(struct cfs_cpt_table *cptab, char *buf, int len)

	for (i = 0; i < cptab->ctb_nparts; i++) {

		rc = snprintf(tmp, len, "%d\t:", i);

		for (j = 0; j < cptab->ctb_nparts; j++) {
			rc = snprintf(tmp, len, " %d:%d", j,
				      cptab->ctb_parts[i].cpt_distance[j]);

EXPORT_SYMBOL(cfs_cpt_distance_print);
int cfs_cpt_number(struct cfs_cpt_table *cptab)
{
	return cptab->ctb_nparts;
}
EXPORT_SYMBOL(cfs_cpt_number);

int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_weight(cptab->ctb_cpumask) :
	       cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
}
EXPORT_SYMBOL(cfs_cpt_weight);

int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_any_and(cptab->ctb_cpumask,
			       cpu_online_mask) < nr_cpu_ids :
	       cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
			       cpu_online_mask) < nr_cpu_ids;
}
EXPORT_SYMBOL(cfs_cpt_online);

cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       &cptab->ctb_cpumask : &cptab->ctb_parts[cpt].cpt_cpumask;
}
EXPORT_SYMBOL(cfs_cpt_cpumask);

nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
}
EXPORT_SYMBOL(cfs_cpt_nodemask);

unsigned int cfs_cpt_distance(struct cfs_cpt_table *cptab, int cpt1, int cpt2)
{
	LASSERT(cpt1 == CFS_CPT_ANY || (cpt1 >= 0 && cpt1 < cptab->ctb_nparts));
	LASSERT(cpt2 == CFS_CPT_ANY || (cpt2 >= 0 && cpt2 < cptab->ctb_nparts));

	if (cpt1 == CFS_CPT_ANY || cpt2 == CFS_CPT_ANY)
		return cptab->ctb_distance;

	return cptab->ctb_parts[cpt1].cpt_distance[cpt2];
}
EXPORT_SYMBOL(cfs_cpt_distance);
/**
 * Calculate the maximum NUMA distance between all nodes in the
 * from_mask and all nodes in the to_mask.
 */
static unsigned int cfs_cpt_distance_calculate(nodemask_t *from_mask,
					       nodemask_t *to_mask)
{
	unsigned int maximum;
	unsigned int distance;
	int from;
	int to;

	maximum = 0;
	for_each_node_mask(from, *from_mask) {
		for_each_node_mask(to, *to_mask) {
			distance = node_distance(from, to);
			if (maximum < distance)
				maximum = distance;
		}
	}
	return maximum;
}
static void cfs_cpt_add_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	cptab->ctb_cpu2cpt[cpu] = cpt;

	cpumask_set_cpu(cpu, cptab->ctb_cpumask);
	cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
}

static void cfs_cpt_del_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
	cpumask_clear_cpu(cpu, cptab->ctb_cpumask);

	cptab->ctb_cpu2cpt[cpu] = -1;
}
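
/*
 * cfs_cpt_add_node()/cfs_cpt_del_node() keep the per-table and per-partition
 * nodemasks in sync with the CPU assignments above: a node is added the
 * first time one of its CPUs joins a partition and removed once the last of
 * its CPUs leaves, and the cached NUMA distances (ctb_distance and
 * cpt_distance[]) are recomputed on every change.
 */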
static void cfs_cpt_add_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	struct cfs_cpu_partition *part;

	if (!node_isset(node, *cptab->ctb_nodemask)) {
		unsigned int dist;

		/* first time node is added to the CPT table */
		node_set(node, *cptab->ctb_nodemask);
		cptab->ctb_node2cpt[node] = cpt;

		dist = cfs_cpt_distance_calculate(cptab->ctb_nodemask,
						  cptab->ctb_nodemask);
		cptab->ctb_distance = dist;
	}

	part = &cptab->ctb_parts[cpt];
	if (!node_isset(node, *part->cpt_nodemask)) {
		int cpt2;

		/* first time node is added to this CPT */
		node_set(node, *part->cpt_nodemask);
		for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
			struct cfs_cpu_partition *part2;
			unsigned int dist;

			part2 = &cptab->ctb_parts[cpt2];
			dist = cfs_cpt_distance_calculate(part->cpt_nodemask,
							  part2->cpt_nodemask);
			part->cpt_distance[cpt2] = dist;
			dist = cfs_cpt_distance_calculate(part2->cpt_nodemask,
							  part->cpt_nodemask);
			part2->cpt_distance[cpt] = dist;
		}
	}
}
static void cfs_cpt_del_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	struct cfs_cpu_partition *part = &cptab->ctb_parts[cpt];
	int cpu;

	for_each_cpu(cpu, part->cpt_cpumask) {
		/* this CPT has other CPU belonging to this node? */
		if (cpu_to_node(cpu) == node)
			break;
	}

	if (cpu >= nr_cpu_ids && node_isset(node, *part->cpt_nodemask)) {
		int cpt2;

		/* No more CPUs in the node for this CPT. */
		node_clear(node, *part->cpt_nodemask);
		for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
			struct cfs_cpu_partition *part2;
			unsigned int dist;

			part2 = &cptab->ctb_parts[cpt2];
			if (node_isset(node, *part2->cpt_nodemask))
				cptab->ctb_node2cpt[node] = cpt2;

			dist = cfs_cpt_distance_calculate(part->cpt_nodemask,
							  part2->cpt_nodemask);
			part->cpt_distance[cpt2] = dist;
			dist = cfs_cpt_distance_calculate(part2->cpt_nodemask,
							  part->cpt_nodemask);
			part2->cpt_distance[cpt] = dist;
		}
	}

	for_each_cpu(cpu, cptab->ctb_cpumask) {
		/* this CPT-table has other CPUs belonging to this node? */
		if (cpu_to_node(cpu) == node)
			break;
	}

	if (cpu >= nr_cpu_ids && node_isset(node, *cptab->ctb_nodemask)) {
		/* No more CPUs in the table for this node. */
		node_clear(node, *cptab->ctb_nodemask);
		cptab->ctb_node2cpt[node] = -1;
		cptab->ctb_distance =
			cfs_cpt_distance_calculate(cptab->ctb_nodemask,
						   cptab->ctb_nodemask);
	}
}
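
/*
 * The cfs_cpt_set_*()/cfs_cpt_unset_*() functions below are the public
 * interface for populating a partition: they validate their arguments and
 * then delegate to the add/del helpers above so that the cpumasks,
 * nodemasks and distance caches stay consistent. The "set" variants return
 * 1 on success and 0 if the request was invalid or redundant.
 */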
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);

	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
		return 0;
	}

	if (cptab->ctb_cpu2cpt[cpu] != -1) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
		       cpu, cptab->ctb_cpu2cpt[cpu]);
		return 0;
	}

	if (cpumask_test_cpu(cpu, cptab->ctb_cpumask)) {
		CDEBUG(D_INFO, "CPU %d is already in cpumask\n", cpu);
		return 0;
	}

	if (cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d cpumask\n",
		       cpu, cpt);
		return 0;
	}

	cfs_cpt_add_cpu(cptab, cpt, cpu);
	cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpu);

void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpu < 0 || cpu >= nr_cpu_ids) {
		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
		return;
	}

	if (cpt == CFS_CPT_ANY) {
		/* caller doesn't know the partition ID */
		cpt = cptab->ctb_cpu2cpt[cpu];
		if (cpt < 0) { /* not set in this CPT-table */
			CDEBUG(D_INFO,
			       "Try to unset cpu %d which is not in CPT-table %p\n",
			       cpu, cptab);
			return;
		}
	} else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
		CDEBUG(D_INFO,
		       "CPU %d is not in CPU partition %d\n", cpu, cpt);
		return;
	}

	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));

	cfs_cpt_del_cpu(cptab, cpt, cpu);
	cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
}
EXPORT_SYMBOL(cfs_cpt_unset_cpu);

int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt,
			const cpumask_t *mask)
{
	int cpu;

	if (!cpumask_weight(mask) ||
	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
		CDEBUG(D_INFO,
		       "No online CPU is found in the CPU mask for CPU partition %d\n",
		       cpt);
		return 0;
	}

	for_each_cpu(cpu, mask) {
		cfs_cpt_add_cpu(cptab, cpt, cpu);
		cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));
	}

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpumask);

void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt,
			   const cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		cfs_cpt_del_cpu(cptab, cpt, cpu);
		cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
	}
}
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);

int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	const cpumask_t *mask;
	int cpu;

	if (node < 0 || node >= nr_node_ids) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return 0;
	}

	mask = cpumask_of_node(node);

	for_each_cpu(cpu, mask)
		cfs_cpt_add_cpu(cptab, cpt, cpu);

	cfs_cpt_add_node(cptab, cpt, node);

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_node);

void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	const cpumask_t *mask;
	int cpu;

	if (node < 0 || node >= nr_node_ids) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return;
	}

	mask = cpumask_of_node(node);

	for_each_cpu(cpu, mask)
		cfs_cpt_del_cpu(cptab, cpt, cpu);

	cfs_cpt_del_node(cptab, cpt, node);
}
EXPORT_SYMBOL(cfs_cpt_unset_node);

int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt,
			 const nodemask_t *mask)
{
	int node;

	for_each_node_mask(node, *mask)
		cfs_cpt_set_node(cptab, cpt, node);

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_nodemask);

void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt,
			    const nodemask_t *mask)
{
	int node;

	for_each_node_mask(node, *mask)
		cfs_cpt_unset_node(cptab, cpt, node);
}
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
	nodemask_t *mask;
	int weight;
	unsigned int rotor;
	int node = 0;

	/* convert CPU partition ID to HW node id */

	if (cpt < 0 || cpt >= cptab->ctb_nparts) {
		mask = cptab->ctb_nodemask;
		rotor = cptab->ctb_spread_rotor++;
	} else {
		mask = cptab->ctb_parts[cpt].cpt_nodemask;
		rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
		node = cptab->ctb_parts[cpt].cpt_node;
	}

	weight = nodes_weight(*mask);
	if (weight > 0) {
		rotor %= weight;

		for_each_node_mask(node, *mask) {
			if (!rotor--)
				return node;
		}
	}

	return node;
}
EXPORT_SYMBOL(cfs_cpt_spread_node);
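
/*
 * cfs_cpt_current() maps the CPU the caller is running on to its partition.
 * With remap set, a CPU that is not assigned to any partition is folded onto
 * a valid partition ID by a simple modulo so callers never see a negative
 * index.
 */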
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
	int cpu;
	int cpt;

	cpu = smp_processor_id();
	cpt = cptab->ctb_cpu2cpt[cpu];

	if (cpt < 0 && remap) {
		/* don't return negative value for safety of upper layer,
		 * instead we shadow the unknown cpu to a valid partition ID
		 */
		cpt = cpu % cptab->ctb_nparts;
	}

	return cpt;
}
EXPORT_SYMBOL(cfs_cpt_current);
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
{
	LASSERT(cpu >= 0 && cpu < nr_cpu_ids);

	return cptab->ctb_cpu2cpt[cpu];
}
EXPORT_SYMBOL(cfs_cpt_of_cpu);

int cfs_cpt_of_node(struct cfs_cpt_table *cptab, int node)
{
	if (node < 0 || node >= nr_node_ids)
		return CFS_CPT_ANY;

	return cptab->ctb_node2cpt[node];
}
EXPORT_SYMBOL(cfs_cpt_of_node);
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)

	nodemask_t *nodemask;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpt == CFS_CPT_ANY) {
		cpumask = cptab->ctb_cpumask;
		nodemask = cptab->ctb_nodemask;

		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;

	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		       "No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",

	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))

		rc = set_cpus_allowed_ptr(current, cpumask);
		set_mems_allowed(*nodemask);

		schedule();	/* switch to allowed CPU */

	/* don't need to set affinity because all online CPUs are covered */

EXPORT_SYMBOL(cfs_cpt_bind);
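
/*
 * Illustrative usage (not part of the original code): a Lustre service
 * thread typically calls cfs_cpt_bind(cfs_cpt_tab, cpt) once at startup so
 * that it is scheduled on the CPUs, and allocates memory from the NUMA
 * nodes, of its own partition.
 */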
/**
 * Choose up to \a number CPUs from \a node_mask and set them in \a cpt.
 * We always prefer to choose CPUs in the same core/socket.
 */
static int cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
				cpumask_t *node_mask, int number)

	cpumask_var_t socket_mask;
	cpumask_var_t core_mask;

	if (number >= cpumask_weight(node_mask)) {
		while (!cpumask_empty(node_mask)) {
			cpu = cpumask_first(node_mask);
			cpumask_clear_cpu(cpu, node_mask);

			if (!cpu_online(cpu))

			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);

	/*
	 * Allocate scratch buffers
	 * As we cannot initialize a cpumask_var_t, we need
	 * to alloc both before we can risk trying to free either
	 */
	if (!zalloc_cpumask_var(&socket_mask, GFP_NOFS))

	if (!zalloc_cpumask_var(&core_mask, GFP_NOFS))

	while (!cpumask_empty(node_mask)) {
		cpu = cpumask_first(node_mask);

		/* get cpumask for cores in the same socket */
		cpumask_and(socket_mask, topology_core_cpumask(cpu), node_mask);
		while (!cpumask_empty(socket_mask)) {
			/* get cpumask for hts in the same core */
			cpumask_and(core_mask, topology_sibling_cpumask(cpu),

			for_each_cpu(i, core_mask) {
				cpumask_clear_cpu(i, socket_mask);
				cpumask_clear_cpu(i, node_mask);

				rc = cfs_cpt_set_cpu(cptab, cpt, i);

			cpu = cpumask_first(socket_mask);

	free_cpumask_var(socket_mask);
	free_cpumask_var(core_mask);
#define CPT_WEIGHT_MIN 4

static int cfs_cpt_num_estimate(void)
{
	int nthr = cpumask_weight(topology_sibling_cpumask(smp_processor_id()));
	int ncpu = num_online_cpus();
	int ncpt = 1;

	if (ncpu > CPT_WEIGHT_MIN)
		for (ncpt = 2; ncpu > 2 * nthr * ncpt; ncpt++)
			; /* nothing */

#if (BITS_PER_LONG == 32)
	/* config many CPU partitions on 32-bit system could consume
	 * too much memory
	 */
	ncpt = min(2, ncpt);
#endif
	while (ncpu % ncpt)
		ncpt--; /* worst case is 1 */

	return ncpt;
}
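
/*
 * Worked example (illustrative): with 16 online CPUs and 2 hyperthreads per
 * core, the loop above stops at ncpt = 4 (16 > 2 * 2 * 4 is false), and
 * 16 % 4 == 0, so cfs_cpt_num_estimate() suggests four partitions.
 */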
static struct cfs_cpt_table *cfs_cpt_table_create(int ncpt)

	struct cfs_cpt_table *cptab = NULL;
	cpumask_var_t node_mask;

	num = cfs_cpt_num_estimate();

	if (ncpt > num_online_cpus()) {

		CERROR("libcfs: CPU partition count %d > cores %d: rc = %d\n",
		       ncpt, num_online_cpus(), rc);

	if (ncpt > 4 * num) {
		CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n",

	cptab = cfs_cpt_table_alloc(ncpt);

		CERROR("Failed to allocate CPU map(%d)\n", ncpt);

	if (!zalloc_cpumask_var(&node_mask, GFP_NOFS)) {
		CERROR("Failed to allocate scratch cpumask\n");

	num = num_online_cpus() / ncpt;
	rem = num_online_cpus() % ncpt;
	for_each_online_node(node) {
		cpumask_copy(node_mask, cpumask_of_node(node));

		while (cpt < ncpt && !cpumask_empty(node_mask)) {
			struct cfs_cpu_partition *part = &cptab->ctb_parts[cpt];
			int ncpu = cpumask_weight(part->cpt_cpumask);

			rc = cfs_cpt_choose_ncpus(cptab, cpt, node_mask,
						  (rem > 0) + num - ncpu);

			ncpu = cpumask_weight(part->cpt_cpumask);
			if (ncpu == num + !!(rem > 0)) {

	free_cpumask_var(node_mask);

	free_cpumask_var(node_mask);

	CERROR("Failed (rc = %d) to setup CPU partition table with %d partitions, online HW NUMA nodes: %d, HW CPU cores: %d.\n",
	       rc, ncpt, num_online_nodes(), num_online_cpus());

	cfs_cpt_table_free(cptab);
static struct cfs_cpt_table *cfs_cpt_table_create_pattern(const char *pattern)

	struct cfs_cpt_table *cptab;

	pattern_dup = kstrdup(pattern, GFP_KERNEL);

		CERROR("Failed to duplicate pattern '%s'\n", pattern);
		return ERR_PTR(-ENOMEM);

	str = strim(pattern_dup);
	if (*str == 'n' || *str == 'N') {
		str++; /* skip 'N' char */
		node = 1; /* NUMA pattern */

		for_each_online_node(i) {
			if (!cpumask_empty(cpumask_of_node(i)))

		if (ncpt == 1) { /* single NUMA node */

			return cfs_cpt_table_create(cpu_npartitions);

	if (!ncpt) { /* scanning bracket which is mark of partition */

		while ((bracket = strchr(bracket, '['))) {

	    (node && ncpt > num_online_nodes()) ||
	    (!node && ncpt > num_online_cpus())) {
		CERROR("Invalid pattern '%s', or too many partitions %d\n",

	cptab = cfs_cpt_table_alloc(ncpt);

		CERROR("Failed to allocate CPU partition table\n");

	if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
		for_each_online_node(i) {
			if (cpumask_empty(cpumask_of_node(i)))

			rc = cfs_cpt_set_node(cptab, cpt++, i);

	high = node ? nr_node_ids - 1 : nr_cpu_ids - 1;

	for (str = strim(str), c = 0; /* until break */; c++) {
		struct cfs_range_expr *range;
		struct cfs_expr_list *el;

		bracket = strchr(str, '[');

			CERROR("Invalid pattern '%s'\n", str);

			goto err_free_table;
		} else if (c != ncpt) {
			CERROR("Expect %d partitions but found %d\n",

			goto err_free_table;

		if (sscanf(str, "%d%n", &cpt, &n) < 1) {
			CERROR("Invalid CPU pattern '%s'\n", str);

			goto err_free_table;

		if (cpt < 0 || cpt >= ncpt) {
			CERROR("Invalid partition id %d, total partitions %d\n",

			goto err_free_table;

		if (cfs_cpt_weight(cptab, cpt)) {
			CERROR("Partition %d has already been set.\n", cpt);

			goto err_free_table;

		str = strim(str + n);
		if (str != bracket) {
			CERROR("Invalid pattern '%s'\n", str);

			goto err_free_table;

		bracket = strchr(str, ']');

			CERROR("Missing right bracket for partition %d in '%s'\n",

			goto err_free_table;

		rc = cfs_expr_list_parse(str, (bracket - str) + 1, 0, high,

			CERROR("Can't parse number range in '%s'\n", str);

			goto err_free_table;

		list_for_each_entry(range, &el->el_exprs, re_link) {
			for (i = range->re_lo; i <= range->re_hi; i++) {
				if ((i - range->re_lo) % range->re_stride)

				rc = node ? cfs_cpt_set_node(cptab, cpt, i)
					  : cfs_cpt_set_cpu(cptab, cpt, i);

					cfs_expr_list_free(el);

					goto err_free_table;

		cfs_expr_list_free(el);

		if (!cfs_cpt_online(cptab, cpt)) {
			CERROR("No online CPU is found on partition %d\n", cpt);

			goto err_free_table;

		str = strim(bracket + 1);

	cfs_cpt_table_free(cptab);
#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
static enum cpuhp_state lustre_cpu_online;

static int cfs_cpu_online(unsigned int cpu)

static int cfs_cpu_dead(unsigned int cpu)

	/* if all HTs in a core are offline, it may break affinity */
	warn = cpumask_any_and(topology_sibling_cpumask(cpu),
			       cpu_online_mask) >= nr_cpu_ids;
	CDEBUG(warn ? D_WARNING : D_INFO,
	       "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",

#ifndef HAVE_HOTPLUG_STATE_MACHINE
static int cfs_cpu_notify(struct notifier_block *self, unsigned long action,

	int cpu = (unsigned long)hcpu;

	case CPU_DEAD_FROZEN:

	case CPU_ONLINE_FROZEN:

		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",

static struct notifier_block cfs_cpu_notifier = {
	.notifier_call = cfs_cpu_notify,

#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */
void cfs_cpu_fini(void)
{
	if (!IS_ERR_OR_NULL(cfs_cpt_tab))
		cfs_cpt_table_free(cfs_cpt_tab);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	if (lustre_cpu_online > 0)
		cpuhp_remove_state_nocalls(lustre_cpu_online);
	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
#else
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */
}
int cfs_cpu_init(void)

	LASSERT(!cfs_cpt_tab);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
					"fs/lustre/cfe:dead", NULL,

		goto failed_cpu_dead;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"fs/lustre/cfe:online",
					cfs_cpu_online, NULL);

		goto failed_cpu_online;

	lustre_cpu_online = ret;

	register_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */

	cfs_cpt_tab = cfs_cpt_table_create_pattern(cpu_pattern);
	if (IS_ERR(cfs_cpt_tab)) {
		CERROR("Failed to create cptab from pattern '%s'\n",

		ret = PTR_ERR(cfs_cpt_tab);
		goto failed_alloc_table;

	cfs_cpt_tab = cfs_cpt_table_create(cpu_npartitions);
	if (IS_ERR(cfs_cpt_tab)) {
		CERROR("Failed to create cptab with npartitions %d\n",

		ret = PTR_ERR(cfs_cpt_tab);
		goto failed_alloc_table;

	LCONSOLE(0, "HW NUMA nodes: %d, HW CPU cores: %d, npartitions: %d\n",
		 num_online_nodes(), num_online_cpus(),
		 cfs_cpt_number(cfs_cpt_tab));

	if (!IS_ERR_OR_NULL(cfs_cpt_tab))
		cfs_cpt_table_free(cfs_cpt_tab);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	if (lustre_cpu_online > 0)
		cpuhp_remove_state_nocalls(lustre_cpu_online);

	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);

	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */
#else /* ! CONFIG_SMP */

struct cfs_cpt_table *cfs_cpt_table_alloc(int ncpt)
{
	struct cfs_cpt_table *cptab;

	if (ncpt != 1) {
		CERROR("Can't support cpu partition number %d\n", ncpt);
		return NULL;
	}

	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	if (!cptab)
		return NULL;

	cpumask_set_cpu(0, cptab->ctb_cpumask);
	node_set(0, cptab->ctb_nodemask);

	return cptab;
}
EXPORT_SYMBOL(cfs_cpt_table_alloc);

int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	int rc;

	rc = snprintf(buf, len, "0\t: 0\n");

	return rc;
}
EXPORT_SYMBOL(cfs_cpt_table_print);

int cfs_cpt_distance_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	int rc;

	rc = snprintf(buf, len, "0\t: 0:1\n");

	return rc;
}
EXPORT_SYMBOL(cfs_cpt_distance_print);

void cfs_cpu_fini(void)
{
	if (cfs_cpt_tab) {
		cfs_cpt_table_free(cfs_cpt_tab);
		cfs_cpt_tab = NULL;
	}
}

int cfs_cpu_init(void)
{
	cfs_cpt_tab = cfs_cpt_table_alloc(1);

	return cfs_cpt_tab ? 0 : -1;
}

#endif /* !CONFIG_SMP */