/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for an
 * introduction.
 *
 * Author: liang@whamcloud.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/cpu.h>
#include <linux/sched.h>
#include <libcfs/libcfs_cpu.h>
#include <libcfs/libcfs.h>

/** Global CPU partition table */
struct cfs_cpt_table *cfs_cpt_tab __read_mostly;
EXPORT_SYMBOL(cfs_cpt_tab);

/**
 * modparam for setting number of partitions
 *
 *  0 : estimate the best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
 */
static int cpu_npartitions;
module_param(cpu_npartitions, int, 0444);
MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");

/**
 * modparam for setting CPU partition patterns:
 *
 * e.g.: "0[0,1,2,3] 1[4,5,6,7]" - the number before each bracket is the
 *       CPU partition ID, the numbers within brackets are processor IDs
 *       (core or HT)
 *
 * e.g.: "N 0[0,1] 1[2,3]" - the leading character 'N' means the numbers
 *       in brackets are NUMA node IDs, the number before each bracket is
 *       still the CPU partition ID
 *
 * e.g.: "N" - shortcut expression to create a CPT from the NUMA & CPU
 *       topology
 *
 * NB: if the user specifies cpu_pattern, cpu_npartitions is ignored
 */
static char *cpu_pattern = "N";
module_param(cpu_pattern, charp, 0444);
MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
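
/*
 * Illustrative example (not from this file): with a modprobe option like
 *   options libcfs cpu_pattern="0[0-3] 1[4-7]"
 * partition 0 would own CPUs 0-3 and partition 1 CPUs 4-7; the bracket
 * contents are handed to cfs_expr_list_parse() below, so comma lists and
 * ranges are both accepted.
 */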

struct cfs_cpt_table *cfs_cpt_table_alloc(int ncpt)
{
	struct cfs_cpt_table *cptab;
	int i;

	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	if (!cptab)
		return NULL;

	cptab->ctb_nparts = ncpt;

	if (!zalloc_cpumask_var(&cptab->ctb_cpumask, GFP_NOFS))
		goto failed_alloc_cpumask;

	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (!cptab->ctb_nodemask)
		goto failed_alloc_nodemask;

	CFS_ALLOC_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);
	if (!cptab->ctb_cpu2cpt)
		goto failed_alloc_cpu2cpt;

	memset(cptab->ctb_cpu2cpt, -1,
	       nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

	CFS_ALLOC_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);
	if (!cptab->ctb_node2cpt)
		goto failed_alloc_node2cpt;

	memset(cptab->ctb_node2cpt, -1,
	       nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

	CFS_ALLOC_PTR_ARRAY(cptab->ctb_parts, ncpt);
	if (!cptab->ctb_parts)
		goto failed_alloc_ctb_parts;

	memset(cptab->ctb_parts, -1, ncpt * sizeof(cptab->ctb_parts[0]));

	for (i = 0; i < ncpt; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (!zalloc_cpumask_var(&part->cpt_cpumask, GFP_NOFS))
			goto failed_setting_ctb_parts;

		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
		if (!part->cpt_nodemask)
			goto failed_setting_ctb_parts;

		CFS_ALLOC_PTR_ARRAY(part->cpt_distance, cptab->ctb_nparts);
		if (!part->cpt_distance)
			goto failed_setting_ctb_parts;

		memset(part->cpt_distance, -1,
		       cptab->ctb_nparts * sizeof(part->cpt_distance[0]));
	}

	return cptab;

failed_setting_ctb_parts:
	for (; i >= 0; i--) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		}

		free_cpumask_var(part->cpt_cpumask);

		if (part->cpt_distance) {
			CFS_FREE_PTR_ARRAY(part->cpt_distance,
					   cptab->ctb_nparts);
		}
	}

	if (cptab->ctb_parts)
		CFS_FREE_PTR_ARRAY(cptab->ctb_parts, cptab->ctb_nparts);
failed_alloc_ctb_parts:
	if (cptab->ctb_node2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);
failed_alloc_node2cpt:
	if (cptab->ctb_cpu2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);
failed_alloc_cpu2cpt:
	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
failed_alloc_nodemask:
	free_cpumask_var(cptab->ctb_cpumask);
failed_alloc_cpumask:
	LIBCFS_FREE(cptab, sizeof(*cptab));

	return NULL;
}
EXPORT_SYMBOL(cfs_cpt_table_alloc);
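
/*
 * Free a CPT table and everything hanging off it: the per-partition
 * cpumasks, nodemasks and distance vectors, the cpu/node lookup arrays,
 * and the table itself. Each member is checked before it is freed, so a
 * partially constructed table is handled as well.
 */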
void cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
	int i;

	if (cptab->ctb_cpu2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_cpu2cpt, nr_cpu_ids);

	if (cptab->ctb_node2cpt)
		CFS_FREE_PTR_ARRAY(cptab->ctb_node2cpt, nr_node_ids);

	for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		}

		free_cpumask_var(part->cpt_cpumask);

		if (part->cpt_distance)
			CFS_FREE_PTR_ARRAY(part->cpt_distance,
					   cptab->ctb_nparts);
	}

	if (cptab->ctb_parts)
		CFS_FREE_PTR_ARRAY(cptab->ctb_parts, cptab->ctb_nparts);

	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	free_cpumask_var(cptab->ctb_cpumask);

	LIBCFS_FREE(cptab, sizeof(*cptab));
}
EXPORT_SYMBOL(cfs_cpt_table_free);
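
/*
 * Print the partition table into @buf (at most @len bytes), one line per
 * partition in the form "<cpt>\t: <cpu> <cpu> ...".
 */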
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	/* ... */
	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t:", i);
		/* ... */
		for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
			rc = snprintf(tmp, len, " %d", j);
			/* ... */
		}
	}
	/* ... */
}
EXPORT_SYMBOL(cfs_cpt_table_print);

int cfs_cpt_distance_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	/* ... */
	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t:", i);
		/* ... */
		for (j = 0; j < cptab->ctb_nparts; j++) {
			rc = snprintf(tmp, len, " %d:%d", j,
				      cptab->ctb_parts[i].cpt_distance[j]);
			/* ... */
		}
	}
	/* ... */
}
EXPORT_SYMBOL(cfs_cpt_distance_print);

int cfs_cpt_number(struct cfs_cpt_table *cptab)
{
	return cptab->ctb_nparts;
}
EXPORT_SYMBOL(cfs_cpt_number);

int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_weight(cptab->ctb_cpumask) :
	       cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
}
EXPORT_SYMBOL(cfs_cpt_weight);

int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_any_and(cptab->ctb_cpumask,
			       cpu_online_mask) < nr_cpu_ids :
	       cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
			       cpu_online_mask) < nr_cpu_ids;
}
EXPORT_SYMBOL(cfs_cpt_online);

cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       &cptab->ctb_cpumask : &cptab->ctb_parts[cpt].cpt_cpumask;
}
EXPORT_SYMBOL(cfs_cpt_cpumask);

nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
}
EXPORT_SYMBOL(cfs_cpt_nodemask);
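
/*
 * Return the NUMA distance cached between partitions @cpt1 and @cpt2;
 * if either side is CFS_CPT_ANY, fall back to the table-wide maximum
 * distance kept in ctb_distance.
 */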
unsigned int cfs_cpt_distance(struct cfs_cpt_table *cptab, int cpt1, int cpt2)
{
	LASSERT(cpt1 == CFS_CPT_ANY || (cpt1 >= 0 && cpt1 < cptab->ctb_nparts));
	LASSERT(cpt2 == CFS_CPT_ANY || (cpt2 >= 0 && cpt2 < cptab->ctb_nparts));

	if (cpt1 == CFS_CPT_ANY || cpt2 == CFS_CPT_ANY)
		return cptab->ctb_distance;

	return cptab->ctb_parts[cpt1].cpt_distance[cpt2];
}
EXPORT_SYMBOL(cfs_cpt_distance);

/*
 * Calculate the maximum NUMA distance between all nodes in the
 * from_mask and all nodes in the to_mask.
 */
static unsigned int cfs_cpt_distance_calculate(nodemask_t *from_mask,
					       nodemask_t *to_mask)
{
	unsigned int maximum;
	unsigned int distance;
	int from;
	int to;

	maximum = 0;
	for_each_node_mask(from, *from_mask) {
		for_each_node_mask(to, *to_mask) {
			distance = node_distance(from, to);
			if (maximum < distance)
				maximum = distance;
		}
	}
	return maximum;
}

static void cfs_cpt_add_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	cptab->ctb_cpu2cpt[cpu] = cpt;

	cpumask_set_cpu(cpu, cptab->ctb_cpumask);
	cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
}

static void cfs_cpt_del_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
	cpumask_clear_cpu(cpu, cptab->ctb_cpumask);

	cptab->ctb_cpu2cpt[cpu] = -1;
}

static void cfs_cpt_add_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	struct cfs_cpu_partition *part;

	if (!node_isset(node, *cptab->ctb_nodemask)) {
		unsigned int dist;

		/* first time node is added to the CPT table */
		node_set(node, *cptab->ctb_nodemask);
		cptab->ctb_node2cpt[node] = cpt;

		dist = cfs_cpt_distance_calculate(cptab->ctb_nodemask,
						  cptab->ctb_nodemask);
		cptab->ctb_distance = dist;
	}

	part = &cptab->ctb_parts[cpt];
	if (!node_isset(node, *part->cpt_nodemask)) {
		int cpt2;

		/* first time node is added to this CPT */
		node_set(node, *part->cpt_nodemask);
		for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
			struct cfs_cpu_partition *part2;
			unsigned int dist;

			part2 = &cptab->ctb_parts[cpt2];
			dist = cfs_cpt_distance_calculate(part->cpt_nodemask,
							  part2->cpt_nodemask);
			part->cpt_distance[cpt2] = dist;
			dist = cfs_cpt_distance_calculate(part2->cpt_nodemask,
							  part->cpt_nodemask);
			part2->cpt_distance[cpt] = dist;
		}
	}
}

static void cfs_cpt_del_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	struct cfs_cpu_partition *part = &cptab->ctb_parts[cpt];
	int cpu;

	for_each_cpu(cpu, part->cpt_cpumask) {
		/* this CPT has other CPU belonging to this node? */
		if (cpu_to_node(cpu) == node)
			break;
	}

	if (cpu >= nr_cpu_ids && node_isset(node, *part->cpt_nodemask)) {
		int cpt2;

		/* No more CPUs in the node for this CPT. */
		node_clear(node, *part->cpt_nodemask);
		for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
			struct cfs_cpu_partition *part2;
			unsigned int dist;

			part2 = &cptab->ctb_parts[cpt2];
			if (node_isset(node, *part2->cpt_nodemask))
				cptab->ctb_node2cpt[node] = cpt2;

			dist = cfs_cpt_distance_calculate(part->cpt_nodemask,
							  part2->cpt_nodemask);
			part->cpt_distance[cpt2] = dist;
			dist = cfs_cpt_distance_calculate(part2->cpt_nodemask,
							  part->cpt_nodemask);
			part2->cpt_distance[cpt] = dist;
		}
	}

	for_each_cpu(cpu, cptab->ctb_cpumask) {
		/* this CPT-table has other CPUs belonging to this node? */
		if (cpu_to_node(cpu) == node)
			break;
	}

	if (cpu >= nr_cpu_ids && node_isset(node, *cptab->ctb_nodemask)) {
		/* No more CPUs in the table for this node. */
		node_clear(node, *cptab->ctb_nodemask);
		cptab->ctb_node2cpt[node] = -1;
		cptab->ctb_distance =
			cfs_cpt_distance_calculate(cptab->ctb_nodemask,
						   cptab->ctb_nodemask);
	}
}
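
/*
 * Add CPU @cpu to partition @cpt. Returns 1 on success and 0 when the
 * CPU is offline, out of range, or already owned by a partition; on
 * success the CPU's NUMA node is added too and the distance vectors of
 * all partitions are refreshed via cfs_cpt_add_node().
 */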
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);

	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
		return 0;
	}

	if (cptab->ctb_cpu2cpt[cpu] != -1) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
		       cpu, cptab->ctb_cpu2cpt[cpu]);
		return 0;
	}

	if (cpumask_test_cpu(cpu, cptab->ctb_cpumask)) {
		CDEBUG(D_INFO, "CPU %d is already in cpumask\n", cpu);
		return 0;
	}

	if (cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d cpumask\n",
		       cpu, cpt);
		return 0;
	}

	cfs_cpt_add_cpu(cptab, cpt, cpu);
	cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpu);

void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpu < 0 || cpu >= nr_cpu_ids) {
		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
		return;
	}

	if (cpt == CFS_CPT_ANY) {
		/* caller doesn't know the partition ID */
		cpt = cptab->ctb_cpu2cpt[cpu];
		if (cpt < 0) { /* not set in this CPT-table */
			CDEBUG(D_INFO,
			       "Try to unset cpu %d which is not in CPT-table %p\n",
			       cpu, cptab);
			return;
		}
	} else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
		CDEBUG(D_INFO,
		       "CPU %d is not in CPU partition %d\n", cpu, cpt);
		return;
	}

	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));

	cfs_cpt_del_cpu(cptab, cpt, cpu);
	cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
}
EXPORT_SYMBOL(cfs_cpt_unset_cpu);

int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt,
			const cpumask_t *mask)
{
	int cpu;

	if (!cpumask_weight(mask) ||
	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
		CDEBUG(D_INFO,
		       "No online CPU is found in the CPU mask for CPU partition %d\n",
		       cpt);
		return 0;
	}

	for_each_cpu(cpu, mask) {
		cfs_cpt_add_cpu(cptab, cpt, cpu);
		cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));
	}

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpumask);

void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt,
			   const cpumask_t *mask)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		cfs_cpt_del_cpu(cptab, cpt, cpu);
		cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
	}
}
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);

int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	const cpumask_t *mask;
	int cpu;

	if (node < 0 || node >= nr_node_ids) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return 0;
	}

	mask = cpumask_of_node(node);

	for_each_cpu(cpu, mask)
		cfs_cpt_add_cpu(cptab, cpt, cpu);

	cfs_cpt_add_node(cptab, cpt, node);

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_node);

void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	const cpumask_t *mask;
	int cpu;

	if (node < 0 || node >= nr_node_ids) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return;
	}

	mask = cpumask_of_node(node);

	for_each_cpu(cpu, mask)
		cfs_cpt_del_cpu(cptab, cpt, cpu);

	cfs_cpt_del_node(cptab, cpt, node);
}
EXPORT_SYMBOL(cfs_cpt_unset_node);

int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt,
			 const nodemask_t *mask)
{
	int node;

	for_each_node_mask(node, *mask)
		cfs_cpt_set_node(cptab, cpt, node);

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_nodemask);

void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt,
			    const nodemask_t *mask)
{
	int node;

	for_each_node_mask(node, *mask)
		cfs_cpt_unset_node(cptab, cpt, node);
}
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);

int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
	nodemask_t *mask;
	int weight;
	unsigned int rotor;
	int node = 0;

	/* convert CPU partition ID to HW node id */

	if (cpt < 0 || cpt >= cptab->ctb_nparts) {
		mask = cptab->ctb_nodemask;
		rotor = cptab->ctb_spread_rotor++;
	} else {
		mask = cptab->ctb_parts[cpt].cpt_nodemask;
		rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
		node = cptab->ctb_parts[cpt].cpt_node;
	}

	weight = nodes_weight(*mask);
	if (weight > 0) {
		rotor %= weight;

		for_each_node_mask(node, *mask) {
			if (!rotor--)
				return node;
		}
	}

	return node;
}
EXPORT_SYMBOL(cfs_cpt_spread_node);
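
/*
 * Map the calling CPU to its CPU partition ID. When @remap is set, a
 * CPU that is not in the table is folded onto a valid partition with a
 * simple modulo so callers never see a negative ID.
 */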
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
	int cpu;
	int cpt;

	cpu = smp_processor_id();
	cpt = cptab->ctb_cpu2cpt[cpu];

	if (cpt < 0 && remap) {
		/* don't return negative value for safety of upper layer,
		 * instead we shadow the unknown cpu to a valid partition ID
		 */
		cpt = cpu % cptab->ctb_nparts;
	}

	return cpt;
}
EXPORT_SYMBOL(cfs_cpt_current);

int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
{
	LASSERT(cpu >= 0 && cpu < nr_cpu_ids);

	return cptab->ctb_cpu2cpt[cpu];
}
EXPORT_SYMBOL(cfs_cpt_of_cpu);

int cfs_cpt_of_node(struct cfs_cpt_table *cptab, int node)
{
	if (node < 0 || node >= nr_node_ids)
		return CFS_CPT_ANY;

	return cptab->ctb_node2cpt[node];
}
EXPORT_SYMBOL(cfs_cpt_of_node);
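
/*
 * Migrate the current thread onto the CPUs and memory nodes of
 * partition @cpt (the whole table for CFS_CPT_ANY). If every online
 * CPU is already inside the target cpumask, no affinity change is
 * needed.
 */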
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
	nodemask_t *nodemask;
	cpumask_t *cpumask;
	int cpu;
	int rc;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpt == CFS_CPT_ANY) {
		cpumask = cptab->ctb_cpumask;
		nodemask = cptab->ctb_nodemask;
	} else {
		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
	}

	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		CDEBUG(D_INFO,
		       "No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
		       cpt);
		return -ENODEV;
	}

	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			continue;

		rc = set_cpus_allowed_ptr(current, cpumask);
		set_mems_allowed(*nodemask);

		schedule(); /* switch to allowed CPU */

		return rc;
	}

	/* don't need to set affinity because all online CPUs are covered */
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_bind);

/**
 * Choose at most \a number CPUs from \a node and set them in \a cpt.
 * We always prefer to choose CPUs in the same core/socket.
 */
static int cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
				cpumask_t *node_mask, int number)
{
	cpumask_var_t socket_mask;
	cpumask_var_t core_mask;
	int rc = 0;
	int cpu;
	int i;

	LASSERT(number > 0);

	if (number >= cpumask_weight(node_mask)) {
		while (!cpumask_empty(node_mask)) {
			cpu = cpumask_first(node_mask);
			cpumask_clear_cpu(cpu, node_mask);

			if (!cpu_online(cpu))
				continue;

			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
			if (!rc)
				return -EINVAL;
		}
		return 0;
	}

	/*
	 * Allocate scratch buffers
	 * As we cannot initialize a cpumask_var_t, we need
	 * to alloc both before we can risk trying to free either
	 */
	if (!zalloc_cpumask_var(&socket_mask, GFP_NOFS))
		rc = -ENOMEM;
	if (!zalloc_cpumask_var(&core_mask, GFP_NOFS))
		rc = -ENOMEM;
	if (rc)
		goto out;

	while (!cpumask_empty(node_mask)) {
		cpu = cpumask_first(node_mask);

		/* get cpumask for cores in the same socket */
		cpumask_and(socket_mask, topology_core_cpumask(cpu), node_mask);
		while (!cpumask_empty(socket_mask)) {
			/* get cpumask for hts in the same core */
			cpumask_and(core_mask, topology_sibling_cpumask(cpu),
				    node_mask);

			for_each_cpu(i, core_mask) {
				cpumask_clear_cpu(i, socket_mask);
				cpumask_clear_cpu(i, node_mask);

				if (!cpu_online(i))
					continue;

				rc = cfs_cpt_set_cpu(cptab, cpt, i);
				if (!rc) {
					rc = -EINVAL;
					goto out;
				}

				if (!--number)
					goto out;
			}
			cpu = cpumask_first(socket_mask);
		}
	}

out:
	free_cpumask_var(socket_mask);
	free_cpumask_var(core_mask);
	return rc;
}

#define CPT_WEIGHT_MIN 4
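
/*
 * Estimate a reasonable partition count: roughly one partition per two
 * cores (2 * nthr hardware threads), trimmed until it divides the
 * online CPU count evenly. Worked example (illustrative): with 64
 * online CPUs and 2 HTs per core, the loop below stops at ncpt = 16,
 * since 64 > 2 * 2 * ncpt only holds while ncpt < 16, and 64 % 16 == 0
 * already holds.
 */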
static int cfs_cpt_num_estimate(void)
{
	int nthr = cpumask_weight(topology_sibling_cpumask(smp_processor_id()));
	int ncpu = num_online_cpus();
	int ncpt = 1;

	if (ncpu > CPT_WEIGHT_MIN)
		for (ncpt = 2; ncpu > 2 * nthr * ncpt; ncpt++)
			; /* nothing */

#if (BITS_PER_LONG == 32)
	/* configuring many CPU partitions on a 32-bit system
	 * could consume too much memory
	 */
	ncpt = min(2, ncpt);
#endif
	while (ncpu % ncpt)
		ncpt--; /* worst case is 1 */

	return ncpt;
}

static struct cfs_cpt_table *cfs_cpt_table_create(int ncpt)
{
	struct cfs_cpt_table *cptab = NULL;
	cpumask_var_t node_mask;
	int cpt = 0;
	int node;
	int num;
	int rem;
	int rc = 0;

	/* use the estimate if no partition count was requested */
	num = cfs_cpt_num_estimate();
	if (ncpt <= 0)
		ncpt = num;

	if (ncpt > num_online_cpus()) {
		rc = -EINVAL;
		CERROR("libcfs: CPU partition count %d > cores %d: rc = %d\n",
		       ncpt, num_online_cpus(), rc);
		goto failed;
	}

	if (ncpt > 4 * num) {
		CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n",
		      ncpt, num);
	}

	cptab = cfs_cpt_table_alloc(ncpt);
	if (!cptab) {
		CERROR("Failed to allocate CPU map(%d)\n", ncpt);
		rc = -ENOMEM;
		goto failed;
	}

	if (!zalloc_cpumask_var(&node_mask, GFP_NOFS)) {
		CERROR("Failed to allocate scratch cpumask\n");
		rc = -ENOMEM;
		goto failed;
	}

	num = num_online_cpus() / ncpt;
	rem = num_online_cpus() % ncpt;
	for_each_online_node(node) {
		cpumask_copy(node_mask, cpumask_of_node(node));

		while (cpt < ncpt && !cpumask_empty(node_mask)) {
			struct cfs_cpu_partition *part = &cptab->ctb_parts[cpt];
			int ncpu = cpumask_weight(part->cpt_cpumask);

			rc = cfs_cpt_choose_ncpus(cptab, cpt, node_mask,
						  (rem > 0) + num - ncpu);
			if (rc < 0) {
				rc = -EINVAL;
				goto failed_mask;
			}

			ncpu = cpumask_weight(part->cpt_cpumask);
			if (ncpu == num + !!(rem > 0)) {
				cpt++;
				rem--;
			}
		}
	}

	free_cpumask_var(node_mask);

	return cptab;

failed_mask:
	free_cpumask_var(node_mask);
failed:
	CERROR("Failed (rc = %d) to setup CPU partition table with %d partitions, online HW NUMA nodes: %d, HW CPU cores: %d.\n",
	       rc, ncpt, num_online_nodes(), num_online_cpus());

	if (cptab)
		cfs_cpt_table_free(cptab);

	return ERR_PTR(rc);
}
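
/*
 * Build a table from a cpu_pattern string such as "0[0,2] 1[1,3]" (see
 * the modparam comment near the top of this file). A leading 'N' makes
 * the bracket contents NUMA node IDs instead of CPU IDs, and a bare "N"
 * is a shortcut that mirrors the NUMA topology directly.
 */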
static struct cfs_cpt_table *cfs_cpt_table_create_pattern(const char *pattern)
{
	struct cfs_cpt_table *cptab;
	char *pattern_dup;
	char *bracket;
	char *str;
	int node = 0;
	int ncpt = 0;
	int cpt = 0;
	int high;
	int rc;
	int c;
	int i;

	pattern_dup = kstrdup(pattern, GFP_KERNEL);
	if (!pattern_dup) {
		CERROR("Failed to duplicate pattern '%s'\n", pattern);
		return ERR_PTR(-ENOMEM);
	}

	str = strim(pattern_dup);
	if (*str == 'n' || *str == 'N') {
		str++; /* skip 'N' char */
		node = 1; /* NUMA pattern */
		if (*str == '\0') {
			node = -1;
			for_each_online_node(i) {
				if (!cpumask_empty(cpumask_of_node(i)))
					ncpt++;
			}
			if (ncpt == 1) { /* single NUMA node */
				kfree(pattern_dup);
				return cfs_cpt_table_create(cpu_npartitions);
			}
		}
	}

	if (!ncpt) { /* scanning bracket which is mark of partition */
		bracket = str;
		while ((bracket = strchr(bracket, '['))) {
			bracket++;
			ncpt++;
		}
	}

	if (!ncpt ||
	    (node && ncpt > num_online_nodes()) ||
	    (!node && ncpt > num_online_cpus())) {
		CERROR("Invalid pattern '%s', or too many partitions %d\n",
		       pattern_dup, ncpt);
		rc = -EINVAL;
		goto err_free_str;
	}

	cptab = cfs_cpt_table_alloc(ncpt);
	if (!cptab) {
		CERROR("Failed to allocate CPU partition table\n");
		rc = -ENOMEM;
		goto err_free_str;
	}

	if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
		for_each_online_node(i) {
			if (cpumask_empty(cpumask_of_node(i)))
				continue;

			rc = cfs_cpt_set_node(cptab, cpt++, i);
			if (!rc) {
				rc = -EINVAL;
				goto err_free_table;
			}
		}
		kfree(pattern_dup);
		return cptab;
	}

	high = node ? nr_node_ids - 1 : nr_cpu_ids - 1;

	for (str = strim(str), c = 0; /* until break */; c++) {
		struct cfs_range_expr *range;
		struct cfs_expr_list *el;
		int n;

		bracket = strchr(str, '[');
		if (!bracket) {
			if (*str) {
				CERROR("Invalid pattern '%s'\n", str);
				rc = -EINVAL;
				goto err_free_table;
			} else if (c != ncpt) {
				CERROR("Expect %d partitions but found %d\n",
				       ncpt, c);
				rc = -EINVAL;
				goto err_free_table;
			}
			break;
		}

		if (sscanf(str, "%d%n", &cpt, &n) < 1) {
			CERROR("Invalid CPU pattern '%s'\n", str);
			rc = -EINVAL;
			goto err_free_table;
		}

		if (cpt < 0 || cpt >= ncpt) {
			CERROR("Invalid partition id %d, total partitions %d\n",
			       cpt, ncpt);
			rc = -EINVAL;
			goto err_free_table;
		}

		if (cfs_cpt_weight(cptab, cpt)) {
			CERROR("Partition %d has already been set.\n", cpt);
			rc = -EPERM;
			goto err_free_table;
		}

		str = strim(str + n);
		if (str != bracket) {
			CERROR("Invalid pattern '%s'\n", str);
			rc = -EINVAL;
			goto err_free_table;
		}

		bracket = strchr(str, ']');
		if (!bracket) {
			CERROR("Missing right bracket for partition %d in '%s'\n",
			       cpt, str);
			rc = -EINVAL;
			goto err_free_table;
		}

		rc = cfs_expr_list_parse(str, (bracket - str) + 1, 0, high,
					 &el);
		if (rc) {
			CERROR("Can't parse number range in '%s'\n", str);
			rc = -ERANGE;
			goto err_free_table;
		}

		list_for_each_entry(range, &el->el_exprs, re_link) {
			for (i = range->re_lo; i <= range->re_hi; i++) {
				if ((i - range->re_lo) % range->re_stride)
					continue;

				rc = node ? cfs_cpt_set_node(cptab, cpt, i)
					  : cfs_cpt_set_cpu(cptab, cpt, i);
				if (!rc) {
					cfs_expr_list_free(el);
					rc = -EINVAL;
					goto err_free_table;
				}
			}
		}

		cfs_expr_list_free(el);

		if (!cfs_cpt_online(cptab, cpt)) {
			CERROR("No online CPU is found on partition %d\n", cpt);
			rc = -ENODEV;
			goto err_free_table;
		}

		str = strim(bracket + 1);
	}

	kfree(pattern_dup);
	return cptab;

err_free_table:
	cfs_cpt_table_free(cptab);
err_free_str:
	kfree(pattern_dup);
	return ERR_PTR(rc);
}
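
/*
 * CPU hotplug support: on kernels with the cpuhp state machine we
 * register online/dead callbacks, otherwise we fall back to a notifier
 * block. Plug-out is only reported, not rebalanced, as the message in
 * cfs_cpu_dead() notes.
 */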
#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
static enum cpuhp_state lustre_cpu_online;

static int cfs_cpu_online(unsigned int cpu)
{
	return 0;
}
#endif

static int cfs_cpu_dead(unsigned int cpu)
{
	bool warn;

	/* if all HTs in a core are offline, it may break affinity */
	warn = cpumask_any_and(topology_sibling_cpumask(cpu),
			       cpu_online_mask) >= nr_cpu_ids;
	CDEBUG(warn ? D_WARNING : D_INFO,
	       "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u]\n",
	       cpu);

	return 0;
}

#ifndef HAVE_HOTPLUG_STATE_MACHINE
static int cfs_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	default:
		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
			       cpu, action);
			break;
		}

		cfs_cpu_dead(cpu);
	}

	return NOTIFY_OK;
}

static struct notifier_block cfs_cpu_notifier = {
	.notifier_call = cfs_cpu_notify,
};
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */

void cfs_cpu_fini(void)
{
	if (!IS_ERR_OR_NULL(cfs_cpt_tab))
		cfs_cpt_table_free(cfs_cpt_tab);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	if (lustre_cpu_online > 0)
		cpuhp_remove_state_nocalls(lustre_cpu_online);
	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
#else
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */
}

int cfs_cpu_init(void)
{
	int ret;

	LASSERT(!cfs_cpt_tab);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
					"fs/lustre/cfe:dead", NULL,
					cfs_cpu_dead);
	if (ret < 0)
		goto failed_cpu_dead;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"fs/lustre/cfe:online",
					cfs_cpu_online, NULL);
	if (ret < 0)
		goto failed_cpu_online;

	lustre_cpu_online = ret;
#else
	register_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */

	if (*cpu_pattern) {
		cfs_cpt_tab = cfs_cpt_table_create_pattern(cpu_pattern);
		if (IS_ERR(cfs_cpt_tab)) {
			CERROR("Failed to create cptab from pattern '%s'\n",
			       cpu_pattern);
			ret = PTR_ERR(cfs_cpt_tab);
			goto failed_alloc_table;
		}
	} else {
		cfs_cpt_tab = cfs_cpt_table_create(cpu_npartitions);
		if (IS_ERR(cfs_cpt_tab)) {
			CERROR("Failed to create cptab with npartitions %d\n",
			       cpu_npartitions);
			ret = PTR_ERR(cfs_cpt_tab);
			goto failed_alloc_table;
		}
	}

	LCONSOLE(0, "HW NUMA nodes: %d, HW CPU cores: %d, npartitions: %d\n",
		 num_online_nodes(), num_online_cpus(),
		 cfs_cpt_number(cfs_cpt_tab));

	return 0;

failed_alloc_table:
	if (!IS_ERR_OR_NULL(cfs_cpt_tab))
		cfs_cpt_table_free(cfs_cpt_tab);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	if (lustre_cpu_online > 0)
		cpuhp_remove_state_nocalls(lustre_cpu_online);
failed_cpu_online:
	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
failed_cpu_dead:
#else
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */

	return ret;
}