 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, Intel Corporation.
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 * Author: liang@whamcloud.com
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/cpu.h>
#include <linux/sched.h>
#include <libcfs/libcfs.h>
 * modparam for setting number of partitions
 *  0 : estimate best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
static int cpu_npartitions;
module_param(cpu_npartitions, int, 0444);
MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
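/*
 * Illustrative only (not part of the original file): assuming this file is
 * built into the libcfs module, the partition count can be forced at load
 * time with a modprobe option such as
 *
 *	options libcfs cpu_npartitions=4
 *
 * in an /etc/modprobe.d/ configuration file.
 */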
 * modparam for setting CPU partition patterns:
 * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is a CPU
 * partition ID, the numbers inside the brackets are processor IDs (cores
 * or HTs)
 * e.g. "N 0[0,1] 1[2,3]": a leading 'N' means the numbers inside the
 * brackets are NUMA node IDs, while the number before each bracket is still
 * the CPU partition ID
 * e.g. "N": a shortcut that builds the CPT table from the NUMA and CPU
 * topology
 * NB: if cpu_pattern is specified, cpu_npartitions is ignored
static char *cpu_pattern = "N";
module_param(cpu_pattern, charp, 0444);
MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
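/*
 * Illustrative only (not part of the original file): to pin CPU partition 0
 * to NUMA node 0 and partition 1 to NUMA node 1 at load time, a modprobe
 * option such as
 *
 *	options libcfs cpu_pattern="N 0[0] 1[1]"
 *
 * could be used, again assuming this file is built into the libcfs module.
 */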
void cfs_cpt_table_free(struct cfs_cpt_table *cptab)
	if (cptab->ctb_cpu2cpt) {
		LIBCFS_FREE(cptab->ctb_cpu2cpt,
			    nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

	if (cptab->ctb_node2cpt) {
		LIBCFS_FREE(cptab->ctb_node2cpt,
			    nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

	for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));

		if (part->cpt_cpumask)
			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());

		if (part->cpt_distance) {
			LIBCFS_FREE(part->cpt_distance,
				    cptab->ctb_nparts *
				    sizeof(part->cpt_distance[0]));
	if (cptab->ctb_parts) {
		LIBCFS_FREE(cptab->ctb_parts,
			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));

	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (cptab->ctb_cpumask)
		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());

	LIBCFS_FREE(cptab, sizeof(*cptab));
EXPORT_SYMBOL(cfs_cpt_table_free);
struct cfs_cpt_table *cfs_cpt_table_alloc(int ncpt)
	struct cfs_cpt_table *cptab;

	LIBCFS_ALLOC(cptab, sizeof(*cptab));

	cptab->ctb_nparts = ncpt;

	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
	if (!cptab->ctb_cpumask)
		goto failed_alloc_cpumask;

	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (!cptab->ctb_nodemask)
		goto failed_alloc_nodemask;

	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
		     nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));
	if (!cptab->ctb_cpu2cpt)
		goto failed_alloc_cpu2cpt;

	memset(cptab->ctb_cpu2cpt, -1,
	       nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

	LIBCFS_ALLOC(cptab->ctb_node2cpt,
		     nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));
	if (!cptab->ctb_node2cpt)
		goto failed_alloc_node2cpt;

	memset(cptab->ctb_node2cpt, -1,
	       nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
	if (!cptab->ctb_parts)
		goto failed_alloc_ctb_parts;

	for (i = 0; i < ncpt; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
		if (!part->cpt_cpumask)
			goto failed_setting_ctb_parts;

		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
		if (!part->cpt_nodemask)
			goto failed_setting_ctb_parts;

		LIBCFS_ALLOC(part->cpt_distance,
			     cptab->ctb_nparts * sizeof(part->cpt_distance[0]));
		if (!part->cpt_distance)
			goto failed_setting_ctb_parts;

failed_setting_ctb_parts:
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));

		if (part->cpt_cpumask)
			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());

		if (part->cpt_distance) {
			LIBCFS_FREE(part->cpt_distance,
				    cptab->ctb_nparts *
				    sizeof(part->cpt_distance[0]));
	if (cptab->ctb_parts) {
		LIBCFS_FREE(cptab->ctb_parts,
			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));

failed_alloc_ctb_parts:
	if (cptab->ctb_node2cpt) {
		LIBCFS_FREE(cptab->ctb_node2cpt,
			    nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

failed_alloc_node2cpt:
	if (cptab->ctb_cpu2cpt) {
		LIBCFS_FREE(cptab->ctb_cpu2cpt,
			    nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

failed_alloc_cpu2cpt:
	if (cptab->ctb_nodemask)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
failed_alloc_nodemask:
	if (cptab->ctb_cpumask)
		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
failed_alloc_cpumask:
	LIBCFS_FREE(cptab, sizeof(*cptab));
EXPORT_SYMBOL(cfs_cpt_table_alloc);
int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t:", i);

		for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
			rc = snprintf(tmp, len, " %d", j);

EXPORT_SYMBOL(cfs_cpt_table_print);
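/*
 * Illustrative output (not from the original file): for a table with two
 * partitions of four CPUs each, cfs_cpt_table_print() would fill the buffer
 * with roughly
 *
 *	0	: 0 1 2 3
 *	1	: 4 5 6 7
 */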
int cfs_cpt_distance_print(struct cfs_cpt_table *cptab, char *buf, int len)
	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t:", i);

		for (j = 0; j < cptab->ctb_nparts; j++) {
			rc = snprintf(tmp, len, " %d:%d",
				      j, cptab->ctb_parts[i].cpt_distance[j]);

EXPORT_SYMBOL(cfs_cpt_distance_print);
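/*
 * Illustrative output (not from the original file): with two partitions on
 * two NUMA nodes whose node distances are 10 (local) and 20 (remote),
 * cfs_cpt_distance_print() would produce roughly
 *
 *	0	: 0:10 1:20
 *	1	: 0:20 1:10
 */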
int cfs_cpt_number(struct cfs_cpt_table *cptab)
	return cptab->ctb_nparts;
EXPORT_SYMBOL(cfs_cpt_number);

int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_weight(cptab->ctb_cpumask) :
	       cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
EXPORT_SYMBOL(cfs_cpt_weight);

int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_any_and(cptab->ctb_cpumask,
			       cpu_online_mask) < nr_cpu_ids :
	       cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
			       cpu_online_mask) < nr_cpu_ids;
EXPORT_SYMBOL(cfs_cpt_online);

cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
EXPORT_SYMBOL(cfs_cpt_cpumask);

nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
EXPORT_SYMBOL(cfs_cpt_nodemask);

unsigned int cfs_cpt_distance(struct cfs_cpt_table *cptab, int cpt1, int cpt2)
	LASSERT(cpt1 == CFS_CPT_ANY || (cpt1 >= 0 && cpt1 < cptab->ctb_nparts));
	LASSERT(cpt2 == CFS_CPT_ANY || (cpt2 >= 0 && cpt2 < cptab->ctb_nparts));

	if (cpt1 == CFS_CPT_ANY || cpt2 == CFS_CPT_ANY)
		return cptab->ctb_distance;

	return cptab->ctb_parts[cpt1].cpt_distance[cpt2];
EXPORT_SYMBOL(cfs_cpt_distance);
 * Calculate the maximum NUMA distance between all nodes in the
 * from_mask and all nodes in the to_mask.
static unsigned int cfs_cpt_distance_calculate(nodemask_t *from_mask,
					       nodemask_t *to_mask)
	unsigned int maximum;
	unsigned int distance;

	for_each_node_mask(from, *from_mask) {
		for_each_node_mask(to, *to_mask) {
			distance = node_distance(from, to);
			if (maximum < distance)
static void cfs_cpt_add_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
	cptab->ctb_cpu2cpt[cpu] = cpt;

	cpumask_set_cpu(cpu, cptab->ctb_cpumask);
	cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);

static void cfs_cpt_del_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
	cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
	cpumask_clear_cpu(cpu, cptab->ctb_cpumask);

	cptab->ctb_cpu2cpt[cpu] = -1;
static void cfs_cpt_add_node(struct cfs_cpt_table *cptab, int cpt, int node)
	struct cfs_cpu_partition *part;

	if (!node_isset(node, *cptab->ctb_nodemask)) {
		/* first time this node is added to the CPT table */
		node_set(node, *cptab->ctb_nodemask);
		cptab->ctb_node2cpt[node] = cpt;

		dist = cfs_cpt_distance_calculate(cptab->ctb_nodemask,
						  cptab->ctb_nodemask);
		cptab->ctb_distance = dist;

	part = &cptab->ctb_parts[cpt];
	if (!node_isset(node, *part->cpt_nodemask)) {
		/* first time this node is added to this CPT */
		node_set(node, *part->cpt_nodemask);
		for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
			struct cfs_cpu_partition *part2;

			part2 = &cptab->ctb_parts[cpt2];
			dist = cfs_cpt_distance_calculate(part->cpt_nodemask,
							  part2->cpt_nodemask);
			part->cpt_distance[cpt2] = dist;
			dist = cfs_cpt_distance_calculate(part2->cpt_nodemask,
							  part->cpt_nodemask);
			part2->cpt_distance[cpt] = dist;
static void cfs_cpt_del_node(struct cfs_cpt_table *cptab, int cpt, int node)
	struct cfs_cpu_partition *part = &cptab->ctb_parts[cpt];

	for_each_cpu(cpu, part->cpt_cpumask) {
		/* does this CPT have another CPU belonging to this node? */
		if (cpu_to_node(cpu) == node)

	if (cpu >= nr_cpu_ids && node_isset(node, *part->cpt_nodemask)) {
		/* No more CPUs in this node for this CPT. */
		node_clear(node, *part->cpt_nodemask);
		for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
			struct cfs_cpu_partition *part2;

			part2 = &cptab->ctb_parts[cpt2];
			if (node_isset(node, *part2->cpt_nodemask))
				cptab->ctb_node2cpt[node] = cpt2;

			dist = cfs_cpt_distance_calculate(part->cpt_nodemask,
							  part2->cpt_nodemask);
			part->cpt_distance[cpt2] = dist;
			dist = cfs_cpt_distance_calculate(part2->cpt_nodemask,
							  part->cpt_nodemask);
			part2->cpt_distance[cpt] = dist;
	for_each_cpu(cpu, cptab->ctb_cpumask) {
		/* does this CPT table have other CPUs belonging to this node? */
		if (cpu_to_node(cpu) == node)

	if (cpu >= nr_cpu_ids && node_isset(node, *cptab->ctb_nodemask)) {
		/* No more CPUs in the table for this node. */
		node_clear(node, *cptab->ctb_nodemask);
		cptab->ctb_node2cpt[node] = -1;
		cptab->ctb_distance =
			cfs_cpt_distance_calculate(cptab->ctb_nodemask,
						   cptab->ctb_nodemask);
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);

	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		CDEBUG(D_INFO, "CPU %d is invalid or offline\n", cpu);

	if (cptab->ctb_cpu2cpt[cpu] != -1) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
		       cpu, cptab->ctb_cpu2cpt[cpu]);

	if (cpumask_test_cpu(cpu, cptab->ctb_cpumask)) {
		CDEBUG(D_INFO, "CPU %d is already in cpumask\n", cpu);

	if (cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d cpumask\n",
		       cpu, cpt);
	cfs_cpt_add_cpu(cptab, cpt, cpu);
	cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_set_cpu);
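/*
 * Illustrative use of the setter API (hypothetical, not from this file):
 * a two-partition table could be built by hand with
 *
 *	struct cfs_cpt_table *cptab = cfs_cpt_table_alloc(2);
 *
 *	cfs_cpt_set_cpu(cptab, 0, 0);
 *	cfs_cpt_set_cpu(cptab, 0, 1);
 *	cfs_cpt_set_cpu(cptab, 1, 2);
 *	cfs_cpt_set_cpu(cptab, 1, 3);
 *
 * where each call returns 1 on success and 0 if the CPU is invalid, offline,
 * or already assigned.
 */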
void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpu < 0 || cpu >= nr_cpu_ids) {
		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);

	if (cpt == CFS_CPT_ANY) {
		/* caller doesn't know the partition ID */
		cpt = cptab->ctb_cpu2cpt[cpu];
		if (cpt < 0) { /* not set in this CPT table */
			       "Trying to unset CPU %d, which is not in CPT table %p\n",
	} else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
		       "CPU %d is not in CPU partition %d\n", cpu, cpt);

	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));

	cfs_cpt_del_cpu(cptab, cpt, cpu);
	cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_unset_cpu);
int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt,
			const cpumask_t *mask)
	if (!cpumask_weight(mask) ||
	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
		       "No online CPU found in the CPU mask for CPU partition %d\n",

	for_each_cpu(cpu, mask) {
		cfs_cpt_add_cpu(cptab, cpt, cpu);
		cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_set_cpumask);
void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt,
			   const cpumask_t *mask)
	for_each_cpu(cpu, mask) {
		cfs_cpt_del_cpu(cptab, cpt, cpu);
		cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);

int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
	const cpumask_t *mask;

	if (node < 0 || node >= nr_node_ids) {
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);

	mask = cpumask_of_node(node);

	for_each_cpu(cpu, mask)
		cfs_cpt_add_cpu(cptab, cpt, cpu);

	cfs_cpt_add_node(cptab, cpt, node);
EXPORT_SYMBOL(cfs_cpt_set_node);

void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
	const cpumask_t *mask;

	if (node < 0 || node >= nr_node_ids) {
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);

	mask = cpumask_of_node(node);

	for_each_cpu(cpu, mask)
		cfs_cpt_del_cpu(cptab, cpt, cpu);

	cfs_cpt_del_node(cptab, cpt, node);
EXPORT_SYMBOL(cfs_cpt_unset_node);

int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt,
			 const nodemask_t *mask)
	for_each_node_mask(node, *mask)
		cfs_cpt_set_node(cptab, cpt, node);
EXPORT_SYMBOL(cfs_cpt_set_nodemask);

void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt,
			    const nodemask_t *mask)
	for_each_node_mask(node, *mask)
		cfs_cpt_unset_node(cptab, cpt, node);
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
	/* convert CPU partition ID to HW node id */

	if (cpt < 0 || cpt >= cptab->ctb_nparts) {
		mask = cptab->ctb_nodemask;
		rotor = cptab->ctb_spread_rotor++;

		mask = cptab->ctb_parts[cpt].cpt_nodemask;
		rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
		node = cptab->ctb_parts[cpt].cpt_node;

	weight = nodes_weight(*mask);

	for_each_node_mask(node, *mask) {
EXPORT_SYMBOL(cfs_cpt_spread_node);
int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
	int cpu = smp_processor_id();
	int cpt = cptab->ctb_cpu2cpt[cpu];

		/* don't return a negative value, for the safety of the upper
		 * layer; instead, map the unknown CPU to a valid partition ID
		cpt = cpu % cptab->ctb_nparts;
EXPORT_SYMBOL(cfs_cpt_current);
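/*
 * Usage sketch (hypothetical, not part of this file): per-partition data
 * arrays are typically indexed by the calling thread's current partition,
 * e.g.
 *
 *	struct foo_pcpt *p = foo->f_pcpt[cfs_cpt_current(cfs_cpt_table, 1)];
 *
 * where 'foo' and 'f_pcpt' are illustrative names and remap = 1 guarantees
 * a valid index even if the current CPU was never added to the table.
 */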
int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
	LASSERT(cpu >= 0 && cpu < nr_cpu_ids);

	return cptab->ctb_cpu2cpt[cpu];
EXPORT_SYMBOL(cfs_cpt_of_cpu);
int cfs_cpt_of_node(struct cfs_cpt_table *cptab, int node)
	if (node < 0 || node >= nr_node_ids)

	return cptab->ctb_node2cpt[node];
EXPORT_SYMBOL(cfs_cpt_of_node);
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
	nodemask_t *nodemask;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpt == CFS_CPT_ANY) {
		cpumask = cptab->ctb_cpumask;
		nodemask = cptab->ctb_nodemask;

		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;

	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		       "No online CPU found in CPU partition %d. Was CPU hotplug performed on this system? You may need to reload the Lustre modules to keep the system working properly.\n",

	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))

		rc = set_cpus_allowed_ptr(current, cpumask);
		set_mems_allowed(*nodemask);
			schedule(); /* switch to an allowed CPU */

	/* no need to set affinity because all online CPUs are covered */
EXPORT_SYMBOL(cfs_cpt_bind);
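/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * service thread created for partition 'cpt' would normally call
 *
 *	cfs_cpt_bind(cfs_cpt_table, cpt);
 *
 * early in its main function so that it only runs on the CPUs, and allocates
 * from the NUMA nodes, of that partition.
 */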
 * Choose at most \a number CPUs from \a node_mask and set them in \a cpt.
 * We always prefer CPUs from the same core/socket.
static int cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
				cpumask_t *node_mask, int number)
	cpumask_t *socket_mask = NULL;
	cpumask_t *core_mask = NULL;

	if (number >= cpumask_weight(node_mask)) {
		while (!cpumask_empty(node_mask)) {
			cpu = cpumask_first(node_mask);
			cpumask_clear_cpu(cpu, node_mask);

			if (!cpu_online(cpu))

			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);

	/* allocate scratch buffers */
	LIBCFS_ALLOC(socket_mask, cpumask_size());
	LIBCFS_ALLOC(core_mask, cpumask_size());
	if (!socket_mask || !core_mask) {

	while (!cpumask_empty(node_mask)) {
		cpu = cpumask_first(node_mask);

		/* get the cpumask of cores in the same socket */
		cpumask_and(socket_mask, topology_core_cpumask(cpu), node_mask);
		while (!cpumask_empty(socket_mask)) {
			/* get the cpumask of HTs in the same core */
			cpumask_and(core_mask, topology_sibling_cpumask(cpu),
				    node_mask);
			for_each_cpu(i, core_mask) {
				cpumask_clear_cpu(i, socket_mask);
				cpumask_clear_cpu(i, node_mask);

				rc = cfs_cpt_set_cpu(cptab, cpt, i);

			cpu = cpumask_first(socket_mask);

	LIBCFS_FREE(core_mask, cpumask_size());
	LIBCFS_FREE(socket_mask, cpumask_size());
#define CPT_WEIGHT_MIN 4

static int cfs_cpt_num_estimate(void)
	int nthr = cpumask_weight(topology_sibling_cpumask(smp_processor_id()));
	int ncpu = num_online_cpus();

	if (ncpu > CPT_WEIGHT_MIN)
		for (ncpt = 2; ncpu > 2 * nthr * ncpt; ncpt++)

#if (BITS_PER_LONG == 32)
	/* configuring many CPU partitions on a 32-bit system could consume
		ncpt--; /* worst case is 1 */
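/*
 * Worked example (illustrative, not from the original file): with 32 online
 * CPUs and 2 hyperthreads per core, the estimation loop above stops at
 * ncpt = 8 (since 32 <= 2 * 2 * 8), i.e. 8 partitions of 4 logical CPUs each
 * on a 64-bit kernel.
 */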
static struct cfs_cpt_table *cfs_cpt_table_create(int ncpt)
	struct cfs_cpt_table *cptab = NULL;
	cpumask_t *node_mask = NULL;

	num = cfs_cpt_num_estimate();

	if (ncpt > num_online_cpus() || ncpt > 4 * num) {
		CWARN("CPU partition number %d is larger than suggested value (%d); your system may have performance issues or run out of memory while under pressure\n",

	cptab = cfs_cpt_table_alloc(ncpt);
		CERROR("Failed to allocate CPU map(%d)\n", ncpt);

	LIBCFS_ALLOC(node_mask, cpumask_size());
		CERROR("Failed to allocate scratch cpumask\n");

	num = num_online_cpus() / ncpt;
	rem = num_online_cpus() % ncpt;
	for_each_online_node(node) {
		cpumask_copy(node_mask, cpumask_of_node(node));

		while (cpt < ncpt && !cpumask_empty(node_mask)) {
			struct cfs_cpu_partition *part = &cptab->ctb_parts[cpt];
			int ncpu = cpumask_weight(part->cpt_cpumask);

			rc = cfs_cpt_choose_ncpus(cptab, cpt, node_mask,

			ncpu = cpumask_weight(part->cpt_cpumask);
			if (ncpu == num + !!(rem > 0)) {

	LIBCFS_FREE(node_mask, cpumask_size());

	LIBCFS_FREE(node_mask, cpumask_size());

	CERROR("Failed (rc = %d) to set up CPU partition table with %d partitions, online HW NUMA nodes: %d, HW CPU cores: %d.\n",
	       rc, ncpt, num_online_nodes(), num_online_cpus());

	cfs_cpt_table_free(cptab);
static struct cfs_cpt_table *cfs_cpt_table_create_pattern(const char *pattern)
	struct cfs_cpt_table *cptab;

	pattern_dup = kstrdup(pattern, GFP_KERNEL);
		CERROR("Failed to duplicate pattern '%s'\n", pattern);
		return ERR_PTR(-ENOMEM);

	str = cfs_trimwhite(pattern_dup);
	if (*str == 'n' || *str == 'N') {
		str++; /* skip 'N' char */
		node = 1; /* NUMA pattern */

			for_each_online_node(i) {
				if (!cpumask_empty(cpumask_of_node(i)))

			if (ncpt == 1) { /* single NUMA node */
				return cfs_cpt_table_create(cpu_npartitions);

	if (!ncpt) { /* scan for brackets, which mark partitions */
		while ((bracket = strchr(bracket, '['))) {

	    (node && ncpt > num_online_nodes()) ||
	    (!node && ncpt > num_online_cpus())) {
		CERROR("Invalid pattern '%s', or too many partitions %d\n",

	cptab = cfs_cpt_table_alloc(ncpt);
		CERROR("Failed to allocate CPU partition table\n");

	if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
		for_each_online_node(i) {
			if (cpumask_empty(cpumask_of_node(i)))

			rc = cfs_cpt_set_node(cptab, cpt++, i);

	high = node ? nr_node_ids - 1 : nr_cpu_ids - 1;

	for (str = cfs_trimwhite(str), c = 0; /* until break */; c++) {
		struct cfs_range_expr *range;
		struct cfs_expr_list *el;

		bracket = strchr(str, '[');
				CERROR("Invalid pattern '%s'\n", str);
				goto err_free_table;
			} else if (c != ncpt) {
				CERROR("Expected %d partitions but found %d\n",
				goto err_free_table;

		if (sscanf(str, "%d%n", &cpt, &n) < 1) {
			CERROR("Invalid CPU pattern '%s'\n", str);
			goto err_free_table;

		if (cpt < 0 || cpt >= ncpt) {
			CERROR("Invalid partition id %d, total partitions %d\n",
			goto err_free_table;

		if (cfs_cpt_weight(cptab, cpt)) {
			CERROR("Partition %d has already been set.\n", cpt);
			goto err_free_table;

		str = cfs_trimwhite(str + n);
		if (str != bracket) {
			CERROR("Invalid pattern '%s'\n", str);
			goto err_free_table;

		bracket = strchr(str, ']');
			CERROR("Missing right bracket for partition %d in '%s'\n",
			goto err_free_table;
		rc = cfs_expr_list_parse(str, (bracket - str) + 1, 0, high,
					 &el);
			CERROR("Can't parse number range in '%s'\n", str);
			goto err_free_table;

		list_for_each_entry(range, &el->el_exprs, re_link) {
			for (i = range->re_lo; i <= range->re_hi; i++) {
				if ((i - range->re_lo) % range->re_stride)

				rc = node ? cfs_cpt_set_node(cptab, cpt, i)
					  : cfs_cpt_set_cpu(cptab, cpt, i);
					cfs_expr_list_free(el);
					goto err_free_table;

		cfs_expr_list_free(el);

		if (!cfs_cpt_online(cptab, cpt)) {
			CERROR("No online CPU found in partition %d\n", cpt);
			goto err_free_table;

		str = cfs_trimwhite(bracket + 1);

	cfs_cpt_table_free(cptab);
#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
static enum cpuhp_state lustre_cpu_online;

static int cfs_cpu_online(unsigned int cpu)

static int cfs_cpu_dead(unsigned int cpu)
	/* if all HTs in a core are offline, it may break affinity */
	warn = cpumask_any_and(topology_sibling_cpumask(cpu),
			       cpu_online_mask) >= nr_cpu_ids;
	CDEBUG(warn ? D_WARNING : D_INFO,
	       "Lustre: CPU hot-unplug is not well supported; performance and stability may be impacted [CPU %u]\n",
#ifndef HAVE_HOTPLUG_STATE_MACHINE
static int cfs_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
	int cpu = (unsigned long)hcpu;

	case CPU_DEAD_FROZEN:
	case CPU_ONLINE_FROZEN:
		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",

static struct notifier_block cfs_cpu_notifier = {
	.notifier_call = cfs_cpu_notify,
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */
void cfs_cpu_fini(void)
	if (!IS_ERR_OR_NULL(cfs_cpt_table))
		cfs_cpt_table_free(cfs_cpt_table);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	if (lustre_cpu_online > 0)
		cpuhp_remove_state_nocalls(lustre_cpu_online);
	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */
int cfs_cpu_init(void)
	LASSERT(!cfs_cpt_table);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	ret = cpuhp_setup_state_nocalls(CPUHP_LUSTRE_CFS_DEAD,
					"fs/lustre/cfe:dead", NULL,
					cfs_cpu_dead);
		goto failed_cpu_dead;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"fs/lustre/cfe:online",
					cfs_cpu_online, NULL);
		goto failed_cpu_online;

	lustre_cpu_online = ret;
	register_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */

	cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
	if (IS_ERR(cfs_cpt_table)) {
		CERROR("Failed to create cptab from pattern '%s'\n",
		ret = PTR_ERR(cfs_cpt_table);
		goto failed_alloc_table;

	cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
	if (IS_ERR(cfs_cpt_table)) {
		CERROR("Failed to create cptab with npartitions %d\n",
		ret = PTR_ERR(cfs_cpt_table);
		goto failed_alloc_table;

	LCONSOLE(0, "HW NUMA nodes: %d, HW CPU cores: %d, npartitions: %d\n",
		 num_online_nodes(), num_online_cpus(),
		 cfs_cpt_number(cfs_cpt_table));

	if (!IS_ERR_OR_NULL(cfs_cpt_table))
		cfs_cpt_table_free(cfs_cpt_table);

#ifdef CONFIG_HOTPLUG_CPU
#ifdef HAVE_HOTPLUG_STATE_MACHINE
	if (lustre_cpu_online > 0)
		cpuhp_remove_state_nocalls(lustre_cpu_online);
	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif /* !HAVE_HOTPLUG_STATE_MACHINE */
#endif /* CONFIG_HOTPLUG_CPU */