/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/cpu.h>
#include <linux/sched.h>
#include <libcfs/libcfs.h>
/*
 * modparam for setting number of partitions
 *
 *  0 : estimate best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
 */
static int cpu_npartitions;
module_param(cpu_npartitions, int, 0444);
MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
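/*
 * Illustrative note (not in the original source): like any module_param(),
 * cpu_npartitions can be supplied at load time, e.g. via a modprobe option
 * line (assuming the module is loaded as "libcfs"):
 *
 *      options libcfs cpu_npartitions=4
 *
 * which would request four CPU partitions instead of the estimated value.
 * A non-empty cpu_pattern (below) takes precedence over this value.
 */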
/*
 * modparam for setting CPU partition patterns:
 *
 * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is a CPU
 *      partition ID, the numbers inside the brackets are processor IDs
 *      (cores or HTs)
 *
 * e.g. "N 0[0,1] 1[2,3]": the leading character 'N' means the numbers
 *      inside the brackets are NUMA node IDs, and the number before each
 *      bracket is a CPU partition ID
 *
 * e.g. "N": shortcut expression to create CPTs from the NUMA & CPU topology
 *
 * NB: if the user specifies cpu_pattern, cpu_npartitions will be ignored
 */
static char *cpu_pattern = "N";
module_param(cpu_pattern, charp, 0444);
MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern");
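/*
 * Illustrative note (not in the original source): on a hypothetical
 * two-node, eight-core machine the pattern could be set at load time, e.g.
 *
 *      options libcfs cpu_pattern="0[0,1,2,3] 1[4,5,6,7]"
 *
 * to place CPUs 0-3 in partition 0 and CPUs 4-7 in partition 1, or
 *
 *      options libcfs cpu_pattern="N 0[0] 1[1]"
 *
 * to build one partition per NUMA node. An empty pattern string falls back
 * to the cpu_npartitions behavior (see the module init code at the end of
 * this file).
 */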
/* return number of HTs in the same core of \a cpu */
cfs_cpu_ht_nsiblings(int cpu)
        return cpumask_weight(topology_sibling_cpumask(cpu));
EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
        if (cptab->ctb_cpu2cpt != NULL) {
                LIBCFS_FREE(cptab->ctb_cpu2cpt,
                            nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

        if (cptab->ctb_node2cpt != NULL) {
                LIBCFS_FREE(cptab->ctb_node2cpt,
                            nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

        for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
                struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

                if (part->cpt_nodemask != NULL) {
                        LIBCFS_FREE(part->cpt_nodemask,
                                    sizeof(*part->cpt_nodemask));

                if (part->cpt_cpumask != NULL)
                        LIBCFS_FREE(part->cpt_cpumask, cpumask_size());

                if (part->cpt_distance) {
                        LIBCFS_FREE(part->cpt_distance,
                                    sizeof(part->cpt_distance[0]));

        if (cptab->ctb_parts != NULL) {
                LIBCFS_FREE(cptab->ctb_parts,
                            cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));

        if (cptab->ctb_nodemask != NULL)
                LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
        if (cptab->ctb_cpumask != NULL)
                LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());

        LIBCFS_FREE(cptab, sizeof(*cptab));
EXPORT_SYMBOL(cfs_cpt_table_free);
struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
        struct cfs_cpt_table *cptab;

        LIBCFS_ALLOC(cptab, sizeof(*cptab));

        cptab->ctb_nparts = ncpt;

        LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
        LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
        if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)

        LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
                     nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));
        if (cptab->ctb_cpu2cpt == NULL)

        memset(cptab->ctb_cpu2cpt, -1,
               nr_cpu_ids * sizeof(cptab->ctb_cpu2cpt[0]));

        LIBCFS_ALLOC(cptab->ctb_node2cpt,
                     nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));
        if (cptab->ctb_node2cpt == NULL)

        memset(cptab->ctb_node2cpt, -1,
               nr_node_ids * sizeof(cptab->ctb_node2cpt[0]));

        LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
        if (cptab->ctb_parts == NULL)

        for (i = 0; i < ncpt; i++) {
                struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

                LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
                if (!part->cpt_cpumask)

                LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
                if (!part->cpt_nodemask)

                LIBCFS_ALLOC(part->cpt_distance,
                             cptab->ctb_nparts * sizeof(part->cpt_distance[0]));
                if (!part->cpt_distance)

        cfs_cpt_table_free(cptab);
EXPORT_SYMBOL(cfs_cpt_table_alloc);
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
        for (i = 0; i < cptab->ctb_nparts; i++) {
                rc = snprintf(tmp, len, "%d\t:", i);

                for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
                        rc = snprintf(tmp, len, " %d", j);
EXPORT_SYMBOL(cfs_cpt_table_print);
cfs_cpt_distance_print(struct cfs_cpt_table *cptab, char *buf, int len)
        for (i = 0; i < cptab->ctb_nparts; i++) {
                rc = snprintf(tmp, len, "%d\t:", i);

                for (j = 0; j < cptab->ctb_nparts; j++) {
                        rc = snprintf(tmp, len, " %d:%d",
                                      j, cptab->ctb_parts[i].cpt_distance[j]);
EXPORT_SYMBOL(cfs_cpt_distance_print);
cfs_cpt_number(struct cfs_cpt_table *cptab)
        return cptab->ctb_nparts;
EXPORT_SYMBOL(cfs_cpt_number);

cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
        LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

        return cpt == CFS_CPT_ANY ?
               cpumask_weight(cptab->ctb_cpumask) :
               cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
EXPORT_SYMBOL(cfs_cpt_weight);

cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
        LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

        return cpt == CFS_CPT_ANY ?
               cpumask_any_and(cptab->ctb_cpumask,
                               cpu_online_mask) < nr_cpu_ids :
               cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
                               cpu_online_mask) < nr_cpu_ids;
EXPORT_SYMBOL(cfs_cpt_online);

cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
        LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

        return cpt == CFS_CPT_ANY ?
               cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
EXPORT_SYMBOL(cfs_cpt_cpumask);

cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
        LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

        return cpt == CFS_CPT_ANY ?
               cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
EXPORT_SYMBOL(cfs_cpt_nodemask);

cfs_cpt_distance(struct cfs_cpt_table *cptab, int cpt1, int cpt2)
        LASSERT(cpt1 == CFS_CPT_ANY || (cpt1 >= 0 && cpt1 < cptab->ctb_nparts));
        LASSERT(cpt2 == CFS_CPT_ANY || (cpt2 >= 0 && cpt2 < cptab->ctb_nparts));

        if (cpt1 == CFS_CPT_ANY || cpt2 == CFS_CPT_ANY)
                return cptab->ctb_distance;

        return cptab->ctb_parts[cpt1].cpt_distance[cpt2];
EXPORT_SYMBOL(cfs_cpt_distance);
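/*
 * Illustrative sketch (not part of the original source): a caller holding a
 * struct cfs_cpt_table could walk its partitions with the accessors above,
 * for example:
 *
 *      int cpt;
 *
 *      for (cpt = 0; cpt < cfs_cpt_number(cptab); cpt++)
 *              CDEBUG(D_INFO, "CPT %d: %d CPUs, distance to CPT 0: %d\n",
 *                     cpt, cfs_cpt_weight(cptab, cpt),
 *                     cfs_cpt_distance(cptab, cpt, 0));
 *
 * where "cptab" is assumed to be a table obtained from cfs_cpt_table_alloc()
 * or one of the create helpers later in this file.
 */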
/*
 * Calculate the maximum NUMA distance between all nodes in the
 * from_mask and all nodes in the to_mask.
 */
cfs_cpt_distance_calculate(nodemask_t *from_mask, nodemask_t *to_mask)
        for_each_node_mask(from, *from_mask) {
                for_each_node_mask(to, *to_mask) {
                        distance = node_distance(from, to);
                        if (maximum < distance)
static void cfs_cpt_add_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
        cptab->ctb_cpu2cpt[cpu] = cpt;

        cpumask_set_cpu(cpu, cptab->ctb_cpumask);
        cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);

static void cfs_cpt_del_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
        cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
        cpumask_clear_cpu(cpu, cptab->ctb_cpumask);

        cptab->ctb_cpu2cpt[cpu] = -1;
static void cfs_cpt_add_node(struct cfs_cpt_table *cptab, int cpt, int node)
        struct cfs_cpu_partition *part;
        struct cfs_cpu_partition *part2;

        if (!node_isset(node, *cptab->ctb_nodemask)) {
                /* first time node is added to the CPT table */
                node_set(node, *cptab->ctb_nodemask);
                cptab->ctb_node2cpt[node] = cpt;
                cptab->ctb_distance = cfs_cpt_distance_calculate(
                                                        cptab->ctb_nodemask);

        part = &cptab->ctb_parts[cpt];
        if (!node_isset(node, *part->cpt_nodemask)) {
                /* first time node is added to this CPT */
                node_set(node, *part->cpt_nodemask);
                for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
                        part2 = &cptab->ctb_parts[cpt2];
                        part->cpt_distance[cpt2] = cfs_cpt_distance_calculate(
                                                        part2->cpt_nodemask);
                        part2->cpt_distance[cpt] = cfs_cpt_distance_calculate(
static void cfs_cpt_del_node(struct cfs_cpt_table *cptab, int cpt, int node)
        struct cfs_cpu_partition *part;
        struct cfs_cpu_partition *part2;

        part = &cptab->ctb_parts[cpt];

        for_each_cpu(cpu, part->cpt_cpumask) {
                /* does this CPT have another CPU belonging to this node? */
                if (cpu_to_node(cpu) == node)

        if (cpu >= nr_cpu_ids && node_isset(node, *part->cpt_nodemask)) {
                /* No more CPUs in the node for this CPT. */
                node_clear(node, *part->cpt_nodemask);
                for (cpt2 = 0; cpt2 < cptab->ctb_nparts; cpt2++) {
                        part2 = &cptab->ctb_parts[cpt2];
                        if (node_isset(node, *part2->cpt_nodemask))
                                cptab->ctb_node2cpt[node] = cpt2;

                        part->cpt_distance[cpt2] = cfs_cpt_distance_calculate(
                                                        part2->cpt_nodemask);
                        part2->cpt_distance[cpt] = cfs_cpt_distance_calculate(

        for_each_cpu(cpu, cptab->ctb_cpumask) {
                /* does this CPT-table have other CPUs belonging to this node? */
                if (cpu_to_node(cpu) == node)

        if (cpu >= nr_cpu_ids && node_isset(node, *cptab->ctb_nodemask)) {
                /* No more CPUs in the table for this node. */
                node_clear(node, *cptab->ctb_nodemask);
                cptab->ctb_node2cpt[node] = -1;
                cptab->ctb_distance =
                        cfs_cpt_distance_calculate(cptab->ctb_nodemask,
                                                   cptab->ctb_nodemask);
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
        LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);

        if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);

        if (cptab->ctb_cpu2cpt[cpu] != -1) {
                CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
                       cpu, cptab->ctb_cpu2cpt[cpu]);

        LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
        LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));

        cfs_cpt_add_cpu(cptab, cpt, cpu);
        cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_set_cpu);
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
        LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

        if (cpu < 0 || cpu >= nr_cpu_ids) {
                CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);

        if (cpt == CFS_CPT_ANY) {
                /* caller doesn't know the partition ID */
                cpt = cptab->ctb_cpu2cpt[cpu];
                if (cpt < 0) { /* not set in this CPT-table */
                        CDEBUG(D_INFO, "Try to unset CPU %d which is "
                               "not in CPT-table %p\n", cpu, cptab);

        } else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
                       "CPU %d is not in cpu-partition %d\n", cpu, cpt);

        LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
        LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));

        cfs_cpt_del_cpu(cptab, cpt, cpu);
        cfs_cpt_del_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_unset_cpu);
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, const cpumask_t *mask)
        if (cpumask_weight(mask) == 0 ||
            cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
                CDEBUG(D_INFO, "No online CPU is found in the CPU mask "
                       "for CPU partition %d\n", cpt);

        for_each_cpu(cpu, mask) {
                cfs_cpt_add_cpu(cptab, cpt, cpu);
                cfs_cpt_add_node(cptab, cpt, cpu_to_node(cpu));
EXPORT_SYMBOL(cfs_cpt_set_cpumask);

cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt,
                      const cpumask_t *mask)
        for_each_cpu(cpu, mask)
                cfs_cpt_unset_cpu(cptab, cpt, cpu);
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
        const cpumask_t *mask;

        if (node < 0 || node >= nr_node_ids) {
                       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);

        mask = cpumask_of_node(node);

        for_each_cpu(cpu, mask)
                cfs_cpt_add_cpu(cptab, cpt, cpu);

        cfs_cpt_add_node(cptab, cpt, node);
EXPORT_SYMBOL(cfs_cpt_set_node);

cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
        const cpumask_t *mask;

        if (node < 0 || node >= nr_node_ids) {
                       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);

        mask = cpumask_of_node(node);

        for_each_cpu(cpu, mask)
                cfs_cpt_del_cpu(cptab, cpt, cpu);

        cfs_cpt_del_node(cptab, cpt, node);
EXPORT_SYMBOL(cfs_cpt_unset_node);
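/*
 * Illustrative sketch (not part of the original source): building a small
 * table by hand with the setters above, e.g. one partition per NUMA node on
 * a hypothetical two-node machine:
 *
 *      struct cfs_cpt_table *cptab = cfs_cpt_table_alloc(2);
 *
 *      if (cptab != NULL &&
 *          cfs_cpt_set_node(cptab, 0, 0) &&
 *          cfs_cpt_set_node(cptab, 1, 1))
 *              CDEBUG(D_INFO, "built %d CPU partitions\n",
 *                     cfs_cpt_number(cptab));
 *      if (cptab != NULL)
 *              cfs_cpt_table_free(cptab);
 *
 * This assumes cfs_cpt_set_node() returns non-zero on success, which is how
 * the callers in this file treat it.
 */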
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
        for_each_node_mask(i, *mask) {
                if (!cfs_cpt_set_node(cptab, cpt, i))
EXPORT_SYMBOL(cfs_cpt_set_nodemask);

cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
        for_each_node_mask(i, *mask)
                cfs_cpt_unset_node(cptab, cpt, i);
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
        /* convert CPU partition ID to HW node id */

        if (cpt < 0 || cpt >= cptab->ctb_nparts) {
                mask = cptab->ctb_nodemask;
                rotor = cptab->ctb_spread_rotor++;

                mask = cptab->ctb_parts[cpt].cpt_nodemask;
                rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;

        weight = nodes_weight(*mask);

        for_each_node_mask(node, *mask) {
EXPORT_SYMBOL(cfs_cpt_spread_node);
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
        int cpu = smp_processor_id();
        int cpt = cptab->ctb_cpu2cpt[cpu];

        /* don't return a negative value for the safety of upper layers;
         * instead shadow the unknown CPU onto a valid partition ID */
        cpt = cpu % cptab->ctb_nparts;
EXPORT_SYMBOL(cfs_cpt_current);
cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
        LASSERT(cpu >= 0 && cpu < nr_cpu_ids);

        return cptab->ctb_cpu2cpt[cpu];
EXPORT_SYMBOL(cfs_cpt_of_cpu);
cfs_cpt_of_node(struct cfs_cpt_table *cptab, int node)
        if (node < 0 || node >= nr_node_ids)

        return cptab->ctb_node2cpt[node];
EXPORT_SYMBOL(cfs_cpt_of_node);
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
        nodemask_t *nodemask;

        LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

        if (cpt == CFS_CPT_ANY) {
                cpumask = cptab->ctb_cpumask;
                nodemask = cptab->ctb_nodemask;

                cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
                nodemask = cptab->ctb_parts[cpt].cpt_nodemask;

        if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
                CERROR("No online CPU found in CPU partition %d, did someone "
                       "do CPU hotplug on the system? You may need to reload "
                       "the Lustre modules to keep the system working well.\n", cpt);

        for_each_online_cpu(i) {
                if (cpumask_test_cpu(i, cpumask))

                rc = set_cpus_allowed_ptr(current, cpumask);
                set_mems_allowed(*nodemask);
                schedule(); /* switch to allowed CPU */

        /* don't need to set affinity because all online CPUs are covered */
EXPORT_SYMBOL(cfs_cpt_bind);
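/*
 * Illustrative sketch (not part of the original source): a service thread
 * wanting CPT affinity would typically look up its partition and bind itself
 * before entering its main loop, along these lines:
 *
 *      int cpt = cfs_cpt_current(cptab, 1);
 *
 *      if (cfs_cpt_bind(cptab, cpt) != 0)
 *              CWARN("Failed to bind to CPU partition %d\n", cpt);
 *
 * The 0-on-success return convention is assumed here from the use of
 * set_cpus_allowed_ptr() above.
 */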
/*
 * Choose at most \a number CPUs from \a node and set them in \a cpt.
 * We always prefer to choose CPUs in the same core/socket.
 */
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
                     cpumask_t *node, int number)
        cpumask_t *socket = NULL;
        cpumask_t *core = NULL;

        if (number >= cpumask_weight(node)) {
                while (!cpumask_empty(node)) {
                        cpu = cpumask_first(node);

                        rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
                        cpumask_clear_cpu(cpu, node);

        /* allocate scratch buffer */
        LIBCFS_ALLOC(socket, cpumask_size());
        LIBCFS_ALLOC(core, cpumask_size());
        if (socket == NULL || core == NULL) {

        while (!cpumask_empty(node)) {
                cpu = cpumask_first(node);

                /* get cpumask for cores in the same socket */
                cpumask_copy(socket, topology_core_cpumask(cpu));
                cpumask_and(socket, socket, node);

                LASSERT(!cpumask_empty(socket));

                while (!cpumask_empty(socket)) {
                        /* get cpumask for hts in the same core */
                        cpumask_copy(core, topology_sibling_cpumask(cpu));
                        cpumask_and(core, core, node);

                        LASSERT(!cpumask_empty(core));

                        for_each_cpu(i, core) {
                                cpumask_clear_cpu(i, socket);
                                cpumask_clear_cpu(i, node);

                                rc = cfs_cpt_set_cpu(cptab, cpt, i);

                        cpu = cpumask_first(socket);

        LIBCFS_FREE(socket, cpumask_size());
        LIBCFS_FREE(core, cpumask_size());
#define CPT_WEIGHT_MIN 4u

cfs_cpt_num_estimate(void)
        unsigned nnode = num_online_nodes();
        unsigned ncpu = num_online_cpus();

        if (ncpu <= CPT_WEIGHT_MIN) {

        /* generate a reasonable number of CPU partitions based on the total
         * number of CPUs; the preferred N should be a power of 2 and satisfy:
         * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
        for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) {}
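        /*
         * Worked example (added for clarity): with 24 online CPUs the loop
         * above stops at N = 4, since 2 * (4 - 1)^2 = 18 < 24 <= 2 * 4^2 = 32;
         * with 8 online CPUs it stops at N = 2, since 2 * 1^2 = 2 < 8 <= 2 * 2^2 = 8.
         */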
        if (ncpt <= nnode) { /* fat numa system */
        } else { /* ncpt > nnode */
                while ((nnode << 1) <= ncpt)

#if (BITS_PER_LONG == 32)
        /* configuring many CPU partitions on a 32-bit system could consume
         * too much memory */
        ncpt = min(2U, ncpt);
#endif
        while (ncpu % ncpt != 0)
                ncpt--; /* worst case is 1 */
static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
        struct cfs_cpt_table *cptab = NULL;
        cpumask_t *mask = NULL;

        rc = cfs_cpt_num_estimate();

        if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
                CWARN("CPU partition number %d is larger than suggested "
                      "value (%d), your system may have performance "
                      "issues or run out of memory while under pressure\n",

        if (num_online_cpus() % ncpt != 0) {
                CERROR("CPU number %d is not a multiple of cpu_npartitions %d, "
                       "please try a different cpu_npartitions value or "
                       "set a pattern string with cpu_pattern=STRING\n",
                       (int)num_online_cpus(), ncpt);

        cptab = cfs_cpt_table_alloc(ncpt);
                CERROR("Failed to allocate CPU map(%d)\n", ncpt);

        num = num_online_cpus() / ncpt;
                CERROR("CPU changed while setting CPU partition\n");

        LIBCFS_ALLOC(mask, cpumask_size());
                CERROR("Failed to allocate scratch cpumask\n");

        for_each_online_node(i) {
                cpumask_copy(mask, cpumask_of_node(i));

                while (!cpumask_empty(mask)) {
                        struct cfs_cpu_partition *part;

                        /* Each emulated NUMA node has all allowed CPUs in
                         * the mask.
                         * End the loop when all partitions have assigned CPUs.
                         */
                        part = &cptab->ctb_parts[cpt];

                        n = num - cpumask_weight(part->cpt_cpumask);

                        rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);

                        LASSERT(num >= cpumask_weight(part->cpt_cpumask));
                        if (num == cpumask_weight(part->cpt_cpumask))

            num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
                CERROR("Expected %d(%d) CPU partitions but got %d(%d), "
                       "CPU hotplug/unplug while setting?\n",
                       cptab->ctb_nparts, num, cpt,
                       cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));

        LIBCFS_FREE(mask, cpumask_size());

        CERROR("Failed to setup CPU-partition-table with %d "
               "CPU-partitions, online HW nodes: %d, HW CPUs: %d.\n",
               ncpt, num_online_nodes(), num_online_cpus());

        LIBCFS_FREE(mask, cpumask_size());

        cfs_cpt_table_free(cptab);
static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
        struct cfs_cpt_table *cptab;

        str = cfs_trimwhite(pattern);
        if (*str == 'n' || *str == 'N') {
                if (*pattern != '\0') {
                        node = 1; /* numa pattern */
                } else { /* shortcut to create CPT from NUMA & CPU topology */
                        ncpt = num_online_nodes();

        if (ncpt == 0) { /* scan for brackets, each of which marks a partition */
                for (str = pattern;; str++, ncpt++) {
                        str = strchr(str, '[');

            (node && ncpt > num_online_nodes()) ||
            (!node && ncpt > num_online_cpus())) {
                CERROR("Invalid pattern %s, or too many partitions %d\n",

        cptab = cfs_cpt_table_alloc(ncpt);
                CERROR("Failed to allocate cpu partition table\n");

        if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
                for_each_online_node(i) {
                                CERROR("CPU changed while setting CPU "
                                       "partition table, %d/%d\n", cpt, ncpt);

                        rc = cfs_cpt_set_node(cptab, cpt++, i);

        high = node ? nr_node_ids - 1 : nr_cpu_ids - 1;

        for (str = cfs_trimwhite(pattern), c = 0;; c++) {
                struct cfs_range_expr *range;
                struct cfs_expr_list *el;
                char *bracket = strchr(str, '[');

                if (bracket == NULL) {
                        CERROR("Invalid pattern %s\n", str);
                } else if (c != ncpt) {
                        CERROR("Expected %d partitions but found %d\n",

                if (sscanf(str, "%d%n", &cpt, &n) < 1) {
                        CERROR("Invalid CPU pattern %s\n", str);

                if (cpt < 0 || cpt >= ncpt) {
                        CERROR("Invalid partition ID %d, total partitions %d\n",

                if (cfs_cpt_weight(cptab, cpt) != 0) {
                        CERROR("Partition %d has already been set.\n", cpt);

                str = cfs_trimwhite(str + n);
                if (str != bracket) {
                        CERROR("Invalid pattern %s\n", str);

                bracket = strchr(str, ']');
                if (bracket == NULL) {
                        CERROR("Missing right bracket for cpt %d, %s\n",

                if (cfs_expr_list_parse(str, (bracket - str) + 1,
                                        0, high, &el) != 0) {
                        CERROR("Can't parse number range: %s\n", str);

                list_for_each_entry(range, &el->el_exprs, re_link) {
                        for (i = range->re_lo; i <= range->re_hi; i++) {
                                if ((i - range->re_lo) % range->re_stride != 0)

                                rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
                                            cfs_cpt_set_cpu(cptab, cpt, i);
                                        cfs_expr_list_free(el);

                cfs_expr_list_free(el);

                if (!cfs_cpt_online(cptab, cpt)) {
                        CERROR("No online CPU is found on partition %d\n", cpt);

                str = cfs_trimwhite(bracket + 1);

        cfs_cpt_table_free(cptab);
#ifdef CONFIG_HOTPLUG_CPU
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        unsigned int cpu = (unsigned long)hcpu;

        case CPU_DEAD_FROZEN:
        case CPU_ONLINE_FROZEN:
                if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
                        CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",

                /* if all HTs in a core are offline, it may break affinity */
                warn = cpumask_any_and(topology_sibling_cpumask(cpu),
                                       cpu_online_mask) >= nr_cpu_ids;
                CDEBUG(warn ? D_WARNING : D_INFO,
                       "Lustre: can't support CPU plug-out well now, "
                       "performance and stability could be impacted "
                       "[CPU %u action: %lx]\n", cpu, action);

static struct notifier_block cfs_cpu_notifier = {
        .notifier_call = cfs_cpu_notify,
        if (cfs_cpt_table != NULL)
                cfs_cpt_table_free(cfs_cpt_table);

#ifdef CONFIG_HOTPLUG_CPU
        unregister_hotcpu_notifier(&cfs_cpu_notifier);

        LASSERT(cfs_cpt_table == NULL);

#ifdef CONFIG_HOTPLUG_CPU
        register_hotcpu_notifier(&cfs_cpu_notifier);

        if (*cpu_pattern != 0) {
                char *cpu_pattern_dup = kstrdup(cpu_pattern, GFP_KERNEL);

                if (cpu_pattern_dup == NULL) {
                        CERROR("Failed to duplicate cpu_pattern\n");

                cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern_dup);
                kfree(cpu_pattern_dup);
                if (cfs_cpt_table == NULL) {
                        CERROR("Failed to create cptab from pattern %s\n",

                cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
                if (cfs_cpt_table == NULL) {
                        CERROR("Failed to create ptable with npartitions %d\n",

        LCONSOLE(0, "HW nodes: %d, HW CPU cores: %d, npartitions: %d\n",
                 num_online_nodes(), num_online_cpus(),
                 cfs_cpt_number(cfs_cpt_table));