/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/cpu.h>
#include <linux/sched.h>
#include <libcfs/libcfs.h>
/**
 * modparam for setting number of partitions
 *
 *  0 : estimate best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
 */
static int cpu_npartitions;
CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
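/*
 * For example, a line like the following in /etc/modprobe.d/lustre.conf
 * (shown only as a sketch of one common way to pass module options)
 * would split the online CPUs into four partitions at module load:
 *
 *	options libcfs cpu_npartitions=4
 */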
/**
 * modparam for setting CPU partitions patterns:
 *
 * e.g. "0[0,1,2,3] 1[4,5,6,7]": the number before each bracket is a CPU
 *      partition ID, the numbers inside the brackets are processor IDs
 *      (cores or HTs)
 *
 * e.g. "N 0[0,1] 1[2,3]": the leading character 'N' means the numbers
 *      inside the brackets are NUMA node IDs; the number before each
 *      bracket is still a CPU partition ID.
 *
 * e.g. "N": shortcut expression to create CPTs from the NUMA & CPU
 *      topology
 *
 * NB: if the user specifies cpu_pattern, cpu_npartitions is ignored
 */
static char *cpu_pattern = "";
CFS_MODULE_PARM(cpu_pattern, "s", charp, 0444, "CPU partitions pattern");
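/*
 * For example, on a two-socket node with CPUs 0-3 on NUMA node 0 and
 * CPUs 4-7 on NUMA node 1, either option below (a sketch, not the only
 * valid form) creates one partition per socket:
 *
 *	options libcfs cpu_pattern="0[0,1,2,3] 1[4,5,6,7]"
 *	options libcfs cpu_pattern="N 0[0] 1[1]"
 */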
struct cfs_cpt_data {
	/* serialize hotplug etc */
	spinlock_t	cpt_lock;
	/* reserved for hotplug */
	unsigned long	cpt_version;
	/* mutex to protect cpt_cpumask */
	struct mutex	cpt_mutex;
	/* scratch buffer for set/unset_node */
	cpumask_t	*cpt_cpumask;
};

static struct cfs_cpt_data cpt_data;
void
cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
{
	/* return cpumask of cores in the same socket */
	cpumask_copy(mask, topology_core_cpumask(cpu));
}
EXPORT_SYMBOL(cfs_cpu_core_siblings);
/* return number of cores in the same socket of \a cpu */
int
cfs_cpu_core_nsiblings(int cpu)
{
	int num;

	mutex_lock(&cpt_data.cpt_mutex);

	cfs_cpu_core_siblings(cpu, cpt_data.cpt_cpumask);
	num = cpumask_weight(cpt_data.cpt_cpumask);

	mutex_unlock(&cpt_data.cpt_mutex);

	return num;
}
EXPORT_SYMBOL(cfs_cpu_core_nsiblings);
/* return cpumask of HTs in the same core */
void
cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
{
	cpumask_copy(mask, topology_thread_cpumask(cpu));
}
EXPORT_SYMBOL(cfs_cpu_ht_siblings);
/* return number of HTs in the same core of \a cpu */
int
cfs_cpu_ht_nsiblings(int cpu)
{
	return cpumask_weight(topology_thread_cpumask(cpu));
}
EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);
void
cfs_node_to_cpumask(int node, cpumask_t *mask)
{
	const cpumask_t *tmp = cpumask_of_node(node);

	if (tmp != NULL)
		cpumask_copy(mask, tmp);
	else
		cpumask_clear(mask);
}
EXPORT_SYMBOL(cfs_node_to_cpumask);
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
	int i;

	if (cptab->ctb_cpu2cpt != NULL) {
		LIBCFS_FREE(cptab->ctb_cpu2cpt,
			    num_possible_cpus() *
			    sizeof(cptab->ctb_cpu2cpt[0]));
	}

	for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask != NULL) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		}

		if (part->cpt_cpumask != NULL)
			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
	}

	if (cptab->ctb_parts != NULL) {
		LIBCFS_FREE(cptab->ctb_parts,
			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
	}

	if (cptab->ctb_nodemask != NULL)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (cptab->ctb_cpumask != NULL)
		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());

	LIBCFS_FREE(cptab, sizeof(*cptab));
}
EXPORT_SYMBOL(cfs_cpt_table_free);
struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
{
	struct cfs_cpt_table *cptab;
	int i;

	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	if (cptab == NULL)
		return NULL;

	cptab->ctb_nparts = ncpt;

	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));

	if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
		goto failed;

	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
		     num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
	if (cptab->ctb_cpu2cpt == NULL)
		goto failed;

	memset(cptab->ctb_cpu2cpt, -1,
	       num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));

	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
	if (cptab->ctb_parts == NULL)
		goto failed;

	for (i = 0; i < ncpt; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
		if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
			goto failed;
	}

	spin_lock(&cpt_data.cpt_lock);
	/* Reserved for hotplug */
	cptab->ctb_version = cpt_data.cpt_version;
	spin_unlock(&cpt_data.cpt_lock);

	return cptab;

 failed:
	cfs_cpt_table_free(cptab);
	return NULL;
}
EXPORT_SYMBOL(cfs_cpt_table_alloc);
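/*
 * A minimal usage sketch for cfs_cpt_table_alloc()/cfs_cpt_table_free()
 * (a hypothetical caller, not part of this file; partition and CPU IDs
 * are only examples):
 *
 *	struct cfs_cpt_table *cptab = cfs_cpt_table_alloc(2);
 *
 *	if (cptab != NULL) {
 *		cfs_cpt_set_cpu(cptab, 0, 0);   (adds CPU 0 to partition 0)
 *		cfs_cpt_set_cpu(cptab, 1, 1);   (adds CPU 1 to partition 1)
 *		cfs_cpt_table_free(cptab);
 *	}
 */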
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	char *tmp = buf;
	int rc;
	int i;
	int j;

	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t: ", i);
		len -= rc;
		if (len <= 0)
			return -EFBIG;
		tmp += rc;

		for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
			rc = snprintf(tmp, len, "%d ", j);
			len -= rc;
			if (len <= 0)
				return -EFBIG;
			tmp += rc;
		}

		*tmp = '\n';
		tmp++;
		len--;
	}

	return tmp - buf;
}
EXPORT_SYMBOL(cfs_cpt_table_print);
int
cfs_cpt_number(struct cfs_cpt_table *cptab)
{
	return cptab->ctb_nparts;
}
EXPORT_SYMBOL(cfs_cpt_number);
int
cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_weight(cptab->ctb_cpumask) :
	       cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
}
EXPORT_SYMBOL(cfs_cpt_weight);
int
cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpumask_any_and(cptab->ctb_cpumask,
			       cpu_online_mask) < nr_cpu_ids :
	       cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
			       cpu_online_mask) < nr_cpu_ids;
}
EXPORT_SYMBOL(cfs_cpt_online);
cpumask_t *
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
}
EXPORT_SYMBOL(cfs_cpt_cpumask);
nodemask_t *
cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
}
EXPORT_SYMBOL(cfs_cpt_nodemask);
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	int node;

	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);

	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
		return 0;
	}

	if (cptab->ctb_cpu2cpt[cpu] != -1) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
		       cpu, cptab->ctb_cpu2cpt[cpu]);
		return 0;
	}

	cptab->ctb_cpu2cpt[cpu] = cpt;

	LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
	LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));

	cpumask_set_cpu(cpu, cptab->ctb_cpumask);
	cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);

	node = cpu_to_node(cpu);

	/* first CPU of @node in this CPT table */
	if (!node_isset(node, *cptab->ctb_nodemask))
		node_set(node, *cptab->ctb_nodemask);

	/* first CPU of @node in this partition */
	if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
		node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpu);
void
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	int node;
	int i;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpu < 0 || cpu >= nr_cpu_ids) {
		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
		return;
	}

	if (cpt == CFS_CPT_ANY) {
		/* caller doesn't know the partition ID */
		cpt = cptab->ctb_cpu2cpt[cpu];
		if (cpt < 0) { /* not set in this CPT-table */
			CDEBUG(D_INFO, "Trying to unset CPU %d which is "
			       "not in CPT-table %p\n", cpu, cptab);
			return;
		}

	} else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
		CDEBUG(D_INFO,
		       "CPU %d is not in cpu-partition %d\n", cpu, cpt);
		return;
	}

	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));

	cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
	cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
	cptab->ctb_cpu2cpt[cpu] = -1;

	node = cpu_to_node(cpu);

	LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
	LASSERT(node_isset(node, *cptab->ctb_nodemask));

	for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
		/* this CPT has other CPU belonging to this node? */
		if (cpu_to_node(i) == node)
			break;
	}

	if (i >= nr_cpu_ids)
		node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);

	for_each_cpu(i, cptab->ctb_cpumask) {
		/* this CPT-table has other CPU belonging to this node? */
		if (cpu_to_node(i) == node)
			break;
	}

	if (i >= nr_cpu_ids)
		node_clear(node, *cptab->ctb_nodemask);
}
EXPORT_SYMBOL(cfs_cpt_unset_cpu);
int
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
	int i;

	if (cpumask_weight(mask) == 0 ||
	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
		CDEBUG(D_INFO, "No online CPU is found in the CPU mask "
		       "for CPU partition %d\n", cpt);
		return 0;
	}

	for_each_cpu(i, mask) {
		if (!cfs_cpt_set_cpu(cptab, cpt, i))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpumask);
void
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
	int i;

	for_each_cpu(i, mask)
		cfs_cpt_unset_cpu(cptab, cpt, i);
}
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	cpumask_t *mask;
	int rc;

	if (node < 0 || node >= MAX_NUMNODES) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return 0;
	}

	mutex_lock(&cpt_data.cpt_mutex);

	mask = cpt_data.cpt_cpumask;
	cfs_node_to_cpumask(node, mask);

	rc = cfs_cpt_set_cpumask(cptab, cpt, mask);

	mutex_unlock(&cpt_data.cpt_mutex);

	return rc;
}
EXPORT_SYMBOL(cfs_cpt_set_node);
void
cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	cpumask_t *mask;

	if (node < 0 || node >= MAX_NUMNODES) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return;
	}

	mutex_lock(&cpt_data.cpt_mutex);

	mask = cpt_data.cpt_cpumask;
	cfs_node_to_cpumask(node, mask);

	cfs_cpt_unset_cpumask(cptab, cpt, mask);

	mutex_unlock(&cpt_data.cpt_mutex);
}
EXPORT_SYMBOL(cfs_cpt_unset_node);
int
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
	int i;

	for_each_node_mask(i, *mask) {
		if (!cfs_cpt_set_node(cptab, cpt, i))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_nodemask);
void
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
	int i;

	for_each_node_mask(i, *mask)
		cfs_cpt_unset_node(cptab, cpt, i);
}
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
void
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
{
	int last;
	int i;

	if (cpt == CFS_CPT_ANY) {
		last = cptab->ctb_nparts - 1;
		cpt = 0;
	} else {
		last = cpt;
	}

	for (; cpt <= last; cpt++) {
		for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
			cfs_cpt_unset_cpu(cptab, cpt, i);
	}
}
EXPORT_SYMBOL(cfs_cpt_clear);
int
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
	nodemask_t *mask;
	int weight;
	int rotor;
	int node;

	/* convert CPU partition ID to HW node id */

	if (cpt < 0 || cpt >= cptab->ctb_nparts) {
		mask = cptab->ctb_nodemask;
		rotor = cptab->ctb_spread_rotor++;
	} else {
		mask = cptab->ctb_parts[cpt].cpt_nodemask;
		rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
	}

	weight = nodes_weight(*mask);
	LASSERT(weight > 0);

	rotor %= weight;

	for_each_node_mask(node, *mask) {
		if (rotor-- == 0)
			return node;
	}

	LBUG();
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_spread_node);
int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
	int cpu = smp_processor_id();
	int cpt = cptab->ctb_cpu2cpt[cpu];

	if (cpt < 0) {
		if (!remap)
			return cpt;

		/* don't return negative value for safety of upper layer,
		 * instead we map the unknown CPU to a valid partition ID */
		cpt = cpu % cptab->ctb_nparts;
	}

	return cpt;
}
EXPORT_SYMBOL(cfs_cpt_current);
int
cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
{
	LASSERT(cpu >= 0 && cpu < nr_cpu_ids);

	return cptab->ctb_cpu2cpt[cpu];
}
EXPORT_SYMBOL(cfs_cpt_of_cpu);
int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
	cpumask_t *cpumask;
	nodemask_t *nodemask;
	int rc;
	int i;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpt == CFS_CPT_ANY) {
		cpumask = cptab->ctb_cpumask;
		nodemask = cptab->ctb_nodemask;
	} else {
		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
	}

	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
		CERROR("No online CPU found in CPU partition %d, did someone "
		       "do CPU hotplug on system? You might need to reload "
		       "Lustre modules to keep system working well.\n", cpt);
		return -EINVAL;
	}

	for_each_online_cpu(i) {
		if (cpumask_test_cpu(i, cpumask))
			continue;

		rc = set_cpus_allowed_ptr(current, cpumask);
		set_mems_allowed(*nodemask);
		if (rc == 0)
			schedule(); /* switch to allowed CPU */

		return rc;
	}

	/* don't need to set affinity because all online CPUs are covered */
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_bind);
/**
 * Choose up to \a number CPUs from \a node and set them in \a cpt.
 * We always prefer to choose CPUs in the same core/socket.
 */
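/*
 * For example (assuming two HTs per core; a sketch of the behaviour,
 * not a guarantee): choosing 3 CPUs from a node with CPUs 0-7 takes
 * both HTs of the first core (0 and 1) plus one HT of the next core in
 * the same socket, rather than one HT from each of three cores.
 */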
static int
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
		     cpumask_t *node, int number)
{
	cpumask_t *socket = NULL;
	cpumask_t *core = NULL;
	int rc = 0;
	int cpu;

	LASSERT(number > 0);

	if (number >= cpumask_weight(node)) {
		while (!cpumask_empty(node)) {
			cpu = cpumask_first(node);

			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
			if (!rc)
				return -EINVAL;
			cpumask_clear_cpu(cpu, node);
		}
		return 0;
	}

	/* allocate scratch buffer */
	LIBCFS_ALLOC(socket, cpumask_size());
	LIBCFS_ALLOC(core, cpumask_size());
	if (socket == NULL || core == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	while (!cpumask_empty(node)) {
		cpu = cpumask_first(node);

		/* get cpumask for cores in the same socket */
		cfs_cpu_core_siblings(cpu, socket);
		cpumask_and(socket, socket, node);

		LASSERT(!cpumask_empty(socket));

		while (!cpumask_empty(socket)) {
			int i;

			/* get cpumask for hts in the same core */
			cfs_cpu_ht_siblings(cpu, core);
			cpumask_and(core, core, node);

			LASSERT(!cpumask_empty(core));

			for_each_cpu(i, core) {
				cpumask_clear_cpu(i, socket);
				cpumask_clear_cpu(i, node);

				rc = cfs_cpt_set_cpu(cptab, cpt, i);
				if (!rc) {
					rc = -EINVAL;
					goto out;
				}

				if (--number == 0)
					goto out;
			}
			cpu = cpumask_first(socket);
		}
	}

 out:
	if (socket != NULL)
		LIBCFS_FREE(socket, cpumask_size());
	if (core != NULL)
		LIBCFS_FREE(core, cpumask_size());
	return rc;
}
#define CPT_WEIGHT_MIN	4u

static unsigned int
cfs_cpt_num_estimate(void)
{
	unsigned int nnode = num_online_nodes();
	unsigned int ncpu = num_online_cpus();
	unsigned int ncpt;

	if (ncpu <= CPT_WEIGHT_MIN) {
		ncpt = 1;
		goto out;
	}

	/* generate a reasonable number of CPU partitions based on the total
	 * number of CPUs; the preferred N should be a power of 2 and match
	 * this condition: 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
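	/*
	 * Worked example for the loop below: with 24 online CPUs it stops
	 * at ncpt = 4, since 24 > 2*2*2 = 8 but 24 <= 2*4*4 = 32.
	 */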
	for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
		;

	if (ncpt <= nnode) { /* fat numa system */
		while (nnode > ncpt)
			nnode >>= 1;

	} else { /* ncpt > nnode */
		while ((nnode << 1) <= ncpt)
			nnode <<= 1;
	}

	ncpt = nnode;

 out:
#if (BITS_PER_LONG == 32)
	/* configuring many CPU partitions on a 32-bit system could consume
	 * too much memory */
	ncpt = min(2U, ncpt);
#endif
	while (ncpu % ncpt != 0)
		ncpt--; /* worst case is 1 */

	return ncpt;
}
static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
{
	struct cfs_cpt_table *cptab = NULL;
	cpumask_t *mask = NULL;
	int cpt = 0;
	int num;
	int rc;
	int i;

	rc = cfs_cpt_num_estimate();
	if (ncpt <= 0)
		ncpt = rc;

	if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
		CWARN("CPU partition number %d is larger than suggested "
		      "value (%d), your system may have performance "
		      "issue or run out of memory while under pressure\n",
		      ncpt, rc);
	}

	if (num_online_cpus() % ncpt != 0) {
		CERROR("CPU number %d is not a multiple of cpu_npartitions "
		       "%d, please try a different cpu_npartitions value or "
		       "set a pattern string with cpu_pattern=STRING\n",
		       (int)num_online_cpus(), ncpt);
		goto failed;
	}

	cptab = cfs_cpt_table_alloc(ncpt);
	if (cptab == NULL) {
		CERROR("Failed to allocate CPU map(%d)\n", ncpt);
		goto failed;
	}

	num = num_online_cpus() / ncpt;
	if (num == 0) {
		CERROR("CPU changed while setting CPU partition\n");
		goto failed;
	}

	LIBCFS_ALLOC(mask, cpumask_size());
	if (mask == NULL) {
		CERROR("Failed to allocate scratch cpumask\n");
		goto failed;
	}

	for_each_online_node(i) {
		cfs_node_to_cpumask(i, mask);

		while (!cpumask_empty(mask)) {
			struct cfs_cpu_partition *part;
			int n;

			/* Each emulated NUMA node has all allowed CPUs in
			 * the mask.
			 * End loop when all partitions have assigned CPUs.
			 */
			if (cpt == ncpt)
				break;

			part = &cptab->ctb_parts[cpt];

			n = num - cpumask_weight(part->cpt_cpumask);
			LASSERT(n > 0);

			rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
			if (rc < 0)
				goto failed;

			LASSERT(num >= cpumask_weight(part->cpt_cpumask));
			if (num == cpumask_weight(part->cpt_cpumask))
				cpt++;
		}
	}

	if (cpt != ncpt ||
	    num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
		CERROR("Expect %d(%d) CPU partitions but got %d(%d), "
		       "CPU hotplug/unplug while setting?\n",
		       cptab->ctb_nparts, num, cpt,
		       cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
		goto failed;
	}

	LIBCFS_FREE(mask, cpumask_size());

	return cptab;

 failed:
	CERROR("Failed to setup CPU-partition-table with %d "
	       "CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
	       ncpt, num_online_nodes(), num_online_cpus());

	if (mask != NULL)
		LIBCFS_FREE(mask, cpumask_size());

	if (cptab != NULL)
		cfs_cpt_table_free(cptab);

	return NULL;
}
static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
{
	struct cfs_cpt_table *cptab;
	char *str;
	int node = 0;
	int high;
	int ncpt = 0;
	int cpt;
	int rc;
	int c;
	int i;

	str = cfs_trimwhite(pattern);
	if (*str == 'n' || *str == 'N') {
		pattern = str + 1;
		if (*pattern != '\0') {
			node = 1; /* numa pattern */

		} else { /* shortcut to create CPT from NUMA & CPU topology */
			node = -1;
			ncpt = num_online_nodes();
		}
	}

	if (ncpt == 0) { /* scan brackets, which mark partitions */
		for (str = pattern;; str++, ncpt++) {
			str = strchr(str, '[');
			if (str == NULL)
				break;
		}
	}

	if (ncpt == 0 ||
	    (node && ncpt > num_online_nodes()) ||
	    (!node && ncpt > num_online_cpus())) {
		CERROR("Invalid pattern %s, or too many partitions %d\n",
		       pattern, ncpt);
		return NULL;
	}

	cptab = cfs_cpt_table_alloc(ncpt);
	if (cptab == NULL) {
		CERROR("Failed to allocate cpu partition table\n");
		return NULL;
	}

	if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
		cpt = 0;

		for_each_online_node(i) {
			if (cpt == ncpt) {
				CERROR("CPU changed while setting CPU "
				       "partition table, %d/%d\n", cpt, ncpt);
				goto failed;
			}

			rc = cfs_cpt_set_node(cptab, cpt++, i);
			if (!rc)
				goto failed;
		}
		return cptab;
	}

	high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;

	for (str = cfs_trimwhite(pattern), c = 0;; c++) {
		struct cfs_range_expr *range;
		struct cfs_expr_list *el;
		char *bracket = strchr(str, '[');
		int n;

		if (bracket == NULL) {
			if (*str != 0) {
				CERROR("Invalid pattern %s\n", str);
				goto failed;
			} else if (c != ncpt) {
				CERROR("expect %d partitions but found %d\n",
				       ncpt, c);
				goto failed;
			}
			break;
		}

		if (sscanf(str, "%d%n", &cpt, &n) < 1) {
			CERROR("Invalid cpu pattern %s\n", str);
			goto failed;
		}

		if (cpt < 0 || cpt >= ncpt) {
			CERROR("Invalid partition id %d, total partitions %d\n",
			       cpt, ncpt);
			goto failed;
		}

		if (cfs_cpt_weight(cptab, cpt) != 0) {
			CERROR("Partition %d has already been set.\n", cpt);
			goto failed;
		}

		str = cfs_trimwhite(str + n);
		if (str != bracket) {
			CERROR("Invalid pattern %s\n", str);
			goto failed;
		}

		bracket = strchr(str, ']');
		if (bracket == NULL) {
			CERROR("missing right bracket for cpt %d, %s\n",
			       cpt, str);
			goto failed;
		}

		if (cfs_expr_list_parse(str, (bracket - str) + 1,
					0, high, &el) != 0) {
			CERROR("Can't parse number range: %s\n", str);
			goto failed;
		}

		list_for_each_entry(range, &el->el_exprs, re_link) {
			for (i = range->re_lo; i <= range->re_hi; i++) {
				if ((i - range->re_lo) % range->re_stride != 0)
					continue;

				rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
					    cfs_cpt_set_cpu(cptab, cpt, i);
				if (!rc) {
					cfs_expr_list_free(el);
					goto failed;
				}
			}
		}

		cfs_expr_list_free(el);

		if (!cfs_cpt_online(cptab, cpt)) {
			CERROR("No online CPU is found on partition %d\n", cpt);
			goto failed;
		}

		str = cfs_trimwhite(bracket + 1);
	}

	return cptab;

 failed:
	cfs_cpt_table_free(cptab);
	return NULL;
}
#ifdef CONFIG_HOTPLUG_CPU
static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	bool warn;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		spin_lock(&cpt_data.cpt_lock);
		cpt_data.cpt_version++;
		spin_unlock(&cpt_data.cpt_lock);
		/* fall through */
	default:
		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
			       cpu, action);
			break;
		}

		mutex_lock(&cpt_data.cpt_mutex);
		/* if all HTs in a core are offline, it may break affinity */
		cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
		warn = cpumask_any_and(cpt_data.cpt_cpumask,
				       cpu_online_mask) >= nr_cpu_ids;
		mutex_unlock(&cpt_data.cpt_mutex);
		CDEBUG(warn ? D_WARNING : D_INFO,
		       "Lustre: can't support CPU plug-out well now, "
		       "performance and stability could be impacted "
		       "[CPU %u action: %lx]\n", cpu, action);
	}

	return NOTIFY_OK;
}

static struct notifier_block cfs_cpu_notifier = {
	.notifier_call = cfs_cpu_notify,
	.priority = 0
};

#endif
void
cfs_cpu_fini(void)
{
	if (cfs_cpt_table != NULL)
		cfs_cpt_table_free(cfs_cpt_table);

#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif

	if (cpt_data.cpt_cpumask != NULL)
		LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
}
int
cfs_cpu_init(void)
{
	LASSERT(cfs_cpt_table == NULL);

	memset(&cpt_data, 0, sizeof(cpt_data));

	LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
	if (cpt_data.cpt_cpumask == NULL) {
		CERROR("Failed to allocate scratch buffer\n");
		return -1;
	}

	spin_lock_init(&cpt_data.cpt_lock);
	mutex_init(&cpt_data.cpt_mutex);

#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&cfs_cpu_notifier);
#endif

	if (*cpu_pattern != 0) {
		cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
		if (cfs_cpt_table == NULL) {
			CERROR("Failed to create cptab from pattern %s\n",
			       cpu_pattern);
			goto failed;
		}

	} else {
		cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
		if (cfs_cpt_table == NULL) {
			CERROR("Failed to create ptable with npartitions %d\n",
			       cpu_npartitions);
			goto failed;
		}
	}

	spin_lock(&cpt_data.cpt_lock);
	if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
		spin_unlock(&cpt_data.cpt_lock);
		CERROR("CPU hotplug/unplug during setup\n");
		goto failed;
	}
	spin_unlock(&cpt_data.cpt_lock);

	LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n",
		 num_online_cpus(), cfs_cpt_number(cfs_cpt_table));