/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/cpu.h>
#include <linux/sched.h>
#include <libcfs/libcfs.h>

/**
 * modparam for setting number of partitions
 *
 *  0 : estimate best value based on cores or NUMA nodes
 *  1 : disable multiple partitions
 * >1 : specify number of partitions
 */
static int cpu_npartitions;
CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");

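/*
 * Example (hypothetical values): a sketch of overriding the estimate at
 * module load time with four partitions:
 *
 *   modprobe libcfs cpu_npartitions=4
 */
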
/**
 * modparam for setting CPU partitions patterns:
 *
 * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID,
 *      number in bracket is processor ID (core or HT)
 *
 * i.e: "N 0[0,1] 1[2,3]", the first character 'N' means numbers in brackets
 *      are NUMA node IDs, number before bracket is CPU partition ID
 *
 * NB: if user specified cpu_pattern, cpu_npartitions will be ignored
 */
static char *cpu_pattern = "";
CFS_MODULE_PARM(cpu_pattern, "s", charp, 0444, "CPU partitions pattern");

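/*
 * Example invocations (hypothetical values): the first form below makes
 * two 4-CPU partitions from explicit processor IDs; the second binds
 * partition 0 to NUMA node 0 and partition 1 to NUMA node 1:
 *
 *   modprobe libcfs cpu_pattern="0[0-3] 1[4-7]"
 *   modprobe libcfs cpu_pattern="N 0[0] 1[1]"
 */
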
struct cfs_cpt_data {
	/* serialize hotplug etc */
	spinlock_t		cpt_lock;
	/* reserved for hotplug */
	unsigned long		cpt_version;
	/* mutex to protect cpt_cpumask */
	struct mutex		cpt_mutex;
	/* scratch buffer for set/unset_node */
	cpumask_t		*cpt_cpumask;
};

static struct cfs_cpt_data	cpt_data;

void
cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
{
	/* return cpumask of cores in the same socket */
	cpumask_copy(mask, topology_core_cpumask(cpu));
}
EXPORT_SYMBOL(cfs_cpu_core_siblings);

/* return number of cores in the same socket of \a cpu */
int
cfs_cpu_core_nsiblings(int cpu)
{
	int	num;

	mutex_lock(&cpt_data.cpt_mutex);

	cfs_cpu_core_siblings(cpu, cpt_data.cpt_cpumask);
	num = cpus_weight(*cpt_data.cpt_cpumask);

	mutex_unlock(&cpt_data.cpt_mutex);

	return num;
}
EXPORT_SYMBOL(cfs_cpu_core_nsiblings);

/* return cpumask of HTs in the same core */
void
cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
{
	cpumask_copy(mask, topology_thread_cpumask(cpu));
}
EXPORT_SYMBOL(cfs_cpu_ht_siblings);

/* return number of HTs in the same core of \a cpu */
int
cfs_cpu_ht_nsiblings(int cpu)
{
	int	num;

	mutex_lock(&cpt_data.cpt_mutex);

	cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
	num = cpus_weight(*cpt_data.cpt_cpumask);

	mutex_unlock(&cpt_data.cpt_mutex);

	return num;
}
EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);

void
cfs_node_to_cpumask(int node, cpumask_t *mask)
{
	cpumask_copy(mask, cpumask_of_node(node));
}
EXPORT_SYMBOL(cfs_node_to_cpumask);

void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
	int	i;

	if (cptab->ctb_cpu2cpt != NULL) {
		LIBCFS_FREE(cptab->ctb_cpu2cpt,
			    num_possible_cpus() *
			    sizeof(cptab->ctb_cpu2cpt[0]));
	}

	for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		if (part->cpt_nodemask != NULL) {
			LIBCFS_FREE(part->cpt_nodemask,
				    sizeof(*part->cpt_nodemask));
		}

		if (part->cpt_cpumask != NULL)
			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
	}

	if (cptab->ctb_parts != NULL) {
		LIBCFS_FREE(cptab->ctb_parts,
			    cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
	}

	if (cptab->ctb_nodemask != NULL)
		LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
	if (cptab->ctb_cpumask != NULL)
		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());

	LIBCFS_FREE(cptab, sizeof(*cptab));
}
EXPORT_SYMBOL(cfs_cpt_table_free);

struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
{
	struct cfs_cpt_table *cptab;
	int	i;

	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	if (cptab == NULL)
		return NULL;

	cptab->ctb_nparts = ncpt;

	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
	LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));

	if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
		goto failed;

	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
		     num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
	if (cptab->ctb_cpu2cpt == NULL)
		goto failed;

	memset(cptab->ctb_cpu2cpt, -1,
	       num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));

	LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
	if (cptab->ctb_parts == NULL)
		goto failed;

	for (i = 0; i < ncpt; i++) {
		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];

		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
		LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
		if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
			goto failed;
	}

	spin_lock(&cpt_data.cpt_lock);
	/* Reserved for hotplug */
	cptab->ctb_version = cpt_data.cpt_version;
	spin_unlock(&cpt_data.cpt_lock);

	return cptab;

 failed:
	cfs_cpt_table_free(cptab);
	return NULL;
}
EXPORT_SYMBOL(cfs_cpt_table_alloc);

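/*
 * Usage sketch (hypothetical caller, error handling elided): build a
 * two-partition table by hand and release it when done:
 *
 *	struct cfs_cpt_table *cptab = cfs_cpt_table_alloc(2);
 *
 *	if (cptab != NULL) {
 *		cfs_cpt_set_cpu(cptab, 0, 0);
 *		cfs_cpt_set_cpu(cptab, 1, 1);
 *		cfs_cpt_table_free(cptab);
 *	}
 */
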
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	char	*tmp = buf;
	int	rc;
	int	i;
	int	j;

	for (i = 0; i < cptab->ctb_nparts; i++) {
		rc = snprintf(tmp, len, "%d\t: ", i);
		len -= rc;
		if (len <= 0)
			return -EFBIG;
		tmp += rc;

		for_each_cpu_mask(j, *cptab->ctb_parts[i].cpt_cpumask) {
			rc = snprintf(tmp, len, "%d ", j);
			len -= rc;
			if (len <= 0)
				return -EFBIG;
			tmp += rc;
		}

		*tmp = '\n';
		tmp++;
		len--;
	}

	return tmp - buf;
}
EXPORT_SYMBOL(cfs_cpt_table_print);

int
cfs_cpt_number(struct cfs_cpt_table *cptab)
{
	return cptab->ctb_nparts;
}
EXPORT_SYMBOL(cfs_cpt_number);

int
cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cpus_weight(*cptab->ctb_cpumask) :
	       cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask);
}
EXPORT_SYMBOL(cfs_cpt_weight);

int
cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS :
	       any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS;
}
EXPORT_SYMBOL(cfs_cpt_online);

cpumask_t *
cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
}
EXPORT_SYMBOL(cfs_cpt_cpumask);

nodemask_t *
cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
{
	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	return cpt == CFS_CPT_ANY ?
	       cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
}
EXPORT_SYMBOL(cfs_cpt_nodemask);

int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	int	node;

	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);

	if (cpu < 0 || cpu >= NR_CPUS || !cpu_online(cpu)) {
		CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
		return 0;
	}

	if (cptab->ctb_cpu2cpt[cpu] != -1) {
		CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
		       cpu, cptab->ctb_cpu2cpt[cpu]);
		return 0;
	}

	cptab->ctb_cpu2cpt[cpu] = cpt;

	LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask));
	LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));

	cpu_set(cpu, *cptab->ctb_cpumask);
	cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);

	node = cpu_to_node(cpu);

	/* first CPU of @node in this CPT table */
	if (!node_isset(node, *cptab->ctb_nodemask))
		node_set(node, *cptab->ctb_nodemask);

	/* first CPU of @node in this partition */
	if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
		node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpu);

void
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	int	node;
	int	i;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpu < 0 || cpu >= NR_CPUS) {
		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
		return;
	}

	if (cpt == CFS_CPT_ANY) {
		/* caller doesn't know the partition ID */
		cpt = cptab->ctb_cpu2cpt[cpu];
		if (cpt < 0) { /* not set in this CPT-table */
			CDEBUG(D_INFO, "Try to unset cpu %d which is "
			       "not in CPT-table %p\n", cpu, cptab);
			return;
		}

	} else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
		CDEBUG(D_INFO,
		       "CPU %d is not in cpu-partition %d\n", cpu, cpt);
		return;
	}

	LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
	LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask));

	cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
	cpu_clear(cpu, *cptab->ctb_cpumask);
	cptab->ctb_cpu2cpt[cpu] = -1;

	node = cpu_to_node(cpu);

	LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
	LASSERT(node_isset(node, *cptab->ctb_nodemask));

	for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) {
		/* this CPT has other CPU belonging to this node? */
		if (cpu_to_node(i) == node)
			break;
	}

	if (i == NR_CPUS)
		node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);

	for_each_cpu_mask(i, *cptab->ctb_cpumask) {
		/* this CPT-table has other CPU belonging to this node? */
		if (cpu_to_node(i) == node)
			break;
	}

	if (i == NR_CPUS)
		node_clear(node, *cptab->ctb_nodemask);
}
EXPORT_SYMBOL(cfs_cpt_unset_cpu);

int
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
	int	i;

	if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
		CDEBUG(D_INFO, "No online CPU is found in the CPU mask "
		       "for CPU partition %d\n", cpt);
		return 0;
	}

	for_each_cpu_mask(i, *mask) {
		if (!cfs_cpt_set_cpu(cptab, cpt, i))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpumask);

void
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
	int	i;

	for_each_cpu_mask(i, *mask)
		cfs_cpt_unset_cpu(cptab, cpt, i);
}
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);

int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	cpumask_t	*mask;
	int		rc;

	if (node < 0 || node >= MAX_NUMNODES) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return 0;
	}

	mutex_lock(&cpt_data.cpt_mutex);

	mask = cpt_data.cpt_cpumask;
	cfs_node_to_cpumask(node, mask);

	rc = cfs_cpt_set_cpumask(cptab, cpt, mask);

	mutex_unlock(&cpt_data.cpt_mutex);

	return rc;
}
EXPORT_SYMBOL(cfs_cpt_set_node);

void
cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	cpumask_t	*mask;

	if (node < 0 || node >= MAX_NUMNODES) {
		CDEBUG(D_INFO,
		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
		return;
	}

	mutex_lock(&cpt_data.cpt_mutex);

	mask = cpt_data.cpt_cpumask;
	cfs_node_to_cpumask(node, mask);

	cfs_cpt_unset_cpumask(cptab, cpt, mask);

	mutex_unlock(&cpt_data.cpt_mutex);
}
EXPORT_SYMBOL(cfs_cpt_unset_node);

int
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
	int	i;

	for_each_node_mask(i, *mask) {
		if (!cfs_cpt_set_node(cptab, cpt, i))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_nodemask);

void
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
	int	i;

	for_each_node_mask(i, *mask)
		cfs_cpt_unset_node(cptab, cpt, i);
}
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);

void
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
{
	int	last;
	int	i;

	if (cpt == CFS_CPT_ANY) {
		last = cptab->ctb_nparts - 1;
		cpt = 0;
	} else {
		last = cpt;
	}

	for (; cpt <= last; cpt++) {
		for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask)
			cfs_cpt_unset_cpu(cptab, cpt, i);
	}
}
EXPORT_SYMBOL(cfs_cpt_clear);

int
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
	nodemask_t	*mask;
	int		weight;
	int		rotor;
	int		node;

	/* convert CPU partition ID to HW node id */
	if (cpt < 0 || cpt >= cptab->ctb_nparts) {
		mask = cptab->ctb_nodemask;
		rotor = cptab->ctb_spread_rotor++;
	} else {
		mask = cptab->ctb_parts[cpt].cpt_nodemask;
		rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
	}

	weight = nodes_weight(*mask);
	LASSERT(weight > 0);
	rotor %= weight;

	for_each_node_mask(node, *mask) {
		if (rotor-- == 0)
			return node;
	}

	LBUG();
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_spread_node);

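/*
 * Illustrative behaviour: for a partition whose cpt_nodemask holds nodes
 * { 0, 1 } and a rotor starting at 0, successive calls return 0, 1, 0, 1,
 * ... i.e. NUMA nodes are handed out round-robin within the partition.
 */
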
int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
	int	cpu = smp_processor_id();
	int	cpt = cptab->ctb_cpu2cpt[cpu];

	if (cpt < 0) {
		if (!remap)
			return cpt;

		/* don't return negative value for safety of upper layer,
		 * instead we shadow the unknown cpu to a valid partition ID */
		cpt = cpu % cptab->ctb_nparts;
	}

	return cpt;
}
EXPORT_SYMBOL(cfs_cpt_current);

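/*
 * Illustrative case: on a 4-partition table, if the current CPU is 5 but
 * was never added to the table (ctb_cpu2cpt[5] == -1), a call with remap
 * set returns 5 % 4 == 1 rather than a negative value.
 */
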
int
cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
{
	LASSERT(cpu >= 0 && cpu < NR_CPUS);

	return cptab->ctb_cpu2cpt[cpu];
}
EXPORT_SYMBOL(cfs_cpt_of_cpu);

int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
	cpumask_t	*cpumask;
	nodemask_t	*nodemask;
	int		rc;
	int		i;

	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));

	if (cpt == CFS_CPT_ANY) {
		cpumask = cptab->ctb_cpumask;
		nodemask = cptab->ctb_nodemask;
	} else {
		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
	}

	if (any_online_cpu(*cpumask) == NR_CPUS) {
		CERROR("No online CPU found in CPU partition %d, did someone "
		       "do CPU hotplug on system? You might need to reload "
		       "Lustre modules to keep system working well.\n", cpt);
		return -EINVAL;
	}

	for_each_online_cpu(i) {
		if (cpu_isset(i, *cpumask))
			continue;

		rc = set_cpus_allowed_ptr(current, cpumask);
		set_mems_allowed(*nodemask);
		if (rc == 0)
			schedule(); /* switch to allowed CPU */

		return rc;
	}

	/* don't need to set affinity because all online CPUs are covered */
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_bind);

/**
 * Choose at most \a number CPUs from \a node and set them in \a cpt.
 * We always prefer to choose CPUs in the same core/socket.
 */
static int
cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
		     cpumask_t *node, int number)
{
	cpumask_t	*socket = NULL;
	cpumask_t	*core	= NULL;
	int		rc	= 0;
	int		cpu;

	LASSERT(number > 0);

	if (number >= cpus_weight(*node)) {
		while (!cpus_empty(*node)) {
			cpu = first_cpu(*node);

			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
			if (!rc)
				return -EINVAL;
			cpu_clear(cpu, *node);
		}
		return 0;
	}

	/* allocate scratch buffer */
	LIBCFS_ALLOC(socket, cpumask_size());
	LIBCFS_ALLOC(core, cpumask_size());
	if (socket == NULL || core == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	while (!cpus_empty(*node)) {
		cpu = first_cpu(*node);

		/* get cpumask for cores in the same socket */
		cfs_cpu_core_siblings(cpu, socket);
		cpus_and(*socket, *socket, *node);

		LASSERT(!cpus_empty(*socket));

		while (!cpus_empty(*socket)) {
			int	i;

			/* get cpumask for hts in the same core */
			cfs_cpu_ht_siblings(cpu, core);
			cpus_and(*core, *core, *node);

			LASSERT(!cpus_empty(*core));

			for_each_cpu_mask(i, *core) {
				cpu_clear(i, *socket);
				cpu_clear(i, *node);

				rc = cfs_cpt_set_cpu(cptab, cpt, i);
				if (!rc) {
					rc = -EINVAL;
					goto out;
				}

				if (--number == 0)
					goto out;
			}
			cpu = first_cpu(*socket);
		}
	}

 out:
	if (socket != NULL)
		LIBCFS_FREE(socket, cpumask_size());
	if (core != NULL)
		LIBCFS_FREE(core, cpumask_size());
	return rc;
}

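/*
 * Illustrative call: if \a node holds CPUs 0-7 where {0,4}, {1,5}, {2,6}
 * and {3,7} are HT siblings, choosing 4 CPUs takes whole cores first,
 * e.g. {0,4} then {1,5}, instead of one thread from four different cores.
 */
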
#define CPT_WEIGHT_MIN  4u

static unsigned int
cfs_cpt_num_estimate(void)
{
	unsigned	nnode = num_online_nodes();
	unsigned	ncpu  = num_online_cpus();
	unsigned	ncpt;

	if (ncpu <= CPT_WEIGHT_MIN) {
		ncpt = 1;
		goto out;
	}

	/* generate reasonable number of CPU partitions based on total number
	 * of CPUs, preferred N should be power2 and match this condition:
	 * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
	for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) {}

	if (ncpt <= nnode) { /* fat numa system */
		while (nnode > ncpt)
			nnode >>= 1;

	} else { /* ncpt > nnode */
		while ((nnode << 1) <= ncpt)
			nnode <<= 1;
	}

	ncpt = nnode;

 out:
#if (BITS_PER_LONG == 32)
	/* configuring many CPU partitions on a 32-bit system could consume
	 * too much memory */
	ncpt = min(2U, ncpt);
#endif
	while (ncpu % ncpt != 0)
		ncpt--; /* worst case is 1 */

	return ncpt;
}

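/*
 * Worked example (illustrative): with 24 online CPUs on 2 NUMA nodes the
 * loop above stops at N = 4 (24 > 2*2^2 but 24 <= 2*4^2); since 4 > 2
 * nodes, nnode doubles to 4, giving ncpt = 4; 24 % 4 == 0, so the final
 * estimate is 4 partitions of 6 CPUs each.
 */
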
static struct cfs_cpt_table *
cfs_cpt_table_create(int ncpt)
{
	struct cfs_cpt_table *cptab = NULL;
	cpumask_t	*mask = NULL;
	int		cpt = 0;
	int		num;
	int		rc;
	int		i;

	rc = cfs_cpt_num_estimate();
	if (ncpt <= 0)
		ncpt = rc;

	if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
		CWARN("CPU partition number %d is larger than suggested "
		      "value (%d), your system may have performance "
		      "issues or run out of memory while under pressure\n",
		      ncpt, rc);
	}

	if (num_online_cpus() % ncpt != 0) {
		CERROR("CPU number %d is not a multiple of cpu_npartitions "
		       "%d, please try a different cpu_npartitions value or "
		       "set a pattern string with cpu_pattern=STRING\n",
		       (int)num_online_cpus(), ncpt);
		goto failed;
	}

	cptab = cfs_cpt_table_alloc(ncpt);
	if (cptab == NULL) {
		CERROR("Failed to allocate CPU map(%d)\n", ncpt);
		goto failed;
	}

	num = num_online_cpus() / ncpt;
	if (num == 0) {
		CERROR("CPU changed while setting CPU partition\n");
		goto failed;
	}

	LIBCFS_ALLOC(mask, cpumask_size());
	if (mask == NULL) {
		CERROR("Failed to allocate scratch cpumask\n");
		goto failed;
	}

	for_each_online_node(i) {
		cfs_node_to_cpumask(i, mask);

		while (!cpus_empty(*mask)) {
			struct cfs_cpu_partition *part;
			int	n;

			/* Each emulated NUMA node has all allowed CPUs in
			 * its own mask.
			 * End loop when all partitions have assigned CPUs. */
			if (cpt == ncpt)
				goto out;

			part = &cptab->ctb_parts[cpt];

			n = num - cpus_weight(*part->cpt_cpumask);
			LASSERT(n > 0);

			rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
			if (rc < 0)
				goto failed;

			LASSERT(num >= cpus_weight(*part->cpt_cpumask));
			if (num == cpus_weight(*part->cpt_cpumask))
				cpt++;
		}
	}

 out:
	if (cpt != ncpt ||
	    num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
		CERROR("Expect %d(%d) CPU partitions but got %d(%d), "
		       "CPU hotplug/unplug while setting?\n",
		       cptab->ctb_nparts, num, cpt,
		       cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
		goto failed;
	}

	LIBCFS_FREE(mask, cpumask_size());
	return cptab;

 failed:
	CERROR("Failed to setup CPU-partition-table with %d "
	       "CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
	       ncpt, num_online_nodes(), num_online_cpus());

	if (mask != NULL)
		LIBCFS_FREE(mask, cpumask_size());
	if (cptab != NULL)
		cfs_cpt_table_free(cptab);
	return NULL;
}

static struct cfs_cpt_table *
cfs_cpt_table_create_pattern(char *pattern)
{
	struct cfs_cpt_table	*cptab;
	char			*str	= pattern;
	int			node	= 0;
	int			high;
	int			ncpt;
	int			c;

	for (ncpt = 0;; ncpt++) { /* quick scan bracket */
		str = strchr(str, '[');
		if (str == NULL)
			break;
		str++;
	}

	str = cfs_trimwhite(pattern);
	if (*str == 'n' || *str == 'N') {
		pattern = str + 1;
		node = 1;
	}

	if (ncpt == 0 ||
	    (node && ncpt > num_online_nodes()) ||
	    (!node && ncpt > num_online_cpus())) {
		CERROR("Invalid pattern %s, or too many partitions %d\n",
		       pattern, ncpt);
		return NULL;
	}

	high = node ? MAX_NUMNODES - 1 : NR_CPUS - 1;

	cptab = cfs_cpt_table_alloc(ncpt);
	if (cptab == NULL) {
		CERROR("Failed to allocate cpu partition table\n");
		return NULL;
	}

	for (str = cfs_trimwhite(pattern), c = 0;; c++) {
		struct cfs_range_expr	*range;
		struct cfs_expr_list	*el;
		char			*bracket = strchr(str, '[');
		int			cpt;
		int			rc;
		int			i;
		int			n;

		if (bracket == NULL) {
			if (*str != 0) {
				CERROR("Invalid pattern %s\n", str);
				goto failed;
			} else if (c != ncpt) {
				CERROR("expect %d partitions but found %d\n",
				       ncpt, c);
				goto failed;
			}
			break;
		}

		if (sscanf(str, "%d%n", &cpt, &n) < 1) {
			CERROR("Invalid cpu pattern %s\n", str);
			goto failed;
		}

		if (cpt < 0 || cpt >= ncpt) {
			CERROR("Invalid partition id %d, total partitions %d\n",
			       cpt, ncpt);
			goto failed;
		}

		if (cfs_cpt_weight(cptab, cpt) != 0) {
			CERROR("Partition %d has already been set.\n", cpt);
			goto failed;
		}

		str = cfs_trimwhite(str + n);
		if (str != bracket) {
			CERROR("Invalid pattern %s\n", str);
			goto failed;
		}

		bracket = strchr(str, ']');
		if (bracket == NULL) {
			CERROR("missing right bracket for cpt %d, %s\n",
			       cpt, str);
			goto failed;
		}

		if (cfs_expr_list_parse(str, (bracket - str) + 1,
					0, high, &el) != 0) {
			CERROR("Can't parse number range: %s\n", str);
			goto failed;
		}

		list_for_each_entry(range, &el->el_exprs, re_link) {
			for (i = range->re_lo; i <= range->re_hi; i++) {
				if ((i - range->re_lo) % range->re_stride != 0)
					continue;

				rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
					    cfs_cpt_set_cpu(cptab, cpt, i);
				if (!rc) {
					cfs_expr_list_free(el);
					goto failed;
				}
			}
		}

		cfs_expr_list_free(el);

		if (!cfs_cpt_online(cptab, cpt)) {
			CERROR("No online CPU is found on partition %d\n", cpt);
			goto failed;
		}

		str = cfs_trimwhite(bracket + 1);
	}

	return cptab;

 failed:
	cfs_cpt_table_free(cptab);
	return NULL;
}

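/*
 * Parse walk-through (hypothetical input): for cpu_pattern="0[0-3] 1[4-7]"
 * the quick bracket scan yields ncpt = 2; each loop iteration then reads a
 * partition ID, hands the bracketed range to cfs_expr_list_parse(), and
 * calls cfs_cpt_set_cpu() for CPUs 0-3 (partition 0) and 4-7 (partition 1).
 */
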
#ifdef CONFIG_HOTPLUG_CPU
static int
cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	bool	     warn;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		spin_lock(&cpt_data.cpt_lock);
		cpt_data.cpt_version++;
		spin_unlock(&cpt_data.cpt_lock);
	default:
		if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
			CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
			       cpu, action);
			break;
		}

		mutex_lock(&cpt_data.cpt_mutex);
		/* if all HTs in a core are offline, it may break affinity */
		cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
		warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
		mutex_unlock(&cpt_data.cpt_mutex);
		CDEBUG(warn ? D_WARNING : D_INFO,
		       "Lustre: can't support CPU plug-out well now, "
		       "performance and stability could be impacted "
		       "[CPU %u action: %lx]\n", cpu, action);
	}

	return NOTIFY_OK;
}

static struct notifier_block cfs_cpu_notifier = {
	.notifier_call	= cfs_cpu_notify,
	.priority	= 0
};
#endif

void
cfs_cpu_fini(void)
{
	if (cfs_cpt_table != NULL)
		cfs_cpt_table_free(cfs_cpt_table);

#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif

	if (cpt_data.cpt_cpumask != NULL)
		LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
}

int
cfs_cpu_init(void)
{
	LASSERT(cfs_cpt_table == NULL);

	memset(&cpt_data, 0, sizeof(cpt_data));

	LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
	if (cpt_data.cpt_cpumask == NULL) {
		CERROR("Failed to allocate scratch buffer\n");
		return -1;
	}

	spin_lock_init(&cpt_data.cpt_lock);
	mutex_init(&cpt_data.cpt_mutex);

#ifdef CONFIG_HOTPLUG_CPU
	register_hotcpu_notifier(&cfs_cpu_notifier);
#endif

	if (*cpu_pattern != 0) {
		cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
		if (cfs_cpt_table == NULL) {
			CERROR("Failed to create cptab from pattern %s\n",
			       cpu_pattern);
			goto failed;
		}

	} else {
		cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
		if (cfs_cpt_table == NULL) {
			CERROR("Failed to create ptable with npartitions %d\n",
			       cpu_npartitions);
			goto failed;
		}
	}

	spin_lock(&cpt_data.cpt_lock);
	if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
		spin_unlock(&cpt_data.cpt_lock);
		CERROR("CPU hotplug/unplug during setup\n");
		goto failed;
	}
	spin_unlock(&cpt_data.cpt_lock);

	LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n",
		 num_online_cpus(), cfs_cpt_number(cfs_cpt_table));
	return 0;

 failed:
	cfs_cpu_fini();
	return -1;
}