*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* cpu_npartitions=1:
* core[0, 1, ... 7] = partition[0]
*
- * . User can also specifiy CPU partitions by string pattern
+ * . User can also specify CPU partitions by string pattern
*
* Examples: cpu_partitions="0[0,1], 1[2,3]"
* cpu_partitions="N 0[0-3], 1[4-8]"
*
- * The first charactor "N" means following numbers are numa ID
+ * The first character "N" means the following numbers are NUMA IDs
*
* . NUMA allocators, CPU affinity threads are built over CPU partitions,
* instead of HW CPUs or HW nodes.
* configured by cpu_npartitions of the global cfs_cpt_table
*
* . If cpu_npartitions=1(all CPUs in one pool), lustre should work the
- * same way as 2.2 or earlier verison
+ * same way as 2.2 or earlier versions
*
* Author: liang@whamcloud.com
*/
#ifndef HAVE_LIBCFS_CPT
-typedef unsigned long cpumask_t;
-typedef unsigned long nodemask_t;
+#if !defined(__linux__) || !defined(__KERNEL__)
+typedef struct nodemask { DECLARE_BITMAP(bits, 1); } nodemask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, 1); } cpumask_t;
+
+#define node_set(node, dst) __node_set((node), &(dst))
+static __always_inline void __node_set(int node, nodemask_t *dstp)
+{
+ set_bit(node, dstp->bits);
+}
+#endif
struct cfs_cpt_table {
/* # of CPU partitions */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
/**
+ * return number of HTs (hyperthread siblings) in the same core as \a cpu
+ */
+int cfs_cpu_ht_nsiblings(int cpu);
+
+/**
* iterate over all CPU partitions in \a cptab
*/
#define cfs_cpt_for_each(i, cptab) \