*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* cpu_npartitions=1:
* core[0, 1, ... 7] = partition[0]
*
- * . User can also specifiy CPU partitions by string pattern
+ * . User can also specify CPU partitions by string pattern
*
* Examples: cpu_partitions="0[0,1], 1[2,3]"
* cpu_partitions="N 0[0-3], 1[4-8]"
*
- * The first charactor "N" means following numbers are numa ID
+ * The first character "N" means following numbers are numa ID
*
* . NUMA allocators, CPU affinity threads are built over CPU partitions,
* instead of HW CPUs or HW nodes.
* configured by cpu_npartitions of the global cfs_cpt_table
*
* . If cpu_npartitions=1(all CPUs in one pool), lustre should work the
- * same way as 2.2 or earlier verison
+ * same way as 2.2 or earlier versions
*
* Author: liang@whamcloud.com
*/
#ifndef HAVE_LIBCFS_CPT
-typedef unsigned long cpumask_t;
-typedef unsigned long nodemask_t;
+#ifndef __KERNEL__
+/* Userspace build: minimal single-bit stand-ins for the kernel's
+ * cpumask_t/nodemask_t so the CPU-partition code compiles unchanged. */
+typedef struct nodemask { DECLARE_BITMAP(bits, 1); } nodemask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, 1); } cpumask_t;
+
+#define node_set(node, dst) __node_set((node), &(dst))
+/* set bit \a node in \a dstp; mirrors the kernel's __node_set() helper */
+static __always_inline void __node_set(int node, nodemask_t *dstp)
+{
+	set_bit(node, dstp->bits);
+}
+#endif /* !__KERNEL__ */
struct cfs_cpt_table {
/* # of CPU partitions */
int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
/**
+ * return number of HTs in the same core of \a cpu
+ */
+int cfs_cpu_ht_nsiblings(int cpu);
+
+/**
+ * allocate \a nr_bytes of physical memory from a contiguous region with the
+ * properties of \a flags which are bound to the partition id \a cpt. This
+ * function should only be used for the case when only a few pages of memory
+ * are needed.
+ */
+static inline void *
+cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes,
+	       gfp_t flags)
+{
+	return kmalloc_node(nr_bytes, flags,
+			    cfs_cpt_spread_node(cptab, cpt));
+}
+
+/**
+ * allocate \a nr_bytes of virtually contiguous, zeroed memory that is bound
+ * to the partition id \a cpt.
+ */
+static inline void *
+cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
+{
+	/* vzalloc_node() sets __GFP_FS by default, and no current kernel
+	 * exported entry-point allows for both a NUMA node specification
+	 * and a custom allocation flags mask. This may be an issue since
+	 * __GFP_FS usage can cause some deadlock situations in our code,
+	 * like when memory reclaim starts, within the same context of a
+	 * thread doing FS operations, that can also attempt conflicting FS
+	 * operations, ...
+	 */
+	return vzalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
+}
+
+/**
+ * allocate a single page of memory with the properties of \a flags, where
+ * that page is bound to the partition id \a cpt.
+ */
+static inline struct page *
+cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
+{
+	return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
+}
+
+/**
+ * allocate a chunk of memory from a memory pool (slab cache \a cachep) that
+ * is bound to the partition id \a cpt with the properties of \a flags.
+ */
+static inline void *
+cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
+			int cpt, gfp_t flags)
+{
+	return kmem_cache_alloc_node(cachep, flags,
+				     cfs_cpt_spread_node(cptab, cpt));
+}
+
+/**
* iterate over all CPU partitions in \a cptab
*/
#define cfs_cpt_for_each(i, cptab) \