* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
 * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
void *
cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
- size_t nr_bytes, unsigned int flags)
+ size_t nr_bytes, gfp_t flags)
{
return kmalloc_node(nr_bytes, flags,
cfs_cpt_spread_node(cptab, cpt));
/**
 * Allocate \a nr_bytes of zeroed, virtually-contiguous memory on the
 * NUMA node that cfs_cpt_spread_node() selects for CPU partition \a cpt.
 *
 * \param cptab		CPU partition table to spread the allocation over
 * \param cpt		CPU partition index within \a cptab
 * \param nr_bytes	number of bytes to allocate
 *
 * \retval pointer to the zeroed allocation, or NULL on failure
 */
void *
cfs_cpt_vzalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
{
	/* vzalloc_node() sets __GFP_FS by default but no current Kernel
	 * exported entry-point allows for both a NUMA node specification
	 * and a custom allocation flags mask. This may be an issue since
	 * __GFP_FS usage can cause some deadlock situations in our code,
	 * like when memory reclaim started, within the same context of a
	 * thread doing FS operations, that can also attempt conflicting FS
	 * operations, ...
	 */
	return vzalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
}
EXPORT_SYMBOL(cfs_cpt_vzalloc);
struct page *
-cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags)
+cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, gfp_t flags)
{
return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
}
void *
cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
- int cpt, unsigned int flags)
+ int cpt, gfp_t flags)
{
return kmem_cache_alloc_node(cachep, flags,
cfs_cpt_spread_node(cptab, cpt));