/*
 * Page size/mask derived from CFS_PAGE_SHIFT (defined elsewhere in the
 * port layer).  CFS_PAGE_MASK clears the in-page offset bits of a 64-bit
 * address; the __u64 cast keeps the mask full-width on 32-bit builds.
 */
#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
/*
 * Page allocation / mapping shims for this libcfs port.
 *
 * NOTE(review): several lines below carry a leading '-' (and the NUMA
 * section further down carries '+'); these look like unified-diff markers
 * leaked into the extracted text rather than C tokens.  Confirm against
 * the pristine header (or apply the patch) before expecting this to
 * compile — the markers are preserved untouched here.
 */
-cfs_page_t *cfs_alloc_pages(int mask, unsigned long order);
-void cfs_free_pages(cfs_page_t *pg, int what);
/* Single-page allocate/free, plus address lookup and kmap/kunmap pairs. */
cfs_page_t *cfs_alloc_page(unsigned int flags);
void cfs_free_page(cfs_page_t *pg);
void *cfs_page_address(cfs_page_t *pg);
void *cfs_kmap(cfs_page_t *pg);
void cfs_kunmap(cfs_page_t *pg);
-#define __cfs_free_pages(pg, what) cfs_free_pages((pg), (what))
-
/*
 * Deliberate traps: expanding these calls the undeclared symbol
 * __I_should_not_be_called__, so any use fails at compile/link time.
 */
#define cfs_get_page(p) __I_should_not_be_called__(at_all)
#define cfs_page_count(p) __I_should_not_be_called__(at_all)
#define cfs_page_index(p) ((p)->index)
int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
/*
 * NUMA allocators
 *
 * CPT (CPU-partition-table) aware allocation wrappers.  In this port the
 * cptab/cpt placement hints are discarded and the calls fall through to
 * the plain allocators.
 *
 * NOTE(review): cfs_cpt_vmalloc expands to cfs_alloc(bytes) with a single
 * argument while cfs_cpt_malloc forwards (bytes, flags) — verify
 * cfs_alloc's signature in this port; one of the two expansions may be
 * passing the wrong number of arguments.  The leading '+' on the lines
 * below looks like a leaked unified-diff marker; preserved untouched —
 * see the note earlier in this header.
 */
+#define cfs_cpt_malloc(cptab, cpt, bytes, flags) \
+ cfs_alloc(bytes, flags)
+#define cfs_cpt_vmalloc(cptab, cpt, bytes) \
+ cfs_alloc(bytes)
+#define cfs_page_cpt_alloc(cptab, cpt, mask) \
+ cfs_alloc_page(mask)
+#define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp) \
+ cfs_mem_cache_alloc(cache, gfp)
+
+/*
* Copy to/from user
*/
static inline int cfs_copy_from_user(void *a,void *b, int c)