Replace memory-related wrappers with the kernel API.
Affected primitives:
CFS_PAGE_SIZE, CFS_PAGE_SHIFT, cfs_num_physpages,
cfs_copy_from_user, cfs_copy_to_user, cfs_page_address,
cfs_kmap/cfs_kunmap, cfs_get_page, cfs_page_count,
cfs_page_index, cfs_page_pin, cfs_page_unpin,
cfs_memory_pressure_get/set/clr, CFS_NUM_CACHEPAGES,
CFS_ALLOC_XXX flags, cfs_alloc/free, cfs_alloc/free_large,
cfs_alloc/free_page, CFS_DECL_MMSPACE, CFS_MMSPACE_OPEN,
CFS_MMSPACE_CLOSE, CFS_SLAB_XXX flags, cfs_shrinker_t,
cfs_set/remove_shrinker, CFS_DEFAULT_SEEKS, cfs_mem_cache_t,
cfs_mem_cache_alloc/free/create/destroy, cfs_mem_is_in_cache
manual changes:
1. cfs_alloc_flags_to_gfp() is removed
2. remove kmalloc/kfree etc. from linux-mem.c and linux-mem.h
3. remove page_address/kmap/kunmap etc. from linux-mem.h
4. remove page_cache_get/page_cache_release from echo_internal.h. They
are already defined in user-mem.h
5. change the kmem_cache_create/destroy prototypes to the kernel's and
modify all callers to match them (caller-side sketch below)
6. define _SPL_KMEM_H and related macros to avoid using spl's
sys/kmem.h, which redefines the slab allocator (sketch below)
7. change kmem_virt to is_vmalloc_addr, as provided by the kernel, so
that we don't use any of spl's sys/kmem.h functions (sketch below)
8. clean up include files a little bit in osd-zfs
9. various coding style cleanup
NUMA allocators (cfs_cpt_xxx) are not changed in this patch.
gnilnd is not converted, as requested by James Simmons.
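As an illustration of item 5, a typical caller-side change looks roughly
like the following; struct foo and "foo_cache" are made-up names, not
taken from this patch:

	/* before: libcfs wrapper, destroy returned int */
	cache = cfs_mem_cache_create("foo_cache", sizeof(struct foo),
				     0, CFS_SLAB_HWCACHE_ALIGN);
	rc = cfs_mem_cache_destroy(cache);

	/* after: kernel prototype takes a ctor argument, destroy is void */
	cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				  0, SLAB_HWCACHE_ALIGN, NULL);
	kmem_cache_destroy(cache);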
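Item 6 uses the usual include-guard trick: pre-defining a header's guard
macro turns later inclusions of that header into no-ops. A rough sketch
only; the exact macro set osd-zfs needs is defined in the real code:

	/* keep spl's sys/kmem.h from redefining the slab allocator */
	#define _SPL_KMEM_H
	/* ... plus definitions for the few kmem_* helpers osd-zfs uses,
	 * mapped onto the kernel slab/vmalloc API ... */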
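Item 7 is a straight substitution; a minimal sketch with an illustrative
buffer pointer:

	/* before: spl helper from sys/kmem.h */
	if (kmem_virt(buf))
		vfree(buf);
	else
		kfree(buf);

	/* after: is_vmalloc_addr() is provided by the kernel */
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);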
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Change-Id: Iadfbb0d5a0e31c78dd6c811e5ffdb468fa7e6f44
Reviewed-on: http://review.whamcloud.com/2831
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
################################################################################
# memory operations
-
-#s/\bcfs_page_t\b/struct page/g
-#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
-#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
-#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
-#s/\bcfs_num_physpages\b/num_physpages/g
-#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
-#s/\bcfs_copy_from_user\b/copy_from_user/g
-#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
-#s/\bcfs_copy_to_user\b/copy_to_user/g
-#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
-#s/\bcfs_page_address\b/page_address/g
-#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
-#s/\bcfs_kmap\b/kmap/g
-#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
-#s/\bcfs_kunmap\b/kunmap/g
-#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
-#s/\bcfs_get_page\b/get_page/g
-#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
-#s/\bcfs_page_count\b/page_count/g
-#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
-#s/\bcfs_page_index\b/page_index/g
-#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
-#s/\bcfs_page_pin\b/page_cache_get/g
-#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
-#s/\bcfs_page_unpin\b/page_cache_release/g
-#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
-#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
-#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
-#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
-#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
-# memory allocator
-#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
-#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
-#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
-#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
-#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
-#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
-#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
-#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
-#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
-#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
-#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
-#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
-#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
-#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
-#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
-#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
-#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
-#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
-#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
-#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
-#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
-#s/\bcfs_alloc\b/kmalloc/g
-#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
-#s/\bcfs_free\b/kfree/g
-#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
-#s/\bcfs_alloc_large\b/vmalloc/g
-#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
-#s/\bcfs_free_large\b/vfree/g
-#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
-#s/\bcfs_alloc_page\b/alloc_page/g
-#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
-#s/\bcfs_free_page\b/__free_page/g
-#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+s/\bcfs_page_t\b/struct page/g
+/typedef[ \t]*\bstruct page\b[ \t]*\bstruct page\b/d
+s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+s/\bcfs_num_physpages\b/num_physpages/g
+/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+s/\bcfs_copy_from_user\b/copy_from_user/g
+/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_copy_to_user\b/copy_to_user/g
+/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_page_address\b/page_address/g
+/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+s/\bcfs_kmap\b/kmap/g
+/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+s/\bcfs_kunmap\b/kunmap/g
+/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+s/\bcfs_get_page\b/get_page/g
+/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+s/\bcfs_page_count\b/page_count/g
+/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+s/\bcfs_page_index\b/page_index/g
+/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+s/\bcfs_page_pin\b/page_cache_get/g
+/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+s/\bcfs_page_unpin\b/page_cache_release/g
+/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+ # memory allocator
+s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+s/\bCFS_ALLOC_USER\b/GFP_USER/g
+/#[ \t]*define[ \t]*\bGFP_USER\b[ \t]*\bGFP_USER\b/d
+s/\bCFS_ALLOC_KERNEL\b/GFP_KERNEL/g
+/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+s/\bCFS_ALLOC_NOFS\b/GFP_NOFS/g
+/#[ \t]*define[ \t]*\bGFP_NOFS\b[ \t]*\bGFP_NOFS\b/d
+s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+s/\bcfs_alloc\b/kmalloc/g
+/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+s/\bcfs_free\b/kfree/g
+/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+s/\bcfs_alloc_large\b/vmalloc/g
+/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+s/\bcfs_free_large\b/vfree/g
+/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+s/\bcfs_alloc_page\b/alloc_page/g
+/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+s/\bcfs_free_page\b/__free_page/g
+/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
# TODO: SLAB allocator
-#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
-#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
-#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
-#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
-#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
-#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
-#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
-#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
-#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
-#s/\bcfs_shrinker\b/shrinker/g
-#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
-#s/\bcfs_shrinker_t\b/struct shrinkert/g
-#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
-#s/\bcfs_set_shrinker\b/set_shrinker/g
-#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
-#s/\bcfs_remove_shrinker\b/remove_shrinker/g
-#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
-#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
-#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+s/\bcfs_shrinker\b/shrinker/g
+/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+s/\bcfs_shrinker_t\b/shrinker_t/g
+/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+s/\bcfs_set_shrinker\b/set_shrinker/g
+/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+s/\bcfs_remove_shrinker\b/remove_shrinker/g
+/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/cfs_mem_cache_t/struct kmem_cache/g
+s/cfs_mem_cache_create/kmem_cache_create/g
+s/\w+[ =]*cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_alloc/kmem_cache_alloc/g
+s/cfs_mem_cache_free/kmem_cache_free/g
+s/cfs_mem_is_in_cache/kmem_is_in_cache/g
/* Variable sized pages are not supported */
#ifdef PAGE_SHIFT
-#define CFS_PAGE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SHIFT PAGE_SHIFT
#else
-#define CFS_PAGE_SHIFT 12
+#define PAGE_CACHE_SHIFT 12
#endif
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
+#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE - 1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE - 1))
enum {
XNU_PAGE_RAW,
* - "xll" pages (XNU_PAGE_XLL): these are used by file system to cache
* file data, owned by file system objects, hashed, lrued, etc.
*
- * cfs_page_t has to cover both of them, because core Lustre code is based on
+ * struct page has to cover both of them, because core Lustre code is based on
* the Linux assumption that page is _both_ memory buffer and file system
* caching entity.
*
* To achieve this, all types of pages supported on XNU has to start from
- * common header that contains only "page type". Common cfs_page_t operations
+ * common header that contains only "page type". Common struct page operations
* dispatch through operation vector based on page type.
*
*/
-typedef struct xnu_page {
+struct page {
int type;
-} cfs_page_t;
+};
struct xnu_page_ops {
- void *(*page_map) (cfs_page_t *);
- void (*page_unmap) (cfs_page_t *);
- void *(*page_address) (cfs_page_t *);
+ void *(*page_map) (struct page *);
+ void (*page_unmap) (struct page *);
+ void *(*page_address) (struct page *);
};
void xnu_page_ops_register(int type, struct xnu_page_ops *ops);
/*
* Public interface to lustre
*
- * - cfs_alloc_page(f)
- * - cfs_free_page(p)
- * - cfs_kmap(p)
- * - cfs_kunmap(p)
- * - cfs_page_address(p)
+ * - alloc_page(f)
+ * - __free_page(p)
+ * - kmap(p)
+ * - kunmap(p)
+ * - page_address(p)
*/
/*
- * Of all functions above only cfs_kmap(), cfs_kunmap(), and
- * cfs_page_address() can be called on file system pages. The rest is for raw
+ * Of all functions above only kmap(), kunmap(), and
+ * page_address() can be called on file system pages. The rest is for raw
* pages only.
*/
-cfs_page_t *cfs_alloc_page(u_int32_t flags);
-void cfs_free_page(cfs_page_t *page);
-void cfs_get_page(cfs_page_t *page);
-int cfs_put_page_testzero(cfs_page_t *page);
-int cfs_page_count(cfs_page_t *page);
-#define cfs_page_index(pg) (0)
+struct page *alloc_page(u_int32_t flags);
+void __free_page(struct page *page);
+void get_page(struct page *page);
+int cfs_put_page_testzero(struct page *page);
+int page_count(struct page *page);
+#define page_index(pg) (0)
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
/*
* Memory allocator
*/
-void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-void cfs_free(void *addr);
+void *kmalloc(size_t nr_bytes, u_int32_t flags);
+void kfree(void *addr);
-void *cfs_alloc_large(size_t nr_bytes);
-void cfs_free_large(void *addr);
+void *vmalloc(size_t nr_bytes);
+void vfree(void *addr);
extern int get_preemption_level(void);
-#define CFS_ALLOC_ATOMIC_TRY \
- (get_preemption_level() != 0 ? CFS_ALLOC_ATOMIC : 0)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+ /* allocation is not allowed to block */
+ GFP_ATOMIC = 0x1,
+ /* allocation is allowed to block */
+ __GFP_WAIT = 0x2,
+ /* allocation should return zeroed memory */
+ __GFP_ZERO = 0x4,
+ /* allocation is allowed to call file-system code to free/clean
+ * memory */
+ __GFP_FS = 0x8,
+ /* allocation is allowed to do io to free/clean memory */
+ __GFP_IO = 0x10,
+ /* don't report allocation failure to the console */
+ __GFP_NOWARN = 0x20,
+ /* standard allocator flag combination */
+ GFP_IOFS = __GFP_FS | __GFP_IO,
+ GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+ GFP_NOFS = __GFP_WAIT | __GFP_IO,
+ GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+ /* allow to return page beyond KVM. It has to be mapped into KVM by
+ * kmap() and unmapped with kunmap(). */
+ __GFP_HIGHMEM = 0x40,
+ GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+ __GFP_HIGHMEM,
+};
+
+#define ALLOC_ATOMIC_TRY \
+ (get_preemption_level() != 0 ? GFP_ATOMIC : 0)
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
/*
* Slab:
#define MC_NAME_MAX_LEN 64
-typedef struct cfs_mem_cache {
+struct kmem_cache {
int mc_size;
mem_cache_t mc_cache;
struct list_head mc_link;
char mc_name [MC_NAME_MAX_LEN];
-} cfs_mem_cache_t;
+};
#define KMEM_CACHE_MAX_COUNT 64
#define KMEM_MAX_ZONE 8192
-cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *);
+void *kmem_cache_alloc(struct kmem_cache *, int);
+void kmem_cache_free(struct kmem_cache *, void *);
/*
* Misc
*/
/* XXX Liang: num_physpages... fix me */
#define num_physpages (64 * 1024)
-#define CFS_NUM_CACHEPAGES num_physpages
+#define NUM_CACHEPAGES num_physpages
-#define CFS_DECL_MMSPACE
-#define CFS_MMSPACE_OPEN do {} while(0)
-#define CFS_MMSPACE_CLOSE do {} while(0)
+#define DECL_MMSPACE
+#define MMSPACE_OPEN do {} while (0)
+#define MMSPACE_CLOSE do {} while (0)
#define copy_from_user(kaddr, uaddr, size) copyin(CAST_USER_ADDR_T(uaddr), (caddr_t)kaddr, size)
#define copy_to_user(uaddr, kaddr, size) copyout((caddr_t)kaddr, CAST_USER_ADDR_T(uaddr), size)
};
/*
- * Universal memory allocator API
- */
-enum cfs_alloc_flags {
- /* allocation is not allowed to block */
- CFS_ALLOC_ATOMIC = 0x1,
- /* allocation is allowed to block */
- CFS_ALLOC_WAIT = 0x2,
- /* allocation should return zeroed memory */
- CFS_ALLOC_ZERO = 0x4,
- /* allocation is allowed to call file-system code to free/clean
- * memory */
- CFS_ALLOC_FS = 0x8,
- /* allocation is allowed to do io to free/clean memory */
- CFS_ALLOC_IO = 0x10,
- /* don't report allocation failure to the console */
- CFS_ALLOC_NOWARN = 0x20,
- /* standard allocator flag combination */
- CFS_ALLOC_STD = CFS_ALLOC_FS | CFS_ALLOC_IO,
- CFS_ALLOC_USER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO,
- CFS_ALLOC_NOFS = CFS_ALLOC_WAIT | CFS_ALLOC_IO,
- CFS_ALLOC_KERNEL = CFS_ALLOC_WAIT | CFS_ALLOC_IO | CFS_ALLOC_FS,
-};
-
-/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
-enum cfs_alloc_page_flags {
- /* allow to return page beyond KVM. It has to be mapped into KVM by
- * cfs_kmap() and unmapped with cfs_kunmap(). */
- CFS_ALLOC_HIGHMEM = 0x40,
- CFS_ALLOC_HIGHUSER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO |
- CFS_ALLOC_HIGHMEM,
-};
-
-/*
* Drop into debugger, if possible. Implementation is provided by platform.
*/
* @retval 0 for success.
*/
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
- cfs_page_t *page, unsigned int offset,
+ struct page *page, unsigned int offset,
unsigned int len);
/** Update digest by part of data.
/*
* Memory
*/
-#ifndef cfs_memory_pressure_get
-#define cfs_memory_pressure_get() (0)
-#endif
-#ifndef cfs_memory_pressure_set
-#define cfs_memory_pressure_set() do {} while (0)
-#endif
-#ifndef cfs_memory_pressure_clr
-#define cfs_memory_pressure_clr() do {} while (0)
-#endif
-
static inline int cfs_memory_pressure_get_and_set(void)
{
- int old = cfs_memory_pressure_get();
+ int old = memory_pressure_get();
- if (!old)
- cfs_memory_pressure_set();
- return old;
+ if (!old)
+ memory_pressure_set();
+ return old;
}
static inline void cfs_memory_pressure_restore(int old)
{
- if (old)
- cfs_memory_pressure_set();
- else
- cfs_memory_pressure_clr();
- return;
+ if (old)
+ memory_pressure_set();
+ else
+ memory_pressure_clr();
+ return;
}
#endif
#endif /* LIBCFS_DEBUG */
#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << CFS_PAGE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask) \
do { \
LASSERT(!cfs_in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & CFS_ALLOC_ATOMIC)) != 0); \
+ ((mask) & GFP_ATOMIC)) != 0); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
do { \
LIBCFS_ALLOC_PRE((size), (mask)); \
(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
- cfs_alloc((size), (mask)) : cfs_alloc_large(size); \
+ kmalloc((size), (mask)) : vmalloc(size); \
LIBCFS_ALLOC_POST((ptr), (size)); \
} while (0)
* default allocator
*/
#define LIBCFS_ALLOC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_IO)
+ LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)
/**
* non-sleeping allocator
*/
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_ATOMIC)
+ LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
/**
* allocate memory for specified CPU partition
/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
- LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, CFS_ALLOC_IO)
+ LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
-#define LIBCFS_FREE(ptr, size) \
-do { \
- int s = (size); \
- if (unlikely((ptr) == NULL)) { \
- CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
- "%s:%d\n", s, __FILE__, __LINE__); \
- break; \
- } \
- libcfs_kmem_dec((ptr), s); \
- CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
+#define LIBCFS_FREE(ptr, size) \
+do { \
+ int s = (size); \
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
+ "%s:%d\n", s, __FILE__, __LINE__); \
+ break; \
+ } \
+ libcfs_kmem_dec((ptr), s); \
+ CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
s, (ptr), libcfs_kmem_read()); \
- if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
- cfs_free_large(ptr); \
- else \
- cfs_free(ptr); \
+ if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
+ vfree(ptr); \
+ else \
+ kfree(ptr); \
} while (0)
/******************************************************************************/
struct libcfs_device_userstate
{
- int ldu_memhog_pages;
- cfs_page_t *ldu_memhog_root_page;
+ int ldu_memhog_pages;
+ struct page *ldu_memhog_root_page;
};
/* what used to be in portals_lib.h */
int *oldmask, int minmask, int allmask);
/* Allocate space for and copy an existing string.
- * Must free with cfs_free().
+ * Must free with kfree().
*/
char *cfs_strdup(const char *str, u_int32_t flags);
#define LWTSTR(n) #n
#define LWTWHERE(f,l) f ":" LWTSTR(l)
-#define LWT_EVENTS_PER_PAGE (CFS_PAGE_SIZE / sizeof (lwt_event_t))
+#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lwt_event_t))
#define LWT_EVENT(p1, p2, p3, p4) \
do { \
# include <linux/mm_inline.h>
#endif
-typedef struct page cfs_page_t;
-#define CFS_PAGE_SIZE PAGE_CACHE_SIZE
-#define CFS_PAGE_SHIFT PAGE_CACHE_SHIFT
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
-#define cfs_num_physpages num_physpages
+#define page_index(p) ((p)->index)
-#define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define cfs_copy_to_user(to, from, n) copy_to_user(to, from, n)
-static inline void *cfs_page_address(cfs_page_t *page)
-{
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- return page_address(page);
-}
-
-static inline void *cfs_kmap(cfs_page_t *page)
-{
- return kmap(page);
-}
-
-static inline void cfs_kunmap(cfs_page_t *page)
-{
- kunmap(page);
-}
-
-static inline void cfs_get_page(cfs_page_t *page)
-{
- get_page(page);
-}
-
-static inline int cfs_page_count(cfs_page_t *page)
-{
- return page_count(page);
-}
-
-#define cfs_page_index(p) ((p)->index)
-
-#define cfs_page_pin(page) page_cache_get(page)
-#define cfs_page_unpin(page) page_cache_release(page)
-
-/*
- * Memory allocator
- * XXX Liang: move these declare to public file
- */
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void cfs_free(void *addr);
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void cfs_free_large(void *addr);
-
-extern cfs_page_t *cfs_alloc_page(unsigned int flags);
-extern void cfs_free_page(cfs_page_t *page);
-
-#define cfs_memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define cfs_memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define cfs_memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
+#define memory_pressure_get() (current->flags & PF_MEMALLOC)
+#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
+#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
-#define CFS_NUM_CACHEPAGES \
- min(cfs_num_physpages, 1UL << (30 - CFS_PAGE_SHIFT) * 3 / 4)
+#define NUM_CACHEPAGES \
+ min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
#else
-#define CFS_NUM_CACHEPAGES cfs_num_physpages
+#define NUM_CACHEPAGES num_physpages
#endif
/*
* In Linux there is no way to determine whether current execution context is
* blockable.
*/
-#define CFS_ALLOC_ATOMIC_TRY CFS_ALLOC_ATOMIC
+#define ALLOC_ATOMIC_TRY GFP_ATOMIC
+/* GFP_IOFS was added in 2.6.33 kernel */
+#ifndef GFP_IOFS
+#define GFP_IOFS (__GFP_IO | __GFP_FS)
+#endif
-/*
- * SLAB allocator
- * XXX Liang: move these declare to public file
- */
-typedef struct kmem_cache cfs_mem_cache_t;
-extern cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-extern int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
-extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
-
-#define CFS_DECL_MMSPACE mm_segment_t __oldfs
-#define CFS_MMSPACE_OPEN \
+#define DECL_MMSPACE mm_segment_t __oldfs
+#define MMSPACE_OPEN \
do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
-#define CFS_MMSPACE_CLOSE set_fs(__oldfs)
+#define MMSPACE_CLOSE set_fs(__oldfs)
-#define CFS_SLAB_HWCACHE_ALIGN SLAB_HWCACHE_ALIGN
-#define CFS_SLAB_KERNEL SLAB_KERNEL
-#define CFS_SLAB_NOFS SLAB_NOFS
-/*
- * NUMA allocators
- *
- * NB: we will rename these functions in a separate patch:
- * - rename cfs_alloc to cfs_malloc
- * - rename cfs_alloc/free_page to cfs_page_alloc/free
- * - rename cfs_alloc/free_large to cfs_vmalloc/vfree
- */
extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes, unsigned int flags);
extern void *cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes);
-extern cfs_page_t *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
+extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
int cpt, unsigned int flags);
-extern void *cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep,
+extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
struct cfs_cpt_table *cptab,
int cpt, unsigned int flags);
/*
* Shrinker
*/
-#define cfs_shrinker shrinker
#ifdef HAVE_SHRINK_CONTROL
# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
#endif
#ifdef HAVE_REGISTER_SHRINKER
-typedef int (*cfs_shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
+typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
static inline
-struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
+struct shrinker *set_shrinker(int seek, shrinker_t func)
{
struct shrinker *s;
}
static inline
-void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+void remove_shrinker(struct shrinker *shrinker)
{
if (shrinker == NULL)
return;
unregister_shrinker(shrinker);
kfree(shrinker);
}
-#else
-typedef shrinker_t cfs_shrinker_t;
-#define cfs_set_shrinker(s, f) set_shrinker(s, f)
-#define cfs_remove_shrinker(s) remove_shrinker(s)
#endif
-#define CFS_DEFAULT_SEEKS DEFAULT_SEEKS
#endif /* __LINUX_CFS_MEM_H__ */
*
***************************************************************************/
-struct cfs_shrinker {
+struct shrinker {
;
};
-#define CFS_DEFAULT_SEEKS (0)
+#define DEFAULT_SEEKS (0)
-typedef int (*cfs_shrinker_t)(int, unsigned int);
+typedef int (*shrinker_t)(int, unsigned int);
static inline
-struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrinker_t shrink)
+struct shrinker *set_shrinker(int seeks, shrinker_t shrink)
{
- return (struct cfs_shrinker *)0xdeadbea1; // Cannot return NULL here
+ return (struct shrinker *)0xdeadbea1; /* Cannot return NULL here */
}
-static inline void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+static inline void remove_shrinker(struct shrinker *shrinker)
{
}
*/
#define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
-typedef struct page {
+struct page {
void *addr;
unsigned long index;
cfs_list_t list;
int _managed;
#endif
cfs_list_t _node;
-} cfs_page_t;
+};
/* 4K */
-#define CFS_PAGE_SHIFT 12
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
-
-cfs_page_t *cfs_alloc_page(unsigned int flags);
-void cfs_free_page(cfs_page_t *pg);
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
-
-#define cfs_get_page(p) __I_should_not_be_called__(at_all)
-#define cfs_page_count(p) __I_should_not_be_called__(at_all)
-#define cfs_page_index(p) ((p)->index)
-#define cfs_page_pin(page) do {} while (0)
-#define cfs_page_unpin(page) do {} while (0)
+#define PAGE_CACHE_SHIFT 12
+#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+
+struct page *alloc_page(unsigned int flags);
+void __free_page(struct page *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
+
+#define get_page(p) __I_should_not_be_called__(at_all)
+#define page_count(p) __I_should_not_be_called__(at_all)
+#define page_index(p) ((p)->index)
+#define page_cache_get(page) do { } while (0)
+#define page_cache_release(page) do { } while (0)
/*
* Memory allocator
* Inline function, so utils can use them without linking of libcfs
*/
-#define __ALLOC_ZERO (1 << 2)
-static inline void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+ /* allocation is not allowed to block */
+ GFP_ATOMIC = 0x1,
+ /* allocation is allowed to block */
+ __GFP_WAIT = 0x2,
+ /* allocation should return zeroed memory */
+ __GFP_ZERO = 0x4,
+ /* allocation is allowed to call file-system code to free/clean
+ * memory */
+ __GFP_FS = 0x8,
+ /* allocation is allowed to do io to free/clean memory */
+ __GFP_IO = 0x10,
+ /* don't report allocation failure to the console */
+ __GFP_NOWARN = 0x20,
+ /* standard allocator flag combination */
+ GFP_IOFS = __GFP_FS | __GFP_IO,
+ GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+ GFP_NOFS = __GFP_WAIT | __GFP_IO,
+ GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+ /* allow to return page beyond KVM. It has to be mapped into KVM by
+ * kmap() and unmapped with kunmap(). */
+ __GFP_HIGHMEM = 0x40,
+ GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+ __GFP_HIGHMEM,
+};
+
+static inline void *kmalloc(size_t nr_bytes, u_int32_t flags)
{
- void *result;
+ void *result;
- result = malloc(nr_bytes);
- if (result != NULL && (flags & __ALLOC_ZERO))
- memset(result, 0, nr_bytes);
- return result;
+ result = malloc(nr_bytes);
+ if (result != NULL && (flags & __GFP_ZERO))
+ memset(result, 0, nr_bytes);
+ return result;
}
-#define cfs_free(addr) free(addr)
-#define cfs_alloc_large(nr_bytes) cfs_alloc(nr_bytes, 0)
-#define cfs_free_large(addr) cfs_free(addr)
+#define kfree(addr) free(addr)
+#define vmalloc(nr_bytes) kmalloc(nr_bytes, 0)
+#define vfree(addr) free(addr)
-#define CFS_ALLOC_ATOMIC_TRY (0)
+#define ALLOC_ATOMIC_TRY (0)
/*
* SLAB allocator
*/
-typedef struct {
+struct kmem_cache {
int size;
-} cfs_mem_cache_t;
+};
-#define CFS_SLAB_HWCACHE_ALIGN 0
+#define SLAB_HWCACHE_ALIGN 0
#define SLAB_DESTROY_BY_RCU 0
-#define CFS_SLAB_KERNEL 0
-#define CFS_SLAB_NOFS 0
+#define SLAB_KERNEL 0
+#define SLAB_NOFS 0
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c);
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp);
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *c);
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp);
+void kmem_cache_free(struct kmem_cache *c, void *addr);
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem);
/*
* NUMA allocators
*/
#define cfs_cpt_malloc(cptab, cpt, bytes, flags) \
- cfs_alloc(bytes, flags)
+ kmalloc(bytes, flags)
#define cfs_cpt_vmalloc(cptab, cpt, bytes) \
- cfs_alloc(bytes)
+ kmalloc(bytes)
#define cfs_page_cpt_alloc(cptab, cpt, mask) \
- cfs_alloc_page(mask)
+ alloc_page(mask)
#define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp) \
- cfs_mem_cache_alloc(cache, gfp)
+ kmem_cache_alloc(cache, gfp)
#define smp_rmb() do {} while (0)
/*
* Copy to/from user
*/
-static inline int cfs_copy_from_user(void *a,void *b, int c)
+static inline int copy_from_user(void *a, void *b, int c)
{
- memcpy(a,b,c);
- return 0;
+ memcpy(a, b, c);
+ return 0;
}
-static inline int cfs_copy_to_user(void *a,void *b, int c)
+static inline int copy_to_user(void *a, void *b, int c)
{
- memcpy(a,b,c);
- return 0;
+	memcpy(a, b, c);
+ return 0;
}
#endif
((unsigned char *)&addr)[1], \
((unsigned char *)&addr)[0]
-static int cfs_copy_from_user(void *to, void *from, int c)
+static int copy_from_user(void *to, void *from, int c)
{
- memcpy(to, from, c);
- return 0;
+ memcpy(to, from, c);
+ return 0;
}
-static int cfs_copy_to_user(void *to, const void *from, int c)
+static int copy_to_user(void *to, const void *from, int c)
{
- memcpy(to, from, c);
- return 0;
+ memcpy(to, from, c);
+ return 0;
}
static unsigned long
0 \
)
-#define cfs_num_physpages (64 * 1024)
-#define CFS_NUM_CACHEPAGES cfs_num_physpages
+#define num_physpages (64 * 1024)
+#define NUM_CACHEPAGES num_physpages
#else
#ifdef __KERNEL__
-typedef struct cfs_mem_cache cfs_mem_cache_t;
-
/*
* page definitions
*/
-#define CFS_PAGE_SIZE PAGE_SIZE
-#define CFS_PAGE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SIZE PAGE_SIZE
+#define PAGE_CACHE_SHIFT PAGE_SHIFT
#define CFS_PAGE_MASK (~(PAGE_SIZE - 1))
-typedef struct cfs_page {
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
+
+struct page {
void * addr;
cfs_atomic_t count;
void * private;
void * mapping;
__u32 index;
__u32 flags;
-} cfs_page_t;
+};
#define page cfs_page
#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
&(page)->flags)
-#define __GFP_FS (1)
-#define GFP_KERNEL (2)
-#define GFP_ATOMIC (4)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+ /* allocation is not allowed to block */
+ GFP_ATOMIC = 0x1,
+ /* allocation is allowed to block */
+ __GFP_WAIT = 0x2,
+ /* allocation should return zeroed memory */
+ __GFP_ZERO = 0x4,
+ /* allocation is allowed to call file-system code to free/clean
+ * memory */
+ __GFP_FS = 0x8,
+ /* allocation is allowed to do io to free/clean memory */
+ __GFP_IO = 0x10,
+ /* don't report allocation failure to the console */
+ __GFP_NOWARN = 0x20,
+ /* standard allocator flag combination */
+ GFP_IOFS = __GFP_FS | __GFP_IO,
+ GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+ GFP_NOFS = __GFP_WAIT | __GFP_IO,
+ GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+ /* allow to return page beyond KVM. It has to be mapped into KVM by
+ * kmap() and unmapped with kunmap(). */
+ __GFP_HIGHMEM = 0x40,
+ GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+ __GFP_HIGHMEM,
+};
-cfs_page_t *cfs_alloc_page(int flags);
-void cfs_free_page(cfs_page_t *pg);
-void cfs_release_page(cfs_page_t *pg);
-cfs_page_t * virt_to_page(void * addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct page *alloc_page(int flags);
+void __free_page(struct page *pg);
+void cfs_release_page(struct page *pg);
+struct page *virt_to_page(void *addr);
#define page_cache_get(a) do {} while (0)
#define page_cache_release(a) do {} while (0)
-static inline void *cfs_page_address(cfs_page_t *page)
+static inline void *page_address(struct page *page)
{
return page->addr;
}
-static inline void *cfs_kmap(cfs_page_t *page)
+static inline void *kmap(struct page *page)
{
return page->addr;
}
-static inline void cfs_kunmap(cfs_page_t *page)
+static inline void kunmap(struct page *page)
{
return;
}
-static inline void cfs_get_page(cfs_page_t *page)
+static inline void get_page(struct page *page)
{
cfs_atomic_inc(&page->count);
}
-static inline void cfs_put_page(cfs_page_t *page)
+static inline void cfs_put_page(struct page *page)
{
cfs_atomic_dec(&page->count);
}
-static inline int cfs_page_count(cfs_page_t *page)
+static inline int page_count(struct page *page)
{
return cfs_atomic_read(&page->count);
}
-#define cfs_page_index(p) ((p)->index)
+#define page_index(p) ((p)->index)
/*
* Memory allocator
*/
-#define CFS_ALLOC_ATOMIC_TRY (0)
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void cfs_free(void *addr);
-
-#define kmalloc cfs_alloc
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void cfs_free_large(void *addr);
+#define ALLOC_ATOMIC_TRY (0)
+extern void *kmalloc(size_t nr_bytes, u_int32_t flags);
+extern void kfree(void *addr);
+extern void *vmalloc(size_t nr_bytes);
+extern void vfree(void *addr);
/*
* SLAB allocator
*/
-#define CFS_SLAB_HWCACHE_ALIGN 0
+#define SLAB_HWCACHE_ALIGN 0
/* The cache name is limited to 20 chars */
-struct cfs_mem_cache {
+struct kmem_cache {
char name[20];
ulong_ptr_t flags;
NPAGED_LOOKASIDE_LIST npll;
};
-extern cfs_mem_cache_t *cfs_mem_cache_create (const char *, size_t, size_t,
- unsigned long);
-extern int cfs_mem_cache_destroy (cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc (cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free (cfs_mem_cache_t *, void *);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long, void *);
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, int);
+extern void kmem_cache_free(struct kmem_cache *, void *);
/*
* shrinker
*/
typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
-struct cfs_shrinker {
+struct shrinker {
shrink_callback cb;
int seeks; /* seeks to recreate an obj */
long nr; /* objs pending delete */
};
-struct cfs_shrinker *cfs_set_shrinker(int seeks, shrink_callback cb);
-void cfs_remove_shrinker(struct cfs_shrinker *s);
+struct shrinker *set_shrinker(int seeks, shrink_callback cb);
+void remove_shrinker(struct shrinker *s);
int start_shrinker_timer();
void stop_shrinker_timer();
* Page allocator slabs
*/
-extern cfs_mem_cache_t *cfs_page_t_slab;
-extern cfs_mem_cache_t *cfs_page_p_slab;
+extern struct kmem_cache *cfs_page_t_slab;
+extern struct kmem_cache *cfs_page_p_slab;
-#define CFS_DECL_MMSPACE
-#define CFS_MMSPACE_OPEN do {} while(0)
-#define CFS_MMSPACE_CLOSE do {} while(0)
+#define DECL_MMSPACE
+#define MMSPACE_OPEN do {} while (0)
+#define MMSPACE_CLOSE do {} while (0)
#define cfs_mb() do {} while(0)
* MM defintions from (linux/mm.h)
*/
-#define CFS_DEFAULT_SEEKS 2 /* shrink seek */
+#define DEFAULT_SEEKS 2 /* shrink seek */
#else /* !__KERNEL__ */
{
cfs_group_info_t * groupinfo;
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
- groupinfo =
- (cfs_group_info_t *)cfs_alloc(sizeof(cfs_group_info_t), 0);
+ groupinfo = kmalloc(sizeof(cfs_group_info_t), 0);
if (groupinfo) {
memset(groupinfo, 0, sizeof(cfs_group_info_t));
}
return groupinfo;
}
+
static __inline void cfs_groups_free(cfs_group_info_t *group_info)
{
- KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
- __FUNCTION__));
- cfs_free(group_info);
+ KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+ __FUNCTION__));
+ kfree(group_info);
}
+
static __inline int
cfs_set_current_groups(cfs_group_info_t *group_info)
{
__FUNCTION__));
return 0;
}
+
static __inline int groups_search(cfs_group_info_t *group_info,
gid_t grp) {
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
spinlock_t Lock; /* Protection lock */
- cfs_mem_cache_t *slab; /* Memory slab for task slot */
+ struct kmem_cache *slab; /* Memory slab for task slot */
ULONG NumOfTasks; /* Total tasks (threads) */
LIST_ENTRY TaskList; /* List of task slots */
int ksnd_ntconns; /* number of tconns in list */
cfs_list_t ksnd_tconns; /* tdi connections list */
- cfs_mem_cache_t *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/
+ struct kmem_cache *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/
event_t ksnd_tconn_exit; /* event signal by last tconn */
spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
int ksnd_ntsdus; /* number of tsdu buffers allocated */
ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */
- cfs_mem_cache_t *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
+ struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */
cfs_list_t ksnd_freetsdus; /* List of the freed Tsdu buffer. */
static struct cfs_zone_nob cfs_zone_nob;
static spinlock_t cfs_zone_guard;
-cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
+struct kmem_cache *mem_cache_find(const char *name, size_t objsize)
{
- cfs_mem_cache_t *walker = NULL;
+ struct kmem_cache *walker = NULL;
LASSERT(cfs_zone_nob.z_nob != NULL);
* survives kext unloading, so that @name cannot be just static string
* embedded into kext image.
*/
-cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t objsize, const char *name)
{
- cfs_mem_cache_t *mc = NULL;
+ struct kmem_cache *mc = NULL;
char *cname;
- MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+ MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
if (mc == NULL){
CERROR("cfs_mem_cache created fail!\n");
return NULL;
return mc;
}
-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
{
/*
* zone can NOT be destroyed after creating,
#else /* !CFS_INDIVIDUAL_ZONE */
-cfs_mem_cache_t *
+struct kmem_cache *
mem_cache_find(const char *name, size_t objsize)
{
return NULL;
}
-cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t size, const char *name)
{
- cfs_mem_cache_t *mc = NULL;
+ struct kmem_cache *mc = NULL;
- MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+ MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
if (mc == NULL){
CERROR("cfs_mem_cache created fail!\n");
return NULL;
return mc;
}
-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
{
OSMalloc_Tagfree(mc->mc_cache);
FREE(mc, M_TEMP);
#endif /* !CFS_INDIVIDUAL_ZONE */
-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name,
- size_t objsize, size_t off, unsigned long arg1)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+		  unsigned long arg1, void *ctor)
{
- cfs_mem_cache_t *mc;
+ struct kmem_cache *mc;
- mc = mem_cache_find(name, objsize);
- if (mc)
- return mc;
- mc = mem_cache_create(objsize, name);
+ mc = mem_cache_find(name, objsize);
+ if (mc)
+ return mc;
+ mc = mem_cache_create(objsize, name);
return mc;
}
-int cfs_mem_cache_destroy (cfs_mem_cache_t *cachep)
+void kmem_cache_destroy(struct kmem_cache *cachep)
{
- mem_cache_destroy(cachep);
- return 0;
+ mem_cache_destroy(cachep);
}
-void *cfs_mem_cache_alloc (cfs_mem_cache_t *cachep, int flags)
+void *kmem_cache_alloc (struct kmem_cache *cachep, int flags)
{
- void *result;
+ void *result;
- /* zalloc_canblock() is not exported... Emulate it. */
- if (flags & CFS_ALLOC_ATOMIC) {
- result = (void *)mem_cache_alloc_nb(cachep);
- } else {
- LASSERT(get_preemption_level() == 0);
- result = (void *)mem_cache_alloc(cachep);
- }
- if (result != NULL && (flags & CFS_ALLOC_ZERO))
- memset(result, 0, cachep->mc_size);
+ /* zalloc_canblock() is not exported... Emulate it. */
+ if (flags & GFP_ATOMIC) {
+ result = (void *)mem_cache_alloc_nb(cachep);
+ } else {
+ LASSERT(get_preemption_level() == 0);
+ result = (void *)mem_cache_alloc(cachep);
+ }
+ if (result != NULL && (flags & __GFP_ZERO))
+ memset(result, 0, cachep->mc_size);
- return result;
+ return result;
}
-void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp)
+void kmem_cache_free (struct kmem_cache *cachep, void *objp)
{
- mem_cache_free(cachep, objp);
+ mem_cache_free(cachep, objp);
}
/* ---------------------------------------------------------------------------
* "Raw" pages
*/
-static unsigned int raw_pages = 0;
-static cfs_mem_cache_t *raw_page_cache = NULL;
+static unsigned int raw_pages;
+static struct kmem_cache *raw_page_cache;
static struct xnu_page_ops raw_page_ops;
static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = {
};
#if defined(LIBCFS_DEBUG)
-static int page_type_is_valid(cfs_page_t *page)
+static int page_type_is_valid(struct page *page)
{
- LASSERT(page != NULL);
- return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
+ LASSERT(page != NULL);
+ return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
}
-static int page_is_raw(cfs_page_t *page)
+static int page_is_raw(struct page *page)
{
- return page->type == XNU_PAGE_RAW;
+ return page->type == XNU_PAGE_RAW;
}
#endif
-static struct xnu_raw_page *as_raw(cfs_page_t *page)
+static struct xnu_raw_page *as_raw(struct page *page)
{
- LASSERT(page_is_raw(page));
- return list_entry(page, struct xnu_raw_page, header);
+ LASSERT(page_is_raw(page));
+ return list_entry(page, struct xnu_raw_page, header);
}
-static void *raw_page_address(cfs_page_t *pg)
+static void *raw_page_address(struct page *pg)
{
- return (void *)as_raw(pg)->virtual;
+ return (void *)as_raw(pg)->virtual;
}
-static void *raw_page_map(cfs_page_t *pg)
+static void *raw_page_map(struct page *pg)
{
- return (void *)as_raw(pg)->virtual;
+ return (void *)as_raw(pg)->virtual;
}
-static void raw_page_unmap(cfs_page_t *pg)
+static void raw_page_unmap(struct page *pg)
{
}
static void raw_page_finish(struct xnu_raw_page *pg)
{
- -- raw_pages;
- if (pg->virtual != NULL)
- cfs_mem_cache_free(raw_page_cache, pg->virtual);
- cfs_free(pg);
+ --raw_pages;
+ if (pg->virtual != NULL)
+ kmem_cache_free(raw_page_cache, pg->virtual);
+ kfree(pg);
}
void raw_page_death_row_clean(void)
/*
* kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
* block. (raw_page_done()->upl_abort() can block too) On the other
- * hand, cfs_free_page() may be called in non-blockable context. To
+ * hand, __free_page() may be called in non-blockable context. To
* work around this, park pages on global list when cannot block.
*/
if (get_preemption_level() > 0) {
}
}
-cfs_page_t *cfs_alloc_page(u_int32_t flags)
+struct page *alloc_page(u_int32_t flags)
{
- struct xnu_raw_page *page;
+ struct xnu_raw_page *page;
- /*
- * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
+ /*
+ * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
- page = cfs_alloc(sizeof *page, flags);
- if (page != NULL) {
- page->virtual = cfs_mem_cache_alloc(raw_page_cache, flags);
- if (page->virtual != NULL) {
- ++ raw_pages;
- page->header.type = XNU_PAGE_RAW;
- atomic_set(&page->count, 1);
- } else {
- cfs_free(page);
- page = NULL;
- }
- }
- return page != NULL ? &page->header : NULL;
+ page = kmalloc(sizeof *page, flags);
+ if (page != NULL) {
+ page->virtual = kmem_cache_alloc(raw_page_cache, flags);
+ if (page->virtual != NULL) {
+ ++raw_pages;
+ page->header.type = XNU_PAGE_RAW;
+ atomic_set(&page->count, 1);
+ } else {
+ kfree(page);
+ page = NULL;
+ }
+ }
+ return page != NULL ? &page->header : NULL;
}
-void cfs_free_page(cfs_page_t *pages)
+void __free_page(struct page *pages)
{
- free_raw_page(as_raw(pages));
+ free_raw_page(as_raw(pages));
}
-void cfs_get_page(cfs_page_t *p)
+void get_page(struct page *p)
{
- atomic_inc(&as_raw(p)->count);
+ atomic_inc(&as_raw(p)->count);
}
-int cfs_put_page_testzero(cfs_page_t *p)
+int cfs_put_page_testzero(struct page *p)
{
return atomic_dec_and_test(&as_raw(p)->count);
}
-int cfs_page_count(cfs_page_t *p)
+int page_count(struct page *p)
{
- return atomic_read(&as_raw(p)->count);
+ return atomic_read(&as_raw(p)->count);
}
/*
* Generic page operations
*/
-void *cfs_page_address(cfs_page_t *pg)
+void *page_address(struct page *pg)
{
- /*
- * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- LASSERT(page_type_is_valid(pg));
- return page_ops[pg->type]->page_address(pg);
+ /*
+ * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
+ LASSERT(page_type_is_valid(pg));
+ return page_ops[pg->type]->page_address(pg);
}
-void *cfs_kmap(cfs_page_t *pg)
+void *kmap(struct page *pg)
{
- LASSERT(page_type_is_valid(pg));
- return page_ops[pg->type]->page_map(pg);
+ LASSERT(page_type_is_valid(pg));
+ return page_ops[pg->type]->page_map(pg);
}
-void cfs_kunmap(cfs_page_t *pg)
+void kunmap(struct page *pg)
{
- LASSERT(page_type_is_valid(pg));
- page_ops[pg->type]->page_unmap(pg);
+ LASSERT(page_type_is_valid(pg));
+ page_ops[pg->type]->page_unmap(pg);
}
void xnu_page_ops_register(int type, struct xnu_page_ops *ops)
#define get_preemption_level() (0)
#endif
-void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+void *kmalloc(size_t nr_bytes, u_int32_t flags)
{
- int mflags;
+ int mflags;
- mflags = 0;
- if (flags & CFS_ALLOC_ATOMIC) {
- mflags |= M_NOWAIT;
- } else {
- LASSERT(get_preemption_level() == 0);
- mflags |= M_WAITOK;
- }
+ mflags = 0;
+ if (flags & GFP_ATOMIC) {
+ mflags |= M_NOWAIT;
+ } else {
+ LASSERT(get_preemption_level() == 0);
+ mflags |= M_WAITOK;
+ }
- if (flags & CFS_ALLOC_ZERO)
- mflags |= M_ZERO;
+ if (flags & __GFP_ZERO)
+ mflags |= M_ZERO;
- return _MALLOC(nr_bytes, M_TEMP, mflags);
+ return _MALLOC(nr_bytes, M_TEMP, mflags);
}
-void cfs_free(void *addr)
+void kfree(void *addr)
{
- return _FREE(addr, M_TEMP);
+ return _FREE(addr, M_TEMP);
}
-void *cfs_alloc_large(size_t nr_bytes)
+void *vmalloc(size_t nr_bytes)
{
- LASSERT(get_preemption_level() == 0);
- return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
+ LASSERT(get_preemption_level() == 0);
+ return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
}
-void cfs_free_large(void *addr)
+void vfree(void *addr)
{
- LASSERT(get_preemption_level() == 0);
- return _FREE(addr, M_TEMP);
+ LASSERT(get_preemption_level() == 0);
+ return _FREE(addr, M_TEMP);
}
/*
#endif
CFS_INIT_LIST_HEAD(&page_death_row);
spin_lock_init(&page_death_row_phylax);
- raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+ raw_page_cache = kmem_cache_create("raw-page", PAGE_CACHE_SIZE,
+ 0, 0, NULL);
return 0;
}
{
raw_page_death_row_clean();
spin_lock_done(&page_death_row_phylax);
- cfs_mem_cache_destroy(raw_page_cache);
+ kmem_cache_destroy(raw_page_cache);
#if CFS_INDIVIDUAL_ZONE
cfs_zone_nob.z_nob = NULL;
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
toobig = 1;
- nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
toobig = 1;
- nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}
tcd = &trace_data[0].tcd;
CFS_INIT_LIST_HEAD(&pages);
if (get_preemption_level() == 0)
- nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
+ nr_pages = trace_refill_stock(tcd, GFP_IOFS, &pages);
else
nr_pages = 0;
spin_lock(&trace_cpu_serializer);
max = TCD_MAX_PAGES;
} else {
max = (max / cfs_num_possible_cpus());
- max = (max << (20 - CFS_PAGE_SHIFT));
+ max = (max << (20 - PAGE_CACHE_SHIFT));
}
rc = cfs_tracefile_init(max);
do { \
if ((h)->cbh_flags & CBH_FLAG_ATOMIC_GROW) \
LIBCFS_CPT_ALLOC_GFP((ptr), h->cbh_cptab, h->cbh_cptid, \
- CBH_NOB, CFS_ALLOC_ATOMIC); \
+ CBH_NOB, GFP_ATOMIC); \
else \
LIBCFS_CPT_ALLOC((ptr), h->cbh_cptab, h->cbh_cptid, \
CBH_NOB); \
return -EBADF;
/* freed in group_rem */
- reg = cfs_alloc(sizeof(*reg), 0);
+ reg = kmalloc(sizeof(*reg), 0);
if (reg == NULL)
return -ENOMEM;
reg->kr_uid, reg->kr_fp, group);
if (reg->kr_fp != NULL)
fput(reg->kr_fp);
- cfs_free(reg);
+ kfree(reg);
}
}
up_write(&kg_sem);
lenz = strlen(str) + 1;
- dup_str = cfs_alloc(lenz, flags);
+ dup_str = kmalloc(lenz, flags);
if (dup_str == NULL)
return NULL;
int err;
const struct cfs_crypto_hash_type *type;
- hdesc = cfs_alloc(sizeof(*hdesc), 0);
+ hdesc = kmalloc(sizeof(*hdesc), 0);
if (hdesc == NULL)
return ERR_PTR(-ENOMEM);
err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
if (err) {
- cfs_free(hdesc);
+ kfree(hdesc);
return ERR_PTR(err);
}
return (struct cfs_crypto_hash_desc *)hdesc;
EXPORT_SYMBOL(cfs_crypto_hash_init);
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
- cfs_page_t *page, unsigned int offset,
+ struct page *page, unsigned int offset,
unsigned int len)
{
struct scatterlist sl;
if (hash_len == NULL) {
crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- cfs_free(hdesc);
+ kfree(hdesc);
return 0;
}
if (hash == NULL || *hash_len < size) {
return err;
}
crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- cfs_free(hdesc);
+ kfree(hdesc);
return err;
}
EXPORT_SYMBOL(cfs_crypto_hash_final);
* kmalloc size for 2.6.18 kernel is 128K */
unsigned int data_len = 1 * 128 * 1024;
- data = cfs_alloc(data_len, 0);
+ data = kmalloc(data_len, 0);
if (data == NULL) {
CERROR("Failed to allocate mem\n");
return -ENOMEM;
for (i = 0; i < CFS_HASH_ALG_MAX; i++)
cfs_crypto_performance_test(i, data, data_len);
- cfs_free(data);
+ kfree(data);
return 0;
}
{
struct mm_struct *mm;
char *buffer, *tmp_buf = NULL;
- int buf_len = CFS_PAGE_SIZE;
+ int buf_len = PAGE_CACHE_SIZE;
int key_len = strlen(key);
unsigned long addr;
int rc;
ENTRY;
- buffer = cfs_alloc(buf_len, CFS_ALLOC_USER);
+ buffer = kmalloc(buf_len, GFP_USER);
if (!buffer)
RETURN(-ENOMEM);
mm = get_task_mm(current);
if (!mm) {
- cfs_free(buffer);
+ kfree(buffer);
RETURN(-EINVAL);
}
out:
mmput(mm);
- cfs_free((void *)buffer);
+ kfree((void *)buffer);
if (tmp_buf)
- cfs_free((void *)tmp_buf);
+ kfree((void *)tmp_buf);
return rc;
}
EXPORT_SYMBOL(cfs_get_environ);
#include <linux/highmem.h>
#include <libcfs/libcfs.h>
-static unsigned int cfs_alloc_flags_to_gfp(u_int32_t flags)
-{
- unsigned int mflags = 0;
-
- if (flags & CFS_ALLOC_ATOMIC)
- mflags |= __GFP_HIGH;
- else
- mflags |= __GFP_WAIT;
- if (flags & CFS_ALLOC_NOWARN)
- mflags |= __GFP_NOWARN;
- if (flags & CFS_ALLOC_IO)
- mflags |= __GFP_IO;
- if (flags & CFS_ALLOC_FS)
- mflags |= __GFP_FS;
- if (flags & CFS_ALLOC_HIGHMEM)
- mflags |= __GFP_HIGHMEM;
- return mflags;
-}
-
-void *
-cfs_alloc(size_t nr_bytes, u_int32_t flags)
-{
- void *ptr = NULL;
-
- ptr = kmalloc(nr_bytes, cfs_alloc_flags_to_gfp(flags));
- if (ptr != NULL && (flags & CFS_ALLOC_ZERO))
- memset(ptr, 0, nr_bytes);
- return ptr;
-}
-
-void
-cfs_free(void *addr)
-{
- kfree(addr);
-}
-
-void *
-cfs_alloc_large(size_t nr_bytes)
-{
- return vmalloc(nr_bytes);
-}
-
-void
-cfs_free_large(void *addr)
-{
- vfree(addr);
-}
-
-cfs_page_t *cfs_alloc_page(unsigned int flags)
-{
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- return alloc_page(cfs_alloc_flags_to_gfp(flags));
-}
-
-void cfs_free_page(cfs_page_t *page)
-{
- __free_page(page);
-}
-
-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name, size_t size, size_t offset,
- unsigned long flags)
-{
-#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
- return kmem_cache_create(name, size, offset, flags, NULL, NULL);
-#else
- return kmem_cache_create(name, size, offset, flags, NULL);
-#endif
-}
-
-int
-cfs_mem_cache_destroy (cfs_mem_cache_t * cachep)
-{
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
- return kmem_cache_destroy(cachep);
-#else
- kmem_cache_destroy(cachep);
- return 0;
-#endif
-}
-
-void *
-cfs_mem_cache_alloc(cfs_mem_cache_t *cachep, int flags)
-{
- return kmem_cache_alloc(cachep, cfs_alloc_flags_to_gfp(flags));
-}
-
-void
-cfs_mem_cache_free(cfs_mem_cache_t *cachep, void *objp)
-{
- return kmem_cache_free(cachep, objp);
-}
-
-/**
- * Returns true if \a addr is an address of an allocated object in a slab \a
- * kmem. Used in assertions. This check is optimistically imprecise, i.e., it
- * occasionally returns true for the incorrect addresses, but if it returns
- * false, then the addresses is guaranteed to be incorrect.
- */
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
-{
-#ifdef CONFIG_SLAB
- struct page *page;
-
- /*
- * XXX Copy of mm/slab.c:virt_to_cache(). It won't work with other
- * allocators, like slub and slob.
- */
- page = virt_to_page(addr);
- if (unlikely(PageCompound(page)))
- page = (struct page *)page->private;
- return PageSlab(page) && ((void *)page->lru.next) == kmem;
-#else
- return 1;
-#endif
-}
-EXPORT_SYMBOL(cfs_mem_is_in_cache);
-
-
-EXPORT_SYMBOL(cfs_alloc);
-EXPORT_SYMBOL(cfs_free);
-EXPORT_SYMBOL(cfs_alloc_large);
-EXPORT_SYMBOL(cfs_free_large);
-EXPORT_SYMBOL(cfs_alloc_page);
-EXPORT_SYMBOL(cfs_free_page);
-EXPORT_SYMBOL(cfs_mem_cache_create);
-EXPORT_SYMBOL(cfs_mem_cache_destroy);
-EXPORT_SYMBOL(cfs_mem_cache_alloc);
-EXPORT_SYMBOL(cfs_mem_cache_free);
-
-/*
- * NB: we will rename some of above functions in another patch:
- * - rename cfs_alloc to cfs_malloc
- * - rename cfs_alloc/free_page to cfs_page_alloc/free
- * - rename cfs_alloc/free_large to cfs_vmalloc/vfree
- */
-
void *
cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes, unsigned int flags)
{
void *ptr;
- ptr = kmalloc_node(nr_bytes, cfs_alloc_flags_to_gfp(flags),
+ ptr = kmalloc_node(nr_bytes, flags,
cfs_cpt_spread_node(cptab, cpt));
- if (ptr != NULL && (flags & CFS_ALLOC_ZERO) != 0)
+ if (ptr != NULL && (flags & __GFP_ZERO) != 0)
memset(ptr, 0, nr_bytes);
return ptr;
}
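With cfs_alloc_flags_to_gfp() gone, cfs_cpt_malloc() callers pass kernel GFP masks straight through. A minimal caller sketch (foo_cpt_alloc is hypothetical, not part of this patch):

	/* zeroed, CPT-local allocation; __GFP_ZERO replaces CFS_ALLOC_ZERO */
	static void *foo_cpt_alloc(struct cfs_cpt_table *cptab, int cpt,
				   size_t size)
	{
		return cfs_cpt_malloc(cptab, cpt, size,
				      GFP_NOFS | __GFP_ZERO);
	}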
EXPORT_SYMBOL(cfs_cpt_vmalloc);
-cfs_page_t *
+struct page *
cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags)
{
- return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt),
- cfs_alloc_flags_to_gfp(flags), 0);
+ return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
}
EXPORT_SYMBOL(cfs_page_cpt_alloc);
void *
-cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, struct cfs_cpt_table *cptab,
+cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
int cpt, unsigned int flags)
{
- return kmem_cache_alloc_node(cachep, cfs_alloc_flags_to_gfp(flags),
+ return kmem_cache_alloc_node(cachep, flags,
cfs_cpt_spread_node(cptab, cpt));
}
EXPORT_SYMBOL(cfs_mem_cache_cpt_alloc);
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
- toobig = 1;
- nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
- CWARN("Too many interfaces: only enumerating first %d\n",
- nalloc);
- }
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+ toobig = 1;
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+ CWARN("Too many interfaces: only enumerating first %d\n",
+ nalloc);
+ }
LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
if (ifr == NULL) {
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT));
+ int total_mb = (num_physpages >> (20 - PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
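For orientation: with 4 KiB pages (PAGE_SHIFT == 12) the shift by (20 - PAGE_SHIFT) converts a page count into megabytes, e.g. 4 GiB of RAM gives num_physpages = 1048576, total_mb = 1048576 >> 8 = 4096, and the function returns MAX(512, 4096 * 80 / 100) = 3276 MB.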
*size = strnlen (knl_ptr, maxsize - 1) + 1;
- if (user_ptr != NULL) {
- if (user_size < 4)
- return (-EINVAL);
+ if (user_ptr != NULL) {
+ if (user_size < 4)
+ return -EINVAL;
- if (cfs_copy_to_user (user_ptr, knl_ptr, *size))
- return (-EFAULT);
+ if (copy_to_user(user_ptr, knl_ptr, *size))
+ return -EFAULT;
- /* Did I truncate the string? */
- if (knl_ptr[*size - 1] != 0)
- cfs_copy_to_user (user_ptr + *size - 4, "...", 4);
- }
+ /* Did I truncate the string? */
+ if (knl_ptr[*size - 1] != 0)
+ copy_to_user(user_ptr + *size - 4, "...", 4);
+ }
- return (0);
+ return 0;
}
int
continue;
for (j = 0; j < lwt_pages_per_cpu; j++) {
- memset (p->lwtp_events, 0, CFS_PAGE_SIZE);
+ memset(p->lwtp_events, 0, PAGE_CACHE_SIZE);
p = cfs_list_entry (p->lwtp_list.next,
lwt_page_t, lwtp_list);
}
int
-lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
- void *user_ptr, int user_size)
+lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size,
+ void *user_ptr, int user_size)
{
- const int events_per_page = CFS_PAGE_SIZE / sizeof(lwt_event_t);
- const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
- lwt_page_t *p;
- int i;
- int j;
+ const int events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
+ const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
+ lwt_page_t *p;
+ int i;
+ int j;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
return (-EPERM);
p = lwt_cpus[i].lwtc_current_page;
if (p == NULL)
- return (-ENODATA);
+ return -ENODATA;
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- if (cfs_copy_to_user(user_ptr, p->lwtp_events,
- bytes_per_page))
- return (-EFAULT);
+ for (j = 0; j < lwt_pages_per_cpu; j++) {
+ if (copy_to_user(user_ptr, p->lwtp_events,
+ bytes_per_page))
+ return -EFAULT;
user_ptr = ((char *)user_ptr) + bytes_per_page;
p = cfs_list_entry(p->lwtp_list.next,
/* NULL pointers, zero scalars */
memset (lwt_cpus, 0, sizeof (lwt_cpus));
- lwt_pages_per_cpu =
- LWT_MEMORY / (cfs_num_online_cpus() * CFS_PAGE_SIZE);
+ lwt_pages_per_cpu =
+ LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);
for (i = 0; i < cfs_num_online_cpus(); i++)
for (j = 0; j < lwt_pages_per_cpu; j++) {
- struct page *page = alloc_page (GFP_KERNEL);
+ struct page *page = alloc_page(GFP_KERNEL);
lwt_page_t *lwtp;
if (page == NULL) {
lwtp->lwtp_page = page;
lwtp->lwtp_events = page_address(page);
- memset (lwtp->lwtp_events, 0, CFS_PAGE_SIZE);
+ memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
if (j == 0) {
CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
void
kportal_memhog_free (struct libcfs_device_userstate *ldu)
{
- cfs_page_t **level0p = &ldu->ldu_memhog_root_page;
- cfs_page_t **level1p;
- cfs_page_t **level2p;
- int count1;
- int count2;
+ struct page **level0p = &ldu->ldu_memhog_root_page;
+ struct page **level1p;
+ struct page **level2p;
+ int count1;
+ int count2;
- if (*level0p != NULL) {
+ if (*level0p != NULL) {
+ level1p = (struct page **)page_address(*level0p);
+ count1 = 0;
- level1p = (cfs_page_t **)cfs_page_address(*level0p);
- count1 = 0;
+ while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+ *level1p != NULL) {
- while (count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) &&
- *level1p != NULL) {
+ level2p = (struct page **)page_address(*level1p);
+ count2 = 0;
- level2p = (cfs_page_t **)cfs_page_address(*level1p);
- count2 = 0;
+ while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+ *level2p != NULL) {
- while (count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) &&
- *level2p != NULL) {
+ __free_page(*level2p);
+ ldu->ldu_memhog_pages--;
+ level2p++;
+ count2++;
+ }
- cfs_free_page(*level2p);
- ldu->ldu_memhog_pages--;
- level2p++;
- count2++;
- }
-
- cfs_free_page(*level1p);
- ldu->ldu_memhog_pages--;
- level1p++;
- count1++;
- }
+ __free_page(*level1p);
+ ldu->ldu_memhog_pages--;
+ level1p++;
+ count1++;
+ }
- cfs_free_page(*level0p);
- ldu->ldu_memhog_pages--;
+ __free_page(*level0p);
+ ldu->ldu_memhog_pages--;
- *level0p = NULL;
- }
+ *level0p = NULL;
+ }
- LASSERT (ldu->ldu_memhog_pages == 0);
+ LASSERT(ldu->ldu_memhog_pages == 0);
}
int
kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags)
{
- cfs_page_t **level0p;
- cfs_page_t **level1p;
- cfs_page_t **level2p;
- int count1;
- int count2;
+ struct page **level0p;
+ struct page **level1p;
+ struct page **level2p;
+ int count1;
+ int count2;
- LASSERT (ldu->ldu_memhog_pages == 0);
- LASSERT (ldu->ldu_memhog_root_page == NULL);
+ LASSERT(ldu->ldu_memhog_pages == 0);
+ LASSERT(ldu->ldu_memhog_root_page == NULL);
- if (npages < 0)
- return -EINVAL;
+ if (npages < 0)
+ return -EINVAL;
- if (npages == 0)
- return 0;
+ if (npages == 0)
+ return 0;
- level0p = &ldu->ldu_memhog_root_page;
- *level0p = cfs_alloc_page(flags);
- if (*level0p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
+ level0p = &ldu->ldu_memhog_root_page;
+ *level0p = alloc_page(flags);
+ if (*level0p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
- level1p = (cfs_page_t **)cfs_page_address(*level0p);
- count1 = 0;
- memset(level1p, 0, CFS_PAGE_SIZE);
+ level1p = (struct page **)page_address(*level0p);
+ count1 = 0;
+ memset(level1p, 0, PAGE_CACHE_SIZE);
- while (ldu->ldu_memhog_pages < npages &&
- count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) {
+ while (ldu->ldu_memhog_pages < npages &&
+ count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
- if (cfs_signal_pending())
- return (-EINTR);
+ if (cfs_signal_pending())
+ return -EINTR;
- *level1p = cfs_alloc_page(flags);
- if (*level1p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
+ *level1p = alloc_page(flags);
+ if (*level1p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
- level2p = (cfs_page_t **)cfs_page_address(*level1p);
- count2 = 0;
- memset(level2p, 0, CFS_PAGE_SIZE);
+ level2p = (struct page **)page_address(*level1p);
+ count2 = 0;
+ memset(level2p, 0, PAGE_CACHE_SIZE);
- while (ldu->ldu_memhog_pages < npages &&
- count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) {
+ while (ldu->ldu_memhog_pages < npages &&
+ count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
- if (cfs_signal_pending())
- return (-EINTR);
+ if (cfs_signal_pending())
+ return -EINTR;
- *level2p = cfs_alloc_page(flags);
- if (*level2p == NULL)
- return (-ENOMEM);
- ldu->ldu_memhog_pages++;
+ *level2p = alloc_page(flags);
+ if (*level2p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
- level2p++;
- count2++;
- }
+ level2p++;
+ count2++;
+ }
- level1p++;
- count1++;
- }
+ level1p++;
+ count1++;
+ }
- return 0;
+ return 0;
}
/* called when opening /dev/device */
RETURN(err);
}
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
+static int libcfs_ioctl(struct cfs_psdev_file *pfile,
+ unsigned long cmd, void *arg)
{
- char *buf;
- struct libcfs_ioctl_data *data;
- int err = 0;
- ENTRY;
-
- LIBCFS_ALLOC_GFP(buf, 1024, CFS_ALLOC_STD);
- if (buf == NULL)
- RETURN(-ENOMEM);
+ char *buf;
+ struct libcfs_ioctl_data *data;
+ int err = 0;
+ ENTRY;
+
+ LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
+ if (buf == NULL)
+ RETURN(-ENOMEM);
/* 'cmd' and permissions get checked in our arch-specific caller */
if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
const char *format1, va_list args,
const char *format2, ...)
{
- struct timeval tv;
- int nob;
- int remain;
- va_list ap;
- char buf[CFS_PAGE_SIZE]; /* size 4096 used for compatimble
- * with linux, where message can`t
- * be exceed PAGE_SIZE */
+ struct timeval tv;
+ int nob;
+ int remain;
+ va_list ap;
+	char buf[PAGE_CACHE_SIZE]; /* size 4096 used for compatibility
+				    * with linux, where a message can't
+				    * exceed PAGE_SIZE */
int console = 0;
char *prefix = "Lustre";
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
- cfs_page_t *page;
- struct cfs_trace_page *tage;
-
- /* My caller is trying to free memory */
- if (!cfs_in_interrupt() && cfs_memory_pressure_get())
- return NULL;
-
- /*
- * Don't spam console with allocation failures: they will be reported
- * by upper layer anyway.
- */
- gfp |= CFS_ALLOC_NOWARN;
- page = cfs_alloc_page(gfp);
- if (page == NULL)
- return NULL;
+ struct page *page;
+ struct cfs_trace_page *tage;
- tage = cfs_alloc(sizeof(*tage), gfp);
- if (tage == NULL) {
- cfs_free_page(page);
- return NULL;
- }
+ /* My caller is trying to free memory */
+ if (!cfs_in_interrupt() && memory_pressure_get())
+ return NULL;
+
+ /*
+ * Don't spam console with allocation failures: they will be reported
+ * by upper layer anyway.
+ */
+ gfp |= __GFP_NOWARN;
+ page = alloc_page(gfp);
+ if (page == NULL)
+ return NULL;
+
+ tage = kmalloc(sizeof(*tage), gfp);
+ if (tage == NULL) {
+ __free_page(page);
+ return NULL;
+ }
- tage->page = page;
- cfs_atomic_inc(&cfs_tage_allocated);
- return tage;
+ tage->page = page;
+ cfs_atomic_inc(&cfs_tage_allocated);
+ return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
- __LASSERT(tage != NULL);
- __LASSERT(tage->page != NULL);
+ __LASSERT(tage != NULL);
+ __LASSERT(tage->page != NULL);
- cfs_free_page(tage->page);
- cfs_free(tage);
- cfs_atomic_dec(&cfs_tage_allocated);
+ __free_page(tage->page);
+ kfree(tage);
+ cfs_atomic_dec(&cfs_tage_allocated);
}
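The cfs_tage_alloc()/cfs_tage_free() hunk above uses the direct flag substitution applied throughout this patch; a hypothetical helper just to show the mapping:

	/* CFS_ALLOC_ATOMIC -> GFP_ATOMIC, CFS_ALLOC_NOWARN -> __GFP_NOWARN,
	 * CFS_ALLOC_ZERO -> __GFP_ZERO */
	static void *foo_alloc_atomic(size_t size)
	{
		return kmalloc(size, GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
	}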
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!cfs_list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= CFS_PAGE_SIZE)
+ if (tage->used + len <= PAGE_CACHE_SIZE)
return tage;
}
--tcd->tcd_cur_stock_pages;
cfs_list_del_init(&tage->linkage);
} else {
- tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
+ tage = cfs_tage_alloc(GFP_ATOMIC);
if (unlikely(tage == NULL)) {
- if ((!cfs_memory_pressure_get() ||
+ if ((!memory_pressure_get() ||
cfs_in_interrupt()) && printk_ratelimit())
printk(CFS_KERN_WARNING
"cannot allocate a tage (%ld)\n",
* from here: this will lead to infinite recursion.
*/
- if (len > CFS_PAGE_SIZE) {
+ if (len > PAGE_CACHE_SIZE) {
printk(CFS_KERN_ERR
"cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (tage == NULL) {
- if (needed + known_size > CFS_PAGE_SIZE)
+ if (needed + known_size > PAGE_CACHE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
goto console;
}
- string_buf = (char *)cfs_page_address(tage->page) +
+ string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
- max_nob = CFS_PAGE_SIZE - tage->used - known_size;
+ max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(CFS_KERN_EMERG "negative max_nob: %d\n",
max_nob);
"newline\n", file, msgdata->msg_line, msgdata->msg_fn);
header.ph_len = known_size + needed;
- debug_buf = (char *)cfs_page_address(tage->page) + tage->used;
+ debug_buf = (char *)page_address(tage->page) + tage->used;
if (libcfs_debug_binary) {
memcpy(debug_buf, &header, sizeof(header));
__LASSERT(debug_buf == string_buf);
tage->used += needed;
- __LASSERT (tage->used <= CFS_PAGE_SIZE);
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page, linkage) {
- char *p, *file, *fn;
- cfs_page_t *page;
+ char *p, *file, *fn;
+ struct page *page;
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- page = tage->page;
- p = cfs_page_address(page);
- while (p < ((char *)cfs_page_address(page) + tage->used)) {
+ page = tage->page;
+ p = page_address(page);
+ while (p < ((char *)page_address(page) + tage->used)) {
struct ptldebug_header *hdr;
int len;
hdr = (void *)p;
struct cfs_trace_page *tmp;
int rc;
- CFS_DECL_MMSPACE;
+ DECL_MMSPACE;
cfs_tracefile_write_lock();
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO */
- CFS_MMSPACE_OPEN;
+ MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
- rc = filp_write(filp, cfs_page_address(tage->page),
+ rc = filp_write(filp, page_address(tage->page),
tage->used, filp_poff(filp));
if (rc != (int)tage->used) {
printk(CFS_KERN_WARNING "wanted to write %u but wrote "
cfs_list_del(&tage->linkage);
cfs_tage_free(tage);
}
- CFS_MMSPACE_CLOSE;
+ MMSPACE_CLOSE;
rc = filp_fsync(filp);
if (rc)
printk(CFS_KERN_ERR "sync returns %d\n", rc);
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
- if (cfs_copy_from_user((void *)knl_buffer,
+ if (copy_from_user((void *)knl_buffer,
(void *)usr_buffer, usr_buffer_nob))
return -EFAULT;
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
- if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
+ if (copy_to_user(usr_buffer, knl_buffer, nob))
return -EFAULT;
if (append != NULL && nob < usr_buffer_nob) {
- if (cfs_copy_to_user(usr_buffer + nob, append, 1))
+ if (copy_to_user(usr_buffer + nob, append, 1))
return -EFAULT;
nob++;
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
- if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */
+ if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
return -EINVAL;
- *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
+ *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
if (*str == NULL)
return -ENOMEM;
void cfs_trace_free_string_buffer(char *str, int nob)
{
- cfs_free(str);
+ kfree(str);
}
int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
}
mb /= cfs_num_possible_cpus();
- pages = mb << (20 - CFS_PAGE_SHIFT);
+ pages = mb << (20 - PAGE_CACHE_SHIFT);
cfs_tracefile_write_lock();
cfs_tracefile_read_unlock();
- return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
+ return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
int last_loop = 0;
int rc;
- CFS_DECL_MMSPACE;
+ DECL_MMSPACE;
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
goto end_loop;
}
- CFS_MMSPACE_OPEN;
+ MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page,
else if (f_pos > (off_t)filp_size(filp))
f_pos = filp_size(filp);
- rc = filp_write(filp, cfs_page_address(tage->page),
+ rc = filp_write(filp, page_address(tage->page),
tage->used, &f_pos);
if (rc != (int)tage->used) {
printk(CFS_KERN_WARNING "wanted to write %u "
__LASSERT(cfs_list_empty(&pc.pc_pages));
}
}
- CFS_MMSPACE_CLOSE;
+ MMSPACE_CLOSE;
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
extern int libcfs_panic_in_progress;
extern int cfs_trace_max_debug_mb(void);
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
/*
* Private declare for tracefile
*/
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
/*
* page itself
*/
- cfs_page_t *page;
+ struct page *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
do { \
__LASSERT(tage != NULL); \
__LASSERT(tage->page != NULL); \
- __LASSERT(tage->used <= CFS_PAGE_SIZE); \
- __LASSERT(cfs_page_count(tage->page) > 0); \
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+ __LASSERT(page_count(tage->page) > 0); \
} while (0)
#endif /* LUSTRE_TRACEFILE_PRIVATE */
return ERR_PTR(-ENODEV);
}
- hdesc = cfs_alloc(sizeof(*hdesc) + ha->ha_ctx_size, 0);
+ hdesc = kmalloc(sizeof(*hdesc) + ha->ha_ctx_size, 0);
if (hdesc == NULL)
return ERR_PTR(-ENOMEM);
if (err == 0) {
return (struct cfs_crypto_hash_desc *) hdesc;
} else {
- cfs_free(hdesc);
+ kfree(hdesc);
return ERR_PTR(err);
}
}
}
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
- cfs_page_t *page, unsigned int offset,
+ struct page *page, unsigned int offset,
unsigned int len)
{
const void *p = page->addr + offset;
int err;
if (hash_len == NULL) {
- cfs_free(d);
+ kfree(d);
return 0;
}
if (hash == NULL || *hash_len < size) {
err = d->hd_hash->final(d->hd_ctx, hash, *hash_len);
if (err == 0) {
/* If get final digest success free hash descriptor */
- cfs_free(d);
+ kfree(d);
}
return err;
unsigned char *data;
unsigned int j, data_len = 1024 * 1024;
- data = cfs_alloc(data_len, 0);
+ data = kmalloc(data_len, 0);
if (data == NULL) {
CERROR("Failed to allocate mem\n");
return -ENOMEM;
for (i = 0; i < CFS_HASH_ALG_MAX; i++)
cfs_crypto_performance_test(i, data, data_len);
- cfs_free(data);
+ kfree(data);
return 0;
}
* Allocator
*/
-cfs_page_t *cfs_alloc_page(unsigned int flags)
+struct page *alloc_page(unsigned int flags)
{
- cfs_page_t *pg = malloc(sizeof(*pg));
+ struct page *pg = malloc(sizeof(*pg));
int rc = 0;
if (!pg)
pg->addr = NULL;
#if defined (__DARWIN__)
- pg->addr = valloc(CFS_PAGE_SIZE);
+ pg->addr = valloc(PAGE_CACHE_SIZE);
#elif defined (__WINNT__)
pg->addr = pgalloc(0);
#else
- rc = posix_memalign(&pg->addr, CFS_PAGE_SIZE, CFS_PAGE_SIZE);
+ rc = posix_memalign(&pg->addr, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE);
#endif
if (rc != 0 || pg->addr == NULL) {
free(pg);
return pg;
}
-void cfs_free_page(cfs_page_t *pg)
+void __free_page(struct page *pg)
{
#if defined (__WINNT__)
pgfree(pg->addr);
free(pg);
}
-void *cfs_page_address(cfs_page_t *pg)
+void *page_address(struct page *pg)
{
return pg->addr;
}
-void *cfs_kmap(cfs_page_t *pg)
+void *kmap(struct page *pg)
{
return pg->addr;
}
-void cfs_kunmap(cfs_page_t *pg)
+void kunmap(struct page *pg)
{
}
* SLAB allocator
*/
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *name, size_t objsize, size_t off, unsigned long flags)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+ unsigned long flags, void *ctor)
{
- cfs_mem_cache_t *c;
+ struct kmem_cache *c;
c = malloc(sizeof(*c));
if (!c)
return c;
}
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c)
+void kmem_cache_destroy(struct kmem_cache *c)
{
CDEBUG(D_MALLOC, "destroy slab cache %p, objsize %u\n", c, c->size);
free(c);
- return 0;
}
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp)
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp)
{
- return cfs_alloc(c->size, gfp);
+ return kmalloc(c->size, gfp);
}
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr)
+void kmem_cache_free(struct kmem_cache *c, void *addr)
{
- cfs_free(addr);
+ kfree(addr);
}
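Callers of this emulation, like the kernel ones, now use the kernel-style prototype with an explicit (here unused) constructor argument, and kmem_cache_destroy() no longer returns a status. A minimal sketch with hypothetical names:

	static struct kmem_cache *foo_cache;

	static int foo_cache_init(void)
	{
		/* name, object size, offset, flags, constructor */
		foo_cache = kmem_cache_create("foo_cache", 128, 0, 0, NULL);
		return foo_cache == NULL ? -ENOMEM : 0;
	}

	static void foo_cache_fini(void)
	{
		kmem_cache_destroy(foo_cache);	/* returns void now */
	}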
/**
* occasionally returns true for the incorrect addresses, but if it returns
* false, then the addresses is guaranteed to be incorrect.
*/
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
{
return 1;
}
* task slot routiens
*/
-PTASK_SLOT
-alloc_task_slot()
+PTASK_SLOT alloc_task_slot()
{
- PTASK_SLOT task = NULL;
-
- if (cfs_win_task_manger.slab) {
- task = cfs_mem_cache_alloc(cfs_win_task_manger.slab, 0);
- } else {
- task = cfs_alloc(sizeof(TASK_SLOT), 0);
- }
-
- return task;
+ if (cfs_win_task_manger.slab)
+ return kmem_cache_alloc(cfs_win_task_manger.slab, 0);
+ else
+ return kmalloc(sizeof(TASK_SLOT), 0);
}
void
cfs_init_event(&task->Event, TRUE, FALSE);
}
-void
-cleanup_task_slot(PTASK_SLOT task)
+void cleanup_task_slot(PTASK_SLOT task)
{
- if (task->task.pid) {
- cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid);
- }
+ if (task->task.pid)
+ cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid);
- if (cfs_win_task_manger.slab) {
- cfs_mem_cache_free(cfs_win_task_manger.slab, task);
- } else {
- cfs_free(task);
- }
+ if (cfs_win_task_manger.slab)
+ kmem_cache_free(cfs_win_task_manger.slab, task);
+ else
+ kfree(task);
}
/*
/* initialize the spinlock protection */
spin_lock_init(&cfs_win_task_manger.Lock);
- /* create slab memory cache */
- cfs_win_task_manger.slab = cfs_mem_cache_create(
- "TSLT", sizeof(TASK_SLOT), 0, 0);
+ /* create slab memory cache */
+ cfs_win_task_manger.slab = kmem_cache_create("TSLT", sizeof(TASK_SLOT),
+ 0, 0, NULL);
/* intialize the list header */
InitializeListHead(&(cfs_win_task_manger.TaskList));
spin_unlock(&cfs_win_task_manger.Lock);
- /* destroy the taskslot cache slab */
- cfs_mem_cache_destroy(cfs_win_task_manger.slab);
- memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN));
+ /* destroy the taskslot cache slab */
+ kmem_cache_destroy(cfs_win_task_manger.slab);
+ memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN));
}
return ERR_PTR(-EINVAL);
}
- AnsiString = cfs_alloc(sizeof(CHAR) * (NameLength + PrefixLength + 1),
- CFS_ALLOC_ZERO);
+ AnsiString = kmalloc(sizeof(CHAR) * (NameLength + PrefixLength + 1),
+ __GFP_ZERO);
if (NULL == AnsiString)
return ERR_PTR(-ENOMEM);
UnicodeString =
- cfs_alloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1),
- CFS_ALLOC_ZERO);
+ kmalloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1),
+ __GFP_ZERO);
if (NULL == UnicodeString) {
- cfs_free(AnsiString);
+ kfree(AnsiString);
return ERR_PTR(-ENOMEM);
}
/* Check the returned status of IoStatus... */
if (!NT_SUCCESS(IoStatus.Status)) {
- cfs_free(UnicodeString);
- cfs_free(AnsiString);
+ kfree(UnicodeString);
+ kfree(AnsiString);
return ERR_PTR(cfs_error_code(IoStatus.Status));
}
/* Allocate the file_t: libcfs file object */
- fp = cfs_alloc(sizeof(*fp) + NameLength, CFS_ALLOC_ZERO);
+ fp = kmalloc(sizeof(*fp) + NameLength, __GFP_ZERO);
if (NULL == fp) {
Status = ZwClose(FileHandle);
ASSERT(NT_SUCCESS(Status));
- cfs_free(UnicodeString);
- cfs_free(AnsiString);
+ kfree(UnicodeString);
+ kfree(AnsiString);
return ERR_PTR(-ENOMEM);
}
fp->f_mode = (mode_t)mode;
fp->f_count = 1;
- /* free the memory of temporary name strings */
- cfs_free(UnicodeString);
- cfs_free(AnsiString);
+ /* free the memory of temporary name strings */
+ kfree(UnicodeString);
+ kfree(AnsiString);
- return fp;
+ return fp;
}
Status = ZwClose(fp->f_handle);
ASSERT(NT_SUCCESS(Status));
- /* free the file flip structure */
- cfs_free(fp);
- return 0;
+ /* free the file flip structure */
+ kfree(fp);
+ return 0;
}
return;
}
if (cfs_atomic_dec_and_test(&de->d_count)) {
- cfs_free(de);
+ kfree(de);
}
}
#include <libcfs/libcfs.h>
-cfs_mem_cache_t *cfs_page_t_slab = NULL;
-cfs_mem_cache_t *cfs_page_p_slab = NULL;
+struct kmem_cache *cfs_page_t_slab;
+struct kmem_cache *cfs_page_p_slab;
-cfs_page_t * virt_to_page(void * addr)
+struct page *virt_to_page(void *addr)
{
- cfs_page_t *pg;
- pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
-
- if (NULL == pg) {
- cfs_enter_debugger();
- return NULL;
- }
+ struct page *pg;
+ pg = kmem_cache_alloc(cfs_page_t_slab, 0);
+
+ if (NULL == pg) {
+ cfs_enter_debugger();
+ return NULL;
+ }
- memset(pg, 0, sizeof(cfs_page_t));
- pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
- pg->mapping = addr;
- cfs_atomic_set(&pg->count, 1);
+ memset(pg, 0, sizeof(struct page));
+ pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
+ pg->mapping = addr;
+ cfs_atomic_set(&pg->count, 1);
set_bit(PG_virt, &(pg->flags));
- cfs_enter_debugger();
- return pg;
+ cfs_enter_debugger();
+ return pg;
}
/*
- * cfs_alloc_page
- * To allocate the cfs_page_t and also 1 page of memory
+ * alloc_page
+ * To allocate the struct page and also 1 page of memory
*
* Arguments:
* flags: the allocation options
*
* Return Value:
- * pointer to the cfs_page_t strcture in success or
+ *   pointer to the struct page structure on success or
* NULL in failure case
*
* Notes:
cfs_atomic_t libcfs_total_pages;
-cfs_page_t * cfs_alloc_page(int flags)
+struct page *alloc_page(int flags)
{
- cfs_page_t *pg;
- pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
-
- if (NULL == pg) {
- cfs_enter_debugger();
- return NULL;
- }
+ struct page *pg;
+ pg = kmem_cache_alloc(cfs_page_t_slab, 0);
- memset(pg, 0, sizeof(cfs_page_t));
- pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0);
- cfs_atomic_set(&pg->count, 1);
-
- if (pg->addr) {
- if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
- memset(pg->addr, 0, CFS_PAGE_SIZE);
- }
- cfs_atomic_inc(&libcfs_total_pages);
- } else {
- cfs_enter_debugger();
- cfs_mem_cache_free(cfs_page_t_slab, pg);
- pg = NULL;
- }
+ if (NULL == pg) {
+ cfs_enter_debugger();
+ return NULL;
+ }
+
+ memset(pg, 0, sizeof(struct page));
+ pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
+ cfs_atomic_set(&pg->count, 1);
+
+ if (pg->addr) {
+ if (cfs_is_flag_set(flags, __GFP_ZERO))
+ memset(pg->addr, 0, PAGE_CACHE_SIZE);
+ cfs_atomic_inc(&libcfs_total_pages);
+ } else {
+ cfs_enter_debugger();
+ kmem_cache_free(cfs_page_t_slab, pg);
+ pg = NULL;
+ }
- return pg;
+ return pg;
}
/*
- * cfs_free_page
- * To free the cfs_page_t including the page
+ * __free_page
+ * To free the struct page including the page
*
* Arguments:
- * pg: pointer to the cfs_page_t strcture
+ *   pg:  pointer to the struct page structure
*
* Return Value:
* N/A
* Notes:
* N/A
*/
-void cfs_free_page(cfs_page_t *pg)
+void __free_page(struct page *pg)
{
- ASSERT(pg != NULL);
- ASSERT(pg->addr != NULL);
- ASSERT(cfs_atomic_read(&pg->count) <= 1);
+ ASSERT(pg != NULL);
+ ASSERT(pg->addr != NULL);
+ ASSERT(cfs_atomic_read(&pg->count) <= 1);
if (!test_bit(PG_virt, &pg->flags)) {
- cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
- cfs_atomic_dec(&libcfs_total_pages);
- } else {
- cfs_enter_debugger();
- }
- cfs_mem_cache_free(cfs_page_t_slab, pg);
+ kmem_cache_free(cfs_page_p_slab, pg->addr);
+ cfs_atomic_dec(&libcfs_total_pages);
+ } else {
+ cfs_enter_debugger();
+ }
+ kmem_cache_free(cfs_page_t_slab, pg);
}
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
{
- KdPrint(("cfs_mem_is_in_cache: not implemented. (should maintain a"
- "chain to keep all allocations traced.)\n"));
- return 1;
+	KdPrint(("kmem_is_in_cache: not implemented. (should maintain a "
+		 "chain to keep all allocations traced.)\n"));
+ return 1;
}
/*
- * cfs_alloc
+ * kmalloc
* To allocate memory from system pool
*
* Arguments:
*/
void *
-cfs_alloc(size_t nr_bytes, u_int32_t flags)
+kmalloc(size_t nr_bytes, u_int32_t flags)
{
- void *ptr;
+ void *ptr;
- /* Ignore the flags: always allcoate from NonPagedPool */
- ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
- if (ptr != NULL && (flags & CFS_ALLOC_ZERO)) {
- memset(ptr, 0, nr_bytes);
- }
+	/* Ignore the flags: always allocate from NonPagedPool */
+ ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
+ if (ptr != NULL && (flags & __GFP_ZERO))
+ memset(ptr, 0, nr_bytes);
- if (!ptr) {
- cfs_enter_debugger();
- }
+ if (!ptr)
+ cfs_enter_debugger();
- return ptr;
+ return ptr;
}
/*
- * cfs_free
+ * kfree
* To free the sepcified memory to system pool
*
* Arguments:
*/
void
-cfs_free(void *addr)
+kfree(void *addr)
{
- ExFreePool(addr);
+ ExFreePool(addr);
}
/*
- * cfs_alloc_large
+ * vmalloc
* To allocate large block of memory from system pool
*
* Arguments:
*/
void *
-cfs_alloc_large(size_t nr_bytes)
+vmalloc(size_t nr_bytes)
{
- return cfs_alloc(nr_bytes, 0);
+ return kmalloc(nr_bytes, 0);
}
/*
- * cfs_free_large
+ * vfree
* To free the sepcified memory to system pool
*
* Arguments:
* N/A
*/
-void
-cfs_free_large(void *addr)
+void vfree(void *addr)
{
- cfs_free(addr);
+ kfree(addr);
}
/*
- * cfs_mem_cache_create
+ * kmem_cache_create
* To create a SLAB cache
*
* Arguments:
* 3, parameters C/D are removed.
*/
-cfs_mem_cache_t *
-cfs_mem_cache_create(
- const char * name,
- size_t size,
- size_t offset,
- unsigned long flags
- )
+struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ size_t offset, unsigned long flags,
+ void *ctor)
{
- cfs_mem_cache_t * kmc = NULL;
+ struct kmem_cache *kmc = NULL;
- /* The name of the SLAB could not exceed 20 chars */
+	/* The name of the SLAB must not exceed 20 chars */
- if (name && strlen(name) >= 20) {
- goto errorout;
- }
+ if (name && strlen(name) >= 20)
+ goto errorout;
- /* Allocate and initialize the SLAB strcture */
+	/* Allocate and initialize the SLAB structure */
- kmc = cfs_alloc (sizeof(cfs_mem_cache_t), 0);
+ kmc = kmalloc(sizeof(struct kmem_cache), 0);
- if (NULL == kmc) {
- goto errorout;
- }
+ if (NULL == kmc)
+ goto errorout;
- memset(kmc, 0, sizeof(cfs_mem_cache_t));
- kmc->flags = flags;
+ memset(kmc, 0, sizeof(struct kmem_cache));
+ kmc->flags = flags;
if (name) {
strcpy(&kmc->name[0], name);
}
/*
- * cfs_mem_cache_destroy
+ * kmem_cache_destroy
* To destroy the unused SLAB cache
*
* Arguments:
* N/A
*/
-int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc)
+void kmem_cache_destroy(struct kmem_cache *kmc)
{
- ASSERT(kmc != NULL);
+ ASSERT(kmc != NULL);
- ExDeleteNPagedLookasideList(&(kmc->npll));
+ ExDeleteNPagedLookasideList(&(kmc->npll));
- cfs_free(kmc);
+ kfree(kmc);
-	return 0;
}
/*
- * cfs_mem_cache_alloc
+ * kmem_cache_alloc
* To allocate an object (LookAside entry) from the SLAB
*
* Arguments:
* N/A
*/
-void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags)
+void *kmem_cache_alloc(struct kmem_cache *kmc, int flags)
{
- void *buf = NULL;
+ void *buf = NULL;
- buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
+ buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
- return buf;
+ return buf;
}
/*
- * cfs_mem_cache_free
+ * kmem_cache_free
* To free an object (LookAside entry) to the SLAB cache
*
* Arguments:
* N/A
*/
-void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
+void kmem_cache_free(struct kmem_cache *kmc, void *buf)
{
ExFreeToNPagedLookasideList(&(kmc->npll), buf);
}
CFS_LIST_HEAD(shrinker_hdr);
cfs_timer_t shrinker_timer = {0};
-struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
+struct shrinker *set_shrinker(int seeks, shrink_callback cb)
{
- struct cfs_shrinker * s = (struct cfs_shrinker *)
- cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
+ struct shrinker *s = (struct shrinker *)
+ kmalloc(sizeof(struct shrinker), __GFP_ZERO);
if (s) {
s->cb = cb;
s->seeks = seeks;
return s;
}
-void cfs_remove_shrinker(struct cfs_shrinker *s)
+void remove_shrinker(struct shrinker *s)
{
- struct cfs_shrinker *tmp;
+ struct shrinker *tmp;
spin_lock(&shrinker_guard);
#if TRUE
- cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
- struct cfs_shrinker, list) {
- if (tmp == s) {
- cfs_list_del(&tmp->list);
- break;
- }
- }
+ cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
+ struct shrinker, list) {
+ if (tmp == s) {
+ cfs_list_del(&tmp->list);
+ break;
+ }
+ }
#else
- cfs_list_del(&s->list);
+ cfs_list_del(&s->list);
#endif
spin_unlock(&shrinker_guard);
- cfs_free(s);
+ kfree(s);
}
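The renamed shrinker emulation is used the same way as before; a registration sketch with a hypothetical callback (its signature is inferred from how s->cb() is invoked below), assuming the emulation's declarations are in scope:

	static int foo_shrink(int nr, unsigned int gfp_mask)
	{
		/* release cached objects; return an estimate of what remains */
		return 0;
	}

	static struct shrinker *foo_shrinker;

	static void foo_shrinker_init(void)
	{
		foo_shrinker = set_shrinker(2 /* seeks */, foo_shrink);
	}

	static void foo_shrinker_fini(void)
	{
		if (foo_shrinker != NULL)
			remove_shrinker(foo_shrinker);
	}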
/* time ut test proc */
void shrinker_timer_proc(ulong_ptr_t arg)
{
- struct cfs_shrinker *s;
+ struct shrinker *s;
spin_lock(&shrinker_guard);
cfs_list_for_each_entry_typed(s, &shrinker_hdr,
- struct cfs_shrinker, list) {
+ struct shrinker, list) {
s->cb(s->nr, __GFP_FS);
}
spin_unlock(&shrinker_guard);
hdr = (struct libcfs_ioctl_hdr *)buf;
data = (struct libcfs_ioctl_data *)buf;
- err = cfs_copy_from_user(buf, (void *)arg, sizeof(*hdr));
+ err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
if (err)
RETURN(err);
RETURN(-EINVAL);
}
- err = cfs_copy_from_user(buf, (void *)arg, hdr->ioc_len);
+ err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
if (err)
RETURN(err);
int libcfs_ioctl_popdata(void *arg, void *data, int size)
{
- if (cfs_copy_to_user((char *)arg, data, size))
+ if (copy_to_user((char *)arg, data, size))
return -EFAULT;
return 0;
}
*/
void
-cfs_thread_proc(
- void * context
- )
+cfs_thread_proc(void *context)
{
cfs_thread_context_t * thread_context =
(cfs_thread_context_t *) context;
/* Free the context memory */
- cfs_free(context);
+ kfree(context);
/* Terminate this system thread */
{
cfs_handle_t thread = NULL;
NTSTATUS status;
- cfs_thread_context_t * context = NULL;
+ cfs_thread_context_t *context = NULL;
/* Allocate the context to be transferred to system thread */
- context = cfs_alloc(sizeof(cfs_thread_context_t), CFS_ALLOC_ZERO);
+ context = kmalloc(sizeof(cfs_thread_context_t), __GFP_ZERO);
if (!context) {
return ERR_PTR(-ENOMEM);
if (!NT_SUCCESS(status)) {
- cfs_free(context);
+ kfree(context);
/* We need translate the nt status to linux error code */
struct cfs_symbol *sym = NULL;
struct cfs_symbol *new = NULL;
- new = cfs_alloc(sizeof(struct cfs_symbol), CFS_ALLOC_ZERO);
- if (!new) {
- return (-ENOMEM);
- }
+ new = kmalloc(sizeof(struct cfs_symbol), __GFP_ZERO);
+ if (!new)
+ return -ENOMEM;
+
strncpy(new->name, name, CFS_SYMBOL_LEN);
new->value = (void *)value;
new->ref = 0;
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
up_write(&cfs_symbol_lock);
- cfs_free(new);
+ kfree(new);
return 0; /* alreay registerred */
}
}
if (!strcmp(sym->name, name)) {
LASSERT(sym->ref == 0);
cfs_list_del (&sym->sym_list);
- cfs_free(sym);
+ kfree(sym);
break;
}
}
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
LASSERT(sym->ref == 0);
cfs_list_del (&sym->sym_list);
- cfs_free(sym);
+ kfree(sym);
}
up_write(&cfs_symbol_lock);
return;
and kernel ntoskrnl.lib) */
cfs_libc_init();
- /* create slab memory caches for page alloctors */
- cfs_page_t_slab = cfs_mem_cache_create(
- "CPGT", sizeof(cfs_page_t), 0, 0 );
+	/* create slab memory caches for page allocators */
+ cfs_page_t_slab = kmem_cache_create("CPGT", sizeof(struct page),
+ 0, 0, NULL);
- cfs_page_p_slab = cfs_mem_cache_create(
- "CPGP", CFS_PAGE_SIZE, 0, 0 );
+ cfs_page_p_slab = kmem_cache_create("CPGP", PAGE_CACHE_SIZE,
+ 0, 0, NULL);
if ( cfs_page_t_slab == NULL ||
cfs_page_p_slab == NULL ){
errorout:
- if (rc != 0) {
- /* destroy the taskslot cache slab */
- if (cfs_page_t_slab) {
- cfs_mem_cache_destroy(cfs_page_t_slab);
- }
- if (cfs_page_p_slab) {
- cfs_mem_cache_destroy(cfs_page_p_slab);
- }
- }
+ if (rc != 0) {
+ /* destroy the taskslot cache slab */
+ if (cfs_page_t_slab)
+ kmem_cache_destroy(cfs_page_t_slab);
+ if (cfs_page_p_slab)
+ kmem_cache_destroy(cfs_page_p_slab);
+ }
return rc;
}
/* destroy the taskslot cache slab */
if (cfs_page_t_slab) {
- cfs_mem_cache_destroy(cfs_page_t_slab);
+		kmem_cache_destroy(cfs_page_t_slab);
}
if (cfs_page_p_slab) {
- cfs_mem_cache_destroy(cfs_page_p_slab);
+		kmem_cache_destroy(cfs_page_p_slab);
}
return;
/* SLAB object for cfs_proc_entry_t allocation */
-cfs_mem_cache_t * proc_entry_cache = NULL;
+struct kmem_cache *proc_entry_cache;
/* root node for sysctl table */
cfs_sysctl_table_header_t root_table_header;
char *start;
cfs_proc_entry_t * dp;
- dp = (cfs_proc_entry_t *) file->f_inode->i_priv;
- if (!(page = (char*) cfs_alloc(CFS_PAGE_SIZE, 0)))
- return -ENOMEM;
+ dp = (cfs_proc_entry_t *) file->f_inode->i_priv;
+ page = (char *) kmalloc(PAGE_CACHE_SIZE, 0);
+ if (page == NULL)
+ return -ENOMEM;
while ((nbytes > 0) && !eof) {
break;
}
- n -= cfs_copy_to_user((void *)buf, start, n);
+ n -= copy_to_user((void *)buf, start, n);
if (n == 0) {
if (retval == 0)
retval = -EFAULT;
buf += n;
retval += n;
}
- cfs_free(page);
+ kfree(page);
- return retval;
+ return retval;
}
static ssize_t
{
cfs_proc_entry_t * entry = NULL;
- entry = cfs_mem_cache_alloc(proc_entry_cache, 0);
- if (!entry) {
- return NULL;
- }
+ entry = kmem_cache_alloc(proc_entry_cache, 0);
+ if (!entry)
+ return NULL;
memset(entry, 0, sizeof(cfs_proc_entry_t));
void
proc_free_entry(cfs_proc_entry_t * entry)
-
{
- ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC);
-
- cfs_mem_cache_free(proc_entry_cache, entry);
+ ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC);
+ kmem_cache_free(proc_entry_cache, entry);
}
/* dissect the path string for a given full proc path */
parent = root;
entry = NULL;
- ename = cfs_alloc(0x21, CFS_ALLOC_ZERO);
+ ename = kmalloc(0x21, __GFP_ZERO);
- if (ename == NULL) {
- goto errorout;
- }
+ if (ename == NULL)
+ goto errorout;
again:
errorout:
if (ename) {
- cfs_free(ename);
+ kfree(ename);
}
return entry;
entry = proc_alloc_entry();
memcpy(entry->name, ename, flen);
- if (entry) {
- if(!proc_insert_splay(parent, entry)) {
- proc_free_entry(entry);
- entry = NULL;
- }
- }
+ if (entry && !proc_insert_splay(parent, entry)) {
+ proc_free_entry(entry);
+ entry = NULL;
+ }
}
if (!entry) {
void proc_destroy_fs()
{
- LOCK_PROCFS();
+ LOCK_PROCFS();
- if (cfs_proc_root) {
- proc_destroy_splay(cfs_proc_root);
- }
+ if (cfs_proc_root)
+ proc_destroy_splay(cfs_proc_root);
- if (proc_entry_cache) {
- cfs_mem_cache_destroy(proc_entry_cache);
- }
+ if (proc_entry_cache)
+ kmem_cache_destroy(proc_entry_cache);
- UNLOCK_PROCFS();
+ UNLOCK_PROCFS();
}
static char proc_item_path[512];
CFS_INIT_LIST_HEAD(&(root_table_header.ctl_entry));
INIT_PROCFS_LOCK();
- proc_entry_cache = cfs_mem_cache_create(
- NULL,
- sizeof(cfs_proc_entry_t),
- 0,
- 0
- );
+ proc_entry_cache = kmem_cache_create(NULL, sizeof(cfs_proc_entry_t),
+ 0, 0, NULL);
if (!proc_entry_cache) {
return (-ENOMEM);
return -ENOTDIR;
if (oldval && oldlenp) {
- if(get_user(len, oldlenp))
+ if (get_user(len, oldlenp))
return -EFAULT;
- if (len) {
- l = strlen(table->data);
- if (len > l) len = l;
- if (len >= table->maxlen)
- len = table->maxlen;
- if(cfs_copy_to_user(oldval, table->data, len))
- return -EFAULT;
- if(put_user(0, ((char *) oldval) + len))
- return -EFAULT;
- if(put_user(len, oldlenp))
- return -EFAULT;
- }
+ if (len) {
+ l = strlen(table->data);
+ if (len > l)
+ len = l;
+ if (len >= table->maxlen)
+ len = table->maxlen;
+ if (copy_to_user(oldval, table->data, len))
+ return -EFAULT;
+ if (put_user(0, ((char *) oldval) + len))
+ return -EFAULT;
+ if (put_user(len, oldlenp))
+ return -EFAULT;
+ }
}
if (newval && newlen) {
len = newlen;
if (len > table->maxlen)
len = table->maxlen;
- if(cfs_copy_from_user(table->data, newval, len))
+ if (copy_from_user(table->data, newval, len))
return -EFAULT;
if (len == table->maxlen)
len--;
if (write) {
while (left) {
char c;
- if(get_user(c,(char *) buffer))
- return -EFAULT;
+ if (get_user(c, (char *)buffer))
+ return -EFAULT;
if (!isspace(c))
- break;
+ break;
left--;
- ((char *) buffer)++;
+ ((char *)buffer)++;
}
if (!left)
break;
len = left;
if (len > TMPBUFLEN-1)
len = TMPBUFLEN-1;
- if(cfs_copy_from_user(buf, buffer, len))
+ if (copy_from_user(buf, buffer, len))
return -EFAULT;
buf[len] = 0;
p = buf;
val = -val;
(char *)buffer += len;
left -= len;
- switch(op) {
- case OP_SET: *i = val; break;
- case OP_AND: *i &= val; break;
- case OP_OR: *i |= val; break;
- case OP_MAX: if(*i < val)
- *i = val;
- break;
- case OP_MIN: if(*i > val)
- *i = val;
- break;
- }
+	switch (op) {
+ case OP_SET:
+ *i = val;
+ break;
+ case OP_AND:
+ *i &= val;
+ break;
+ case OP_OR:
+ *i |= val;
+ break;
+ case OP_MAX:
+ if (*i < val)
+ *i = val;
+ break;
+ case OP_MIN:
+ if (*i > val)
+ *i = val;
+ break;
+ }
} else {
p = buf;
if (!first)
len = strlen(buf);
if (len > left)
len = left;
- if(cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
left -= len;
(char *)buffer += len;
}
if (!write && !first && left) {
- if(put_user('\n', (char *) buffer))
+ if (put_user('\n', (char *) buffer))
return -EFAULT;
left--, ((char *)buffer)++;
}
p = (char *) buffer;
while (left) {
char c;
- if(get_user(c, p++))
+ if (get_user(c, p++))
return -EFAULT;
if (!isspace(c))
break;
len = 0;
p = buffer;
while (len < *lenp) {
- if(get_user(c, p++))
+ if (get_user(c, p++))
return -EFAULT;
if (c == 0 || c == '\n')
break;
}
if (len >= (size_t)table->maxlen)
len = (size_t)table->maxlen-1;
- if(cfs_copy_from_user(table->data, buffer, len))
+ if (copy_from_user(table->data, buffer, len))
return -EFAULT;
((char *) table->data)[len] = 0;
filp->f_pos += *lenp;
if (len > *lenp)
len = *lenp;
if (len)
- if(cfs_copy_to_user(buffer, table->data, len))
+ if (copy_to_user(buffer, table->data, len))
return -EFAULT;
if (len < *lenp) {
- if(put_user('\n', ((char *) buffer) + len))
+ if (put_user('\n', ((char *) buffer) + len))
return -EFAULT;
len++;
}
if (len) {
if (len > (size_t)table->maxlen)
len = (size_t)table->maxlen;
- if(cfs_copy_to_user(oldval, table->data, len))
+ if (copy_to_user(oldval, table->data, len))
return -EFAULT;
- if(put_user(len, oldlenp))
+ if (put_user(len, oldlenp))
return -EFAULT;
}
}
len = newlen;
if (len > (size_t)table->maxlen)
len = (size_t)table->maxlen;
- if(cfs_copy_from_user(table->data, newval, len))
+ if (copy_from_user(table->data, newval, len))
return -EFAULT;
}
}
newval, newlen, head->ctl_table,
&context);
if (context)
- cfs_free(context);
+ kfree(context);
if (error != -ENOTDIR)
return error;
tmp = tmp->next;
int insert_at_head)
{
struct ctl_table_header *tmp;
- tmp = cfs_alloc(sizeof(struct ctl_table_header), 0);
+ tmp = kmalloc(sizeof(struct ctl_table_header), 0);
if (!tmp)
return NULL;
tmp->ctl_table = table;
#ifdef CONFIG_PROC_FS
unregister_proc_table(header->ctl_table, cfs_proc_sys);
#endif
- cfs_free(header);
+ kfree(header);
}
if (fp == NULL)
return NULL;
- fh = cfs_alloc(sizeof(*fh), CFS_ALLOC_ZERO);
+ fh = kmalloc(sizeof(*fh), __GFP_ZERO);
if (fh == NULL)
return NULL;
- fh->f_inode = cfs_alloc(sizeof(struct inode), CFS_ALLOC_ZERO);
+ fh->f_inode = kmalloc(sizeof(struct inode), __GFP_ZERO);
if (!fh->f_inode) {
- cfs_free(fh);
+ kfree(fh);
return NULL;
}
}
if (0 != rc) {
- cfs_free(fh->f_inode);
- cfs_free(fh);
+ kfree(fh->f_inode);
+ kfree(fh);
return NULL;
}
fp->nlink--;
}
- cfs_free(fh->f_inode);
- cfs_free(fh);
+ kfree(fh->f_inode);
+ kfree(fh);
return rc;
}
/* if not empty - flush it first */
if (m->count) {
n = min(m->count, size);
- err = cfs_copy_to_user(buf, m->buf + m->from, n);
+ err = copy_to_user(buf, m->buf + m->from, n);
if (err)
goto Efault;
m->count -= n;
if (m->count < m->size)
goto Fill;
m->op->stop(m, p);
- cfs_free(m->buf);
+ kfree(m->buf);
m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
if (!m->buf)
goto Enomem;
}
m->op->stop(m, p);
n = min(m->count, size);
- err = cfs_copy_to_user(buf, m->buf, n);
+ err = copy_to_user(buf, m->buf, n);
if (err)
goto Efault;
copied += n;
Eoverflow:
m->op->stop(m, p);
- cfs_free(m->buf);
- m->buf = cfs_alloc(m->size <<= 1, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
+ kfree(m->buf);
+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | __GFP_ZERO);
return !m->buf ? -ENOMEM : -EAGAIN;
}
struct seq_file *m = (struct seq_file *)file->private_data;
if (m) {
if (m->buf)
- cfs_free(m->buf);
- cfs_free(m);
+ kfree(m->buf);
+ kfree(m);
}
return 0;
}
if (!res)
((struct seq_file *)file->private_data)->private = data;
else
- cfs_free(op);
+ kfree(op);
}
return res;
}
{
const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
int res = seq_release(inode, file);
- cfs_free((void *)op);
+ kfree((void *)op);
return res;
}
EXPORT_SYMBOL(single_release);
{
struct seq_file *seq = file->private_data;
- cfs_free(seq->private);
+ kfree(seq->private);
seq->private = NULL;
return seq_release(inode, file);
}
void *private;
struct seq_file *seq;
- private = cfs_alloc(psize, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
+ private = kmalloc(psize, GFP_KERNEL | __GFP_ZERO);
if (private == NULL)
goto out;
return private;
out_free:
- cfs_free(private);
+ kfree(private);
out:
return NULL;
}
} else {
- KsTsdu = (PKS_TSDU) cfs_mem_cache_alloc(
+ KsTsdu = (PKS_TSDU) kmem_cache_alloc(
ks_data.ksnd_tsdu_slab, 0);
}
PKS_TSDU KsTsdu
)
{
- cfs_mem_cache_free(
+ kmem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
list = RemoveHeadList(&ks_data.ksnd_addrs_list);
slot = CONTAINING_RECORD(list, ks_addr_slot_t, link);
- cfs_free(slot);
+ kfree(slot);
ks_data.ksnd_naddrs--;
}
return;
}
- slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
+ slot = kmalloc(sizeof(ks_addr_slot_t) + DeviceName->Length, __GFP_ZERO);
if (slot != NULL) {
spin_lock(&ks_data.ksnd_addrs_lock);
InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
/* free the Context structure... */
ASSERT(Context->Magic == KS_TCP_CONTEXT_MAGIC);
Context->Magic = 'CDAB';
- cfs_free(Context);
+ kfree(Context);
}
/* free the Irp */
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
/* free the Irp structure */
/* there's still data in tdi internal queue, we need issue a new
Irp to receive all of them. first allocate the tcp context */
- context = cfs_alloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
+ context = kmalloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
if (!context) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto errorout;
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
ks_abort_tconn(tconn);
ks_tconn_t * tconn = NULL;
/* allocate ksoc_tconn_t from the slab cache memory */
- tconn = (ks_tconn_t *)cfs_mem_cache_alloc(
- ks_data.ksnd_tconn_slab, CFS_ALLOC_ZERO);
+ tconn = (ks_tconn_t *)kmem_cache_alloc(
+ ks_data.ksnd_tconn_slab, __GFP_ZERO);
if (tconn) {
spin_unlock(&(ks_data.ksnd_tconn_lock));
/* free the structure memory */
- cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
+ kmem_cache_free(ks_data.ksnd_tconn_slab, tconn);
KsPrint((3, "ks_free_tconn: tconn %p is freed.\n", tconn));
}
length = KsQueryMdlsSize(mdl);
/* we need allocate the ks_tx_t structure from memory pool. */
- context = cfs_alloc(sizeof(ks_tdi_tx_t), 0);
+ context = kmalloc(sizeof(ks_tdi_tx_t), 0);
if (!context) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto errorout;
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
/* here need free the Irp. */
CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
- ks_data.ksnd_tconn_slab = cfs_mem_cache_create(
- "tcon", sizeof(ks_tconn_t) , 0, 0);
+ ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t),
+ 0, 0, NULL);
if (!ks_data.ksnd_tconn_slab) {
rc = -ENOMEM;
spin_lock_init(&ks_data.ksnd_tsdu_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
- ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
- "tsdu", ks_data.ksnd_tsdu_size, 0, 0);
+ ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size,
+ 0, 0, NULL);
if (!ks_data.ksnd_tsdu_slab) {
rc = -ENOMEM;
if (ks_data.ksnd_engine_nums < 4) {
ks_data.ksnd_engine_nums = 4;
}
- ks_data.ksnd_engine_mgr = cfs_alloc(sizeof(ks_engine_mgr_t) *
- ks_data.ksnd_engine_nums,CFS_ALLOC_ZERO);
+ ks_data.ksnd_engine_mgr = kmalloc(sizeof(ks_engine_mgr_t) *
+ ks_data.ksnd_engine_nums, __GFP_ZERO);
if (ks_data.ksnd_engine_mgr == NULL) {
rc = -ENOMEM;
goto errorout;
/* do cleanup in case we get failures */
if (rc < 0) {
if (ks_data.ksnd_tconn_slab) {
- cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+			kmem_cache_destroy(ks_data.ksnd_tconn_slab);
ks_data.ksnd_tconn_slab = NULL;
}
}
cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
/* it's safe to delete the tconn slab ... */
- cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+	kmem_cache_destroy(ks_data.ksnd_tconn_slab);
ks_data.ksnd_tconn_slab = NULL;
/* clean up all the tsud buffers in the free list */
cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
- cfs_mem_cache_free(
+ kmem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
spin_unlock(&(ks_data.ksnd_tsdu_lock));
/* it's safe to delete the tsdu slab ... */
- cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
+	kmem_cache_destroy(ks_data.ksnd_tsdu_slab);
ks_data.ksnd_tsdu_slab = NULL;
/* good! it's smooth to do the cleaning up...*/
spin_lock(&ks_data.ksnd_addrs_lock);
- *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
+ *names = kmalloc(sizeof(char *) * ks_data.ksnd_naddrs, __GFP_ZERO);
if (*names == NULL) {
goto errorout;
}
void libcfs_ipif_free_enumeration(char **names, int n)
{
if (names) {
- cfs_free(names);
+ kfree(names);
}
}
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
cfs_trace_data[i] =
- cfs_alloc(sizeof(union cfs_trace_data_union) * \
- CFS_NR_CPUS, CFS_ALLOC_KERNEL);
+ kmalloc(sizeof(union cfs_trace_data_union) * \
+ CFS_NR_CPUS, GFP_KERNEL);
if (cfs_trace_data[i] == NULL)
goto out;
}
for (i = 0; i < cfs_num_possible_cpus(); i++)
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
cfs_trace_console_buffers[i][j] =
- cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
- CFS_ALLOC_KERNEL);
+ kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ GFP_KERNEL);
if (cfs_trace_console_buffers[i][j] == NULL)
goto out;
for (i = 0; i < cfs_num_possible_cpus(); i++) {
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
if (cfs_trace_console_buffers[i][j] != NULL) {
- cfs_free(cfs_trace_console_buffers[i][j]);
+ kfree(cfs_trace_console_buffers[i][j]);
cfs_trace_console_buffers[i][j] = NULL;
}
}
}
for (i = 0; cfs_trace_data[i] != NULL; i++) {
- cfs_free(cfs_trace_data[i]);
+ kfree(cfs_trace_data[i]);
cfs_trace_data[i] = NULL;
}
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
+ int total_mb = (num_physpages >> (20 - PAGE_CACHE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
{
LPVOID page;
- page = VirtualAlloc(NULL, CFS_PAGE_SIZE << factor,
+ page = VirtualAlloc(NULL, PAGE_CACHE_SIZE << factor,
MEM_COMMIT, PAGE_READWRITE);
return page;
}
while (idp->id_free_cnt < IDR_FREE_MAX) {
struct idr_layer *new;
- new = cfs_alloc(sizeof(struct idr_layer), CFS_ALLOC_ZERO);
+ new = kmalloc(sizeof(struct idr_layer), __GFP_ZERO);
if(new == NULL)
return (0);
free_layer(idp, new);
}
while (idp->id_free_cnt >= IDR_FREE_MAX) {
p = alloc_layer(idp);
- cfs_free(p);
+ kfree(p);
}
return 0;
}
struct idr_context *cfs_idr_init()
{
struct idr_context * idp = NULL;
- idp = cfs_alloc(sizeof(struct idr_context), 0);
+ idp = kmalloc(sizeof(struct idr_context), 0);
if (idp) {
memset(idp, 0, sizeof(struct idr_context));
}
void cfs_idr_exit(struct idr_context *idp)
{
if (idp) {
- cfs_free(idp);
+ kfree(idp);
}
}
/*
* XXX Liang:
*
- * Temporary fix, because lnet_me_free()->cfs_free->FREE() can be blocked in xnu,
+ * Temporary fix, because lnet_me_free()->kfree->FREE() can be blocked in xnu,
* at then same time we've taken LNET_LOCK(), which is a spinlock.
* by using LNET_USE_LIB_FREELIST, we can avoid calling of FREE().
*
* A page-based fragment of a MD.
*/
typedef struct {
- /** Pointer to the page where the fragment resides */
- cfs_page_t *kiov_page;
- /** Length in bytes of the fragment */
- unsigned int kiov_len;
- /**
- * Starting offset of the fragment within the page. Note that the
- * end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= CFS_PAGE_SIZE.
- */
- unsigned int kiov_offset;
+ /** Pointer to the page where the fragment resides */
+ struct page *kiov_page;
+ /** Length in bytes of the fragment */
+ unsigned int kiov_len;
+ /**
+ * Starting offset of the fragment within the page. Note that the
+ * end of the fragment must not pass the end of the page; i.e.,
+ * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ */
+ unsigned int kiov_offset;
} lnet_kiov_t;
/** @} lnet_md */
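A minimal sketch of filling one fragment of the descriptor above while honouring the constraint on kiov_len + kiov_offset (helper name is hypothetical):

	static void foo_fill_kiov(lnet_kiov_t *kiov, struct page *page,
				  unsigned int nob)
	{
		kiov->kiov_page   = page;
		kiov->kiov_offset = 0;
		kiov->kiov_len    = min_t(unsigned int, nob, PAGE_CACHE_SIZE);

		/* the fragment must not run past the end of the page */
		LASSERT(kiov->kiov_offset + kiov->kiov_len <= PAGE_CACHE_SIZE);
	}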
for (i = 0; i < npages; i++) {
if (p->ibp_pages[i] != NULL)
- cfs_free_page(p->ibp_pages[i]);
+ __free_page(p->ibp_pages[i]);
}
LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
for (i = 0; i < npages; i++) {
p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
- CFS_ALLOC_IO);
+ __GFP_IO);
if (p->ibp_pages[i] == NULL) {
CERROR("Can't allocate page %d of %d\n", i, npages);
kiblnd_free_pages(p);
LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs));
if (kptllnd_data.kptl_rx_cache != NULL)
- cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
+ kmem_cache_destroy(kptllnd_data.kptl_rx_cache);
if (kptllnd_data.kptl_peers != NULL)
LIBCFS_FREE(kptllnd_data.kptl_peers,
kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);
kptllnd_data.kptl_rx_cache =
- cfs_mem_cache_create("ptllnd_rx",
+ kmem_cache_create("ptllnd_rx",
sizeof(kptl_rx_t) +
*kptllnd_tunables.kptl_max_msg_size,
0, /* offset */
cfs_waitq_t kptl_watchdog_waitq; /* watchdog sleeps here */
kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
- cfs_mem_cache_t* kptl_rx_cache; /* rx descripter cache */
+	struct kmem_cache *kptl_rx_cache; /* rx descriptor cache */
cfs_atomic_t kptl_ntx; /* # tx descs allocated */
spinlock_t kptl_tx_lock; /* serialise idle tx list*/
return NULL;
}
- rx = cfs_mem_cache_alloc(kptllnd_data.kptl_rx_cache, CFS_ALLOC_ATOMIC);
+ rx = kmem_cache_alloc(kptllnd_data.kptl_rx_cache, GFP_ATOMIC);
if (rx == NULL) {
CERROR("Failed to allocate rx\n");
return NULL;
kptllnd_peer_decref(peer);
}
- cfs_mem_cache_free(kptllnd_data.kptl_rx_cache, rx);
+ kmem_cache_free(kptllnd_data.kptl_rx_cache, rx);
}
void
int i;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
*/
rc = -sock_send(sock, &msg, MSG_DONTWAIT, &sndlen);
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc == 0)
rc = sndlen;
return rc;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + \
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + \
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
rc = -sock_receive(C2B_SOCK(conn->ksnc_sock), &msg, MSG_DONTWAIT, &rcvlen);
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc == 0)
rc = rcvlen;
return (rc);
CFS_DECL_NET_DATA;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
CFS_NET_EX;
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc != 0) {
if (suio.uio_resid != nob &&\
CFS_DECL_NET_DATA;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
CFS_NET_EX;
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc){
if (ruio.uio_resid != nob && \
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1))
+ (kiov[i].kiov_offset + kiov[i].kiov_len !=
+ PAGE_CACHE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
for (i = 0; i < n_ids; i++) {
tmpid.pid = info->pi_pid;
tmpid.nid = info->pi_ni[i].ns_nid;
-#ifdef __KERNEL__
- if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+ if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
goto out_1;
-#else
- ids[i] = tmpid;
-#endif
}
rc = info->pi_nnis;
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
+ lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;
siov->kiov_len - soffset);
this_nob = MIN(this_nob, nob);
- if (daddr == NULL)
- daddr = ((char *)cfs_kmap(diov->kiov_page)) +
- diov->kiov_offset + doffset;
- if (saddr == NULL)
- saddr = ((char *)cfs_kmap(siov->kiov_page)) +
- siov->kiov_offset + soffset;
-
- /* Vanishing risk of kmap deadlock when mapping 2 pages.
- * However in practice at least one of the kiovs will be mapped
- * kernel pages and the map/unmap will be NOOPs */
-
- memcpy (daddr, saddr, this_nob);
- nob -= this_nob;
-
- if (diov->kiov_len > doffset + this_nob) {
- daddr += this_nob;
- doffset += this_nob;
- } else {
- cfs_kunmap(diov->kiov_page);
- daddr = NULL;
- diov++;
- ndiov--;
- doffset = 0;
- }
+ if (daddr == NULL)
+ daddr = ((char *)kmap(diov->kiov_page)) +
+ diov->kiov_offset + doffset;
+ if (saddr == NULL)
+ saddr = ((char *)kmap(siov->kiov_page)) +
+ siov->kiov_offset + soffset;
+
+ /* Vanishing risk of kmap deadlock when mapping 2 pages.
+ * However in practice at least one of the kiovs will be mapped
+ * kernel pages and the map/unmap will be NOOPs */
+
+ memcpy (daddr, saddr, this_nob);
+ nob -= this_nob;
+
+ if (diov->kiov_len > doffset + this_nob) {
+ daddr += this_nob;
+ doffset += this_nob;
+ } else {
+ kunmap(diov->kiov_page);
+ daddr = NULL;
+ diov++;
+ ndiov--;
+ doffset = 0;
+ }
- if (siov->kiov_len > soffset + this_nob) {
- saddr += this_nob;
- soffset += this_nob;
- } else {
- cfs_kunmap(siov->kiov_page);
- saddr = NULL;
- siov++;
- nsiov--;
- soffset = 0;
- }
- } while (nob > 0);
+ if (siov->kiov_len > soffset + this_nob) {
+ saddr += this_nob;
+ soffset += this_nob;
+ } else {
+ kunmap(siov->kiov_page);
+ saddr = NULL;
+ siov++;
+ nsiov--;
+ soffset = 0;
+ }
+ } while (nob > 0);
- if (daddr != NULL)
- cfs_kunmap(diov->kiov_page);
- if (saddr != NULL)
- cfs_kunmap(siov->kiov_page);
+ if (daddr != NULL)
+ kunmap(diov->kiov_page);
+ if (saddr != NULL)
+ kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
this_nob = MIN(this_nob, nob);
if (addr == NULL)
- addr = ((char *)cfs_kmap(kiov->kiov_page)) +
+ addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
addr += this_nob;
kiovoffset += this_nob;
} else {
- cfs_kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
- } while (nob > 0);
+ } while (nob > 0);
- if (addr != NULL)
- cfs_kunmap(kiov->kiov_page);
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);
this_nob = MIN(this_nob, nob);
if (addr == NULL)
- addr = ((char *)cfs_kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
+ addr = ((char *)kmap(kiov->kiov_page)) +
+ kiov->kiov_offset + kiovoffset;
- memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
- nob -= this_nob;
+ memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ nob -= this_nob;
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- cfs_kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
+ if (kiov->kiov_len > kiovoffset + this_nob) {
+ addr += this_nob;
+ kiovoffset += this_nob;
+ } else {
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
- } while (nob > 0);
+ if (iov->iov_len > iovoffset + this_nob) {
+ iovoffset += this_nob;
+ } else {
+ iov++;
+ niov--;
+ iovoffset = 0;
+ }
+ } while (nob > 0);
- if (addr != NULL)
- cfs_kunmap(kiov->kiov_page);
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);
dst->kiov_page = src->kiov_page;
dst->kiov_offset = src->kiov_offset + offset;
- if (len <= frag_len) {
- dst->kiov_len = len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
- return (niov);
- }
+ if (len <= frag_len) {
+ dst->kiov_len = len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ return niov;
+ }
- dst->kiov_len = frag_len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
+ dst->kiov_len = frag_len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
len -= frag_len;
dst++;
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}
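The pool selection above assumes each pool holds rbp_npages whole pages, so message sizes are compared against npages * PAGE_CACHE_SIZE. The matching round-up idiom, shown as an illustrative helper (not a function introduced by this patch), is:

#include <linux/pagemap.h>      /* PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT on these kernels */

/* Number of page-cache pages needed to hold len bytes, rounded up. */
static inline unsigned int example_len_to_npages(unsigned int len)
{
        return (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}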
libcfs_id2str(target));
return -ENOMEM;
}
- msg->msg_vmflush = !!cfs_memory_pressure_get();
+ msg->msg_vmflush = !!memory_pressure_get();
cpt = lnet_cpt_of_cookie(mdh.cookie);
lnet_res_lock(cpt);
int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
while (--npages >= 0)
- cfs_free_page(rb->rb_kiov[npages].kiov_page);
+ __free_page(rb->rb_kiov[npages].kiov_page);
LIBCFS_FREE(rb, sz);
}
for (i = 0; i < npages; i++) {
page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
- CFS_ALLOC_ZERO | CFS_ALLOC_STD);
+ __GFP_ZERO | GFP_IOFS);
if (page == NULL) {
while (--i >= 0)
- cfs_free_page(rb->rb_kiov[i].kiov_page);
+ __free_page(rb->rb_kiov[i].kiov_page);
LIBCFS_FREE(rb, sz);
return NULL;
}
- rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
+ rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
rb->rb_kiov[i].kiov_offset = 0;
rb->rb_kiov[i].kiov_page = page;
}
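A reduced sketch of the allocate-or-unwind loop used for router buffers, with plain kernel GFP flags in place of the removed CFS_ALLOC_* names (on the kernels this patch targets, GFP_IOFS is __GFP_IO | __GFP_FS). The function and array names are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* Allocate npages zeroed pages, freeing everything already taken on failure. */
static int example_alloc_pages(struct page **pages, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                pages[i] = alloc_page(__GFP_ZERO | GFP_IOFS);
                if (pages[i] == NULL) {
                        while (--i >= 0)
                                __free_page(pages[i]);
                        return -ENOMEM;
                }
        }
        return 0;
}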
lnet_rtrpools_alloc(int im_a_router)
{
lnet_rtrbufpool_t *rtrp;
- int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ int large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
int small_pages = 1;
int nrb_tiny;
int nrb_small;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else {
off += 1;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else {
off += 1;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos += 1;
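As in the /proc handlers above, copy_to_user() returns the number of bytes it could not copy, so any non-zero result is mapped to -EFAULT. A minimal illustrative helper (the name is hypothetical):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Push a kernel buffer to user space; 0 on success, -EFAULT on any shortfall. */
static int example_push_to_user(void __user *ubuf, const void *kbuf, size_t len)
{
        if (copy_to_user(ubuf, kbuf, len))
                return -EFAULT;
        return 0;
}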
npg = breq->blk_npg;
/* NB: this is not going to work for variable page size,
* but we have to keep it for compatibility */
- len = npg * CFS_PAGE_SIZE;
+ len = npg * PAGE_CACHE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
if (npg > LNET_MAX_IOV || npg <= 0)
}
void
-brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic)
+brw_fill_page (struct page *pg, int pattern, __u64 magic)
{
- char *addr = cfs_page_address(pg);
+ char *addr = page_address(pg);
int i;
LASSERT (addr != NULL);
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += CFS_PAGE_SIZE - BRW_MSIZE;
+ addr += PAGE_CACHE_SIZE - BRW_MSIZE;
memcpy(addr, &magic, BRW_MSIZE);
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++)
+ for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
return;
}
}
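brw_fill_page() writes the magic straight through page_address(), which only yields a usable kernel address for non-highmem pages; that is why the caller asserts addr != NULL. A reduced, illustrative version of the full-pattern fill (helper name and stride parameter are not from this patch):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/types.h>

/* Stamp an 8-byte magic across a lowmem page at the given stride. */
static void example_stamp_page(struct page *pg, __u64 magic, size_t stride)
{
        char *addr = page_address(pg);  /* non-NULL only for lowmem pages */
        size_t off;

        for (off = 0; off + sizeof(magic) <= PAGE_CACHE_SIZE; off += stride)
                memcpy(addr + off, &magic, sizeof(magic));
}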
int
-brw_check_page (cfs_page_t *pg, int pattern, __u64 magic)
+brw_check_page (struct page *pg, int pattern, __u64 magic)
{
- char *addr = cfs_page_address(pg);
+ char *addr = page_address(pg);
__u64 data = 0; /* make compiler happy */
int i;
data = *((__u64 *) addr);
if (data != magic) goto bad_data;
- addr += CFS_PAGE_SIZE - BRW_MSIZE;
+ addr += PAGE_CACHE_SIZE - BRW_MSIZE;
data = *((__u64 *) addr);
if (data != magic) goto bad_data;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++) {
+ for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
data = *(((__u64 *) addr) + i);
if (data != magic) goto bad_data;
}
brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
- cfs_page_t *pg;
+ struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
#ifdef __KERNEL__
brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
- cfs_page_t *pg;
+ struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
#ifdef __KERNEL__
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
- len = npg * CFS_PAGE_SIZE;
+ len = npg * PAGE_CACHE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
reply->brw_status = EINVAL;
return 0;
}
- npg = reqst->brw_len >> CFS_PAGE_SHIFT;
+ npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
} else {
- npg = (reqst->brw_len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen)) {
+ if (copy_from_user(name, args->lstio_ses_namep,
+ args->lstio_ses_nmlen)) {
LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return -EFAULT;
}
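The ioctl handlers here all repeat one pattern: allocate nmlen + 1 bytes, copy the length-counted user string in with copy_from_user(), and NUL-terminate it. A self-contained sketch of that pattern, using kmalloc/kfree in place of LIBCFS_ALLOC/LIBCFS_FREE purely to keep the example standalone (the helper name is hypothetical):

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Duplicate a length-counted user-space name into a NUL-terminated kernel copy. */
static char *example_strndup_user(const char __user *uname, int nmlen)
{
        char *name;

        name = kmalloc(nmlen + 1, GFP_KERNEL);
        if (name == NULL)
                return NULL;

        if (copy_from_user(name, uname, nmlen)) {
                kfree(name);
                return NULL;
        }
        name[nmlen] = '\0';
        return name;
}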
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name, args->lstio_dbg_namep,
+ if (copy_from_user(name, args->lstio_dbg_namep,
args->lstio_dbg_nmlen)) {
LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name, args->lstio_grp_namep,
+ if (copy_from_user(name, args->lstio_grp_namep,
args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
if (rc == 0 &&
- cfs_copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
+ copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
return -EINVAL;
}
args->lstio_grp_ndentp == NULL) /* # of node entry */
return -EINVAL;
- if (cfs_copy_from_user(&ndent, args->lstio_grp_ndentp,
- sizeof(ndent)) ||
- cfs_copy_from_user(&index, args->lstio_grp_idxp,
- sizeof(index)))
- return -EFAULT;
+ if (copy_from_user(&ndent, args->lstio_grp_ndentp,
+ sizeof(ndent)) ||
+ copy_from_user(&index, args->lstio_grp_idxp,
+ sizeof(index)))
+ return -EFAULT;
- if (ndent <= 0 || index < 0)
- return -EINVAL;
- }
+ if (ndent <= 0 || index < 0)
+ return -EINVAL;
+ }
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (rc != 0)
return rc;
- if (args->lstio_grp_dentsp != NULL &&
- (cfs_copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
- cfs_copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
+ if (args->lstio_grp_dentsp != NULL &&
+ (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
+ copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
+ rc = -EFAULT;
- return 0;
+ return 0;
}
int
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
args->lstio_bat_ndentp == NULL) /* # of node entry */
return -EINVAL;
- if (cfs_copy_from_user(&index, args->lstio_bat_idxp,
+ if (copy_from_user(&index, args->lstio_bat_idxp,
sizeof(index)) ||
- cfs_copy_from_user(&ndent, args->lstio_bat_ndentp,
+ copy_from_user(&ndent, args->lstio_bat_ndentp,
sizeof(ndent)))
return -EFAULT;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
if (rc != 0)
return rc;
- if (args->lstio_bat_dentsp != NULL &&
- (cfs_copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
- cfs_copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
+ if (args->lstio_bat_dentsp != NULL &&
+ (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
+ copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
+ rc = -EFAULT;
- return rc;
+ return rc;
}
int
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name, args->lstio_sta_namep,
+ if (copy_from_user(name, args->lstio_sta_namep,
args->lstio_sta_nmlen)) {
LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
return -EFAULT;
/* have parameter, check if parameter length is valid */
if (args->lstio_tes_param != NULL &&
(args->lstio_tes_param_len <= 0 ||
- args->lstio_tes_param_len > CFS_PAGE_SIZE - sizeof(lstcon_test_t)))
+ args->lstio_tes_param_len >
+ PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1);
goto out;
}
- rc = -EFAULT;
- if (cfs_copy_from_user(name,
- args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- cfs_copy_from_user(srcgrp,
- args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- cfs_copy_from_user(dstgrp,
- args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
- cfs_copy_from_user(param, args->lstio_tes_param,
- args->lstio_tes_param_len))
- goto out;
+ rc = -EFAULT;
+ if (copy_from_user(name, args->lstio_tes_bat_name,
+ args->lstio_tes_bat_nmlen) ||
+ copy_from_user(srcgrp, args->lstio_tes_sgrp_name,
+ args->lstio_tes_sgrp_nmlen) ||
+ copy_from_user(dstgrp, args->lstio_tes_dgrp_name,
+ args->lstio_tes_dgrp_nmlen) ||
+ copy_from_user(param, args->lstio_tes_param,
+ args->lstio_tes_param_len))
+ goto out;
rc = lstcon_test_add(name,
args->lstio_tes_type,
&ret, args->lstio_tes_resultp);
if (ret != 0)
- rc = (cfs_copy_to_user(args->lstio_tes_retp, &ret,
+ rc = (copy_to_user(args->lstio_tes_retp, &ret,
sizeof(ret))) ? -EFAULT : 0;
out:
if (name != NULL)
int
lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
{
- char *buf;
- int opc = data->ioc_u32[0];
- int rc;
+ char *buf;
+ int opc = data->ioc_u32[0];
+ int rc;
- if (cmd != IOC_LIBCFS_LNETST)
- return -EINVAL;
+ if (cmd != IOC_LIBCFS_LNETST)
+ return -EINVAL;
- if (data->ioc_plen1 > CFS_PAGE_SIZE)
- return -EINVAL;
+ if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ return -EINVAL;
- LIBCFS_ALLOC(buf, data->ioc_plen1);
- if (buf == NULL)
- return -ENOMEM;
+ LIBCFS_ALLOC(buf, data->ioc_plen1);
+ if (buf == NULL)
+ return -ENOMEM;
- /* copy in parameter */
- if (cfs_copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
- LIBCFS_FREE(buf, data->ioc_plen1);
- return -EFAULT;
- }
+ /* copy in parameter */
+ if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
+ LIBCFS_FREE(buf, data->ioc_plen1);
+ return -EFAULT;
+ }
mutex_lock(&console_session.ses_mutex);
rc = -EINVAL;
}
- if (cfs_copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
- sizeof(lstcon_trans_stat_t)))
- rc = -EFAULT;
+ if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
+ sizeof(lstcon_trans_stat_t)))
+ rc = -EFAULT;
out:
mutex_unlock(&console_session.ses_mutex);
- LIBCFS_FREE(buf, data->ioc_plen1);
+ LIBCFS_FREE(buf, data->ioc_plen1);
- return rc;
+ return rc;
}
EXPORT_SYMBOL(lstcon_ioctl_entry);
if (bulk->bk_iovs[i].kiov_page == NULL)
continue;
- cfs_free_page(bulk->bk_iovs[i].kiov_page);
+ __free_page(bulk->bk_iovs[i].kiov_page);
}
srpc_client_rpc_decref(crpc->crp_rpc);
cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
lstcon_rpc_t, crp_link) {
- if (cfs_copy_from_user(&tmp, next,
+ if (copy_from_user(&tmp, next,
sizeof(cfs_list_t)))
return -EFAULT;
(cfs_time_t)console_session.ses_id.ses_stamp);
cfs_duration_usec(dur, &tv);
- if (cfs_copy_to_user(&ent->rpe_peer,
- &nd->nd_id, sizeof(lnet_process_id_t)) ||
- cfs_copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
- cfs_copy_to_user(&ent->rpe_state,
- &nd->nd_state, sizeof(nd->nd_state)) ||
- cfs_copy_to_user(&ent->rpe_rpc_errno, &error,
- sizeof(error)))
- return -EFAULT;
+ if (copy_to_user(&ent->rpe_peer,
+ &nd->nd_id, sizeof(lnet_process_id_t)) ||
+ copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
+ copy_to_user(&ent->rpe_state,
+ &nd->nd_state, sizeof(nd->nd_state)) ||
+ copy_to_user(&ent->rpe_rpc_errno, &error,
+ sizeof(error)))
+ return -EFAULT;
- if (error != 0)
- continue;
+ if (error != 0)
+ continue;
- /* RPC is done */
- rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+ /* RPC is done */
+ rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
- if (cfs_copy_to_user(&ent->rpe_sid,
- &rep->sid, sizeof(lst_sid_t)) ||
- cfs_copy_to_user(&ent->rpe_fwk_errno,
- &rep->status, sizeof(rep->status)))
- return -EFAULT;
+ if (copy_to_user(&ent->rpe_sid,
+ &rep->sid, sizeof(lst_sid_t)) ||
+ copy_to_user(&ent->rpe_fwk_errno,
+ &rep->status, sizeof(rep->status)))
+ return -EFAULT;
- if (readent == NULL)
- continue;
+ if (readent == NULL)
+ continue;
- if ((error = readent(trans->tas_opc, msg, ent)) != 0)
- return error;
- }
+ error = readent(trans->tas_opc, msg, ent);
+ if (error != 0)
+ return error;
+ }
- return 0;
+ return 0;
}
void
LASSERT (i < nkiov);
- pid = (lnet_process_id_packed_t *)cfs_page_address(kiov[i].kiov_page);
+ pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
return &pid[idx % SFW_ID_PER_PAGE];
}
{
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
- brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + CFS_PAGE_SIZE - 1) / CFS_PAGE_SIZE;
- brq->blk_flags = param->blk_flags;
+ brq->blk_opc = param->blk_opc;
+ brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
+ PAGE_CACHE_SIZE;
+ brq->blk_flags = param->blk_flags;
- return 0;
+ return 0;
}
int
if (transop == LST_TRANS_TSBCLIADD) {
npg = sfw_id_pages(test->tes_span);
nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
- npg * CFS_PAGE_SIZE :
+ npg * PAGE_CACHE_SIZE :
sizeof(lnet_process_id_packed_t) * test->tes_span;
}
LASSERT(nob > 0);
len = (feats & LST_FEAT_BULK_LEN) == 0 ?
- CFS_PAGE_SIZE : min_t(int, nob, CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE);
nob -= len;
bulk->bk_iovs[i].kiov_offset = 0;
bulk->bk_iovs[i].kiov_len = len;
bulk->bk_iovs[i].kiov_page =
- cfs_alloc_page(CFS_ALLOC_STD);
+ alloc_page(GFP_IOFS);
if (bulk->bk_iovs[i].kiov_page == NULL) {
lstcon_rpc_put(*crpc);
case LST_TRANS_SESQRY:
rep = &msg->msg_body.dbg_reply;
- if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+ if (copy_to_user(&ent_up->rpe_priv[0],
&rep->dbg_timeout, sizeof(int)) ||
- cfs_copy_to_user(&ent_up->rpe_payload[0],
+ copy_to_user(&ent_up->rpe_payload[0],
&rep->dbg_name, LST_NAME_SIZE))
return -EFAULT;
}
for (i = 0 ; i < count; i++) {
- if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
}
for (i = 0; i < count; i++) {
- if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
goto error;
}
cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
lstcon_group_t, grp_link) {
if (index-- == 0) {
- return cfs_copy_to_user(name_up, grp->grp_name, len) ?
+ return copy_to_user(name_up, grp->grp_name, len) ?
-EFAULT : 0;
}
}
break;
nd = ndl->ndl_node;
- if (cfs_copy_to_user(&dents_up[count].nde_id,
+ if (copy_to_user(&dents_up[count].nde_id,
&nd->nd_id, sizeof(nd->nd_id)) ||
- cfs_copy_to_user(&dents_up[count].nde_state,
+ copy_to_user(&dents_up[count].nde_state,
&n