Replace memory-related wrappers with the kernel API.
Affected primitives:
CFS_PAGE_SIZE, CFS_PAGE_SHIFT, cfs_num_physpages,
cfs_copy_from_user, cfs_copy_to_user, cfs_page_address,
cfs_kmap/cfs_kunmap, cfs_get_page, cfs_page_count,
cfs_page_index, cfs_page_pin, cfs_page_unpin,
cfs_memory_pressure_get/set/clr, CFS_NUM_CACHEPAGES,
CFS_ALLOC_XXX flags, cfs_alloc/free, cfs_alloc/free_large,
cfs_alloc/free_page, CFS_DECL_MMSPACE, CFS_MMSPACE_OPEN,
CFS_MMSPACE_CLOSE, CFS_SLAB_XXX flags, cfs_shrinker_t,
cfs_set/remove_shrinker, CFS_DEFAULT_SEEKS, cfs_mem_cache_t,
cfs_mem_cache_alloc/free/create/destroy, cfs_mem_is_in_cache
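
As an illustration, call sites convert mechanically under the sed
script below; the fragment is a hypothetical example, not code taken
from this patch:

    /* before: libcfs wrappers */
    cfs_page_t *pg = cfs_alloc_page(CFS_ALLOC_STD);
    void *buf = cfs_alloc(size, CFS_ALLOC_IO | CFS_ALLOC_ZERO);

    /* after: kernel API */
    struct page *pg = alloc_page(GFP_IOFS);
    void *buf = kmalloc(size, __GFP_IO | __GFP_ZERO);
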
Manual changes:
1. cfs_alloc_flags_to_gfp() is removed
2. remove kmalloc/kfree etc. from linux-mem.c and linux-mem.h
3. remove page_address/kmap/kunmap etc. from linux-mem.h
4. remove page_cache_get/page_cache_release from echo_internal.h; they
are already defined in user-mem.h
5. change kmem_cache_create/destroy prototypes to the kernel's and
modify all callers to match them (sketched below)
6. define _SPL_KMEM_H and related macros to avoid using spl's
sys/kmem.h, which redefines the slab allocator (also sketched below)
7. change kmem_virt() to the kernel-provided is_vmalloc_addr(), so
that we do not use any of spl's sys/kmem.h functions
8. clean up include files a little bit in osd-zfs
9. various coding style cleanup
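
For change 5, the kernel's kmem_cache_create() takes an additional
constructor argument and kmem_cache_destroy() returns void, so callers
change roughly as follows (cache and variable names are hypothetical):

    /* before */
    cfs_mem_cache_t *cache;
    cache = cfs_mem_cache_create("obj_cache", sizeof(struct obj),
                                 0, CFS_SLAB_HWCACHE_ALIGN);
    rc = cfs_mem_cache_destroy(cache);

    /* after */
    struct kmem_cache *cache;
    cache = kmem_cache_create("obj_cache", sizeof(struct obj),
                              0, SLAB_HWCACHE_ALIGN, NULL);
    kmem_cache_destroy(cache);
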
NUMA allocators (cfs_cpt_xxx) are not changed in this patch.
gnilnd is not converted, as requested by James Simmons.
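
For changes 6 and 7, the intent is to keep osd-zfs off spl's slab
wrappers entirely; a minimal sketch of the approach (the exact macro
set used by the patch may differ):

    /* claim spl's include guard so its sys/kmem.h is never pulled in */
    #define _SPL_KMEM_H

    /* use the kernel's own test instead of spl's kmem_virt() */
    if (is_vmalloc_addr(ptr))
            vfree(ptr);
    else
            kfree(ptr);
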
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Change-Id: Iadfbb0d5a0e31c78dd6c811e5ffdb468fa7e6f44
Reviewed-on: http://review.whamcloud.com/2831
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
################################################################################
# memory operations
-
-#s/\bcfs_page_t\b/struct page/g
-#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
-#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
-#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
-#s/\bcfs_num_physpages\b/num_physpages/g
-#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
-#s/\bcfs_copy_from_user\b/copy_from_user/g
-#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
-#s/\bcfs_copy_to_user\b/copy_to_user/g
-#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
-#s/\bcfs_page_address\b/page_address/g
-#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
-#s/\bcfs_kmap\b/kmap/g
-#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
-#s/\bcfs_kunmap\b/kunmap/g
-#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
-#s/\bcfs_get_page\b/get_page/g
-#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
-#s/\bcfs_page_count\b/page_count/g
-#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
-#s/\bcfs_page_index\b/page_index/g
-#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
-#s/\bcfs_page_pin\b/page_cache_get/g
-#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
-#s/\bcfs_page_unpin\b/page_cache_release/g
-#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
-#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
-#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
-#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
-#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
-# memory allocator
-#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
-#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
-#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
-#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
-#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
-#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
-#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
-#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
-#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
-#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
-#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
-#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
-#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
-#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
-#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
-#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
-#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
-#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
-#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
-#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
-#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
-#s/\bcfs_alloc\b/kmalloc/g
-#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
-#s/\bcfs_free\b/kfree/g
-#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
-#s/\bcfs_alloc_large\b/vmalloc/g
-#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
-#s/\bcfs_free_large\b/vfree/g
-#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
-#s/\bcfs_alloc_page\b/alloc_page/g
-#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
-#s/\bcfs_free_page\b/__free_page/g
-#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+s/\bcfs_page_t\b/struct page/g
+/typedef[ \t]*\bstruct page\b[ \t]*\bstruct page\b/d
+s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+s/\bcfs_num_physpages\b/num_physpages/g
+/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+s/\bcfs_copy_from_user\b/copy_from_user/g
+/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_copy_to_user\b/copy_to_user/g
+/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_page_address\b/page_address/g
+/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+s/\bcfs_kmap\b/kmap/g
+/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+s/\bcfs_kunmap\b/kunmap/g
+/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+s/\bcfs_get_page\b/get_page/g
+/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+s/\bcfs_page_count\b/page_count/g
+/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+s/\bcfs_page_index\b/page_index/g
+/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+s/\bcfs_page_pin\b/page_cache_get/g
+/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+s/\bcfs_page_unpin\b/page_cache_release/g
+/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+# memory allocator
+s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+s/\bCFS_ALLOC_USER\b/GFP_USER/g
+/#[ \t]*define[ \t]*\bGFP_USER\b[ \t]*\bGFP_USER\b/d
+s/\bCFS_ALLOC_KERNEL\b/GFP_KERNEL/g
+/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+s/\bCFS_ALLOC_NOFS\b/GFP_NOFS/g
+/#[ \t]*define[ \t]*\bGFP_NOFS\b[ \t]*\bGFP_NOFS\b/d
+s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+s/\bcfs_alloc\b/kmalloc/g
+/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+s/\bcfs_free\b/kfree/g
+/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+s/\bcfs_alloc_large\b/vmalloc/g
+/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+s/\bcfs_free_large\b/vfree/g
+/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+s/\bcfs_alloc_page\b/alloc_page/g
+/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+s/\bcfs_free_page\b/__free_page/g
+/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
# TODO: SLAB allocator
-#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
-#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
-#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
-#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
-#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
-#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
-#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
-#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
-#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
-#s/\bcfs_shrinker\b/shrinker/g
-#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
-#s/\bcfs_shrinker_t\b/struct shrinkert/g
-#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
-#s/\bcfs_set_shrinker\b/set_shrinker/g
-#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
-#s/\bcfs_remove_shrinker\b/remove_shrinker/g
-#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
-#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
-#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+s/\bcfs_shrinker\b/shrinker/g
+/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+s/\bcfs_shrinker_t\b/shrinker_t/g
+/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+s/\bcfs_set_shrinker\b/set_shrinker/g
+/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+s/\bcfs_remove_shrinker\b/remove_shrinker/g
+/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/cfs_mem_cache_t/struct kmem_cache/g
+s/cfs_mem_cache_create/kmem_cache_create/g
+s/\w+[ =]*cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_alloc/kmem_cache_alloc/g
+s/cfs_mem_cache_free/kmem_cache_free/g
+s/cfs_mem_is_in_cache/kmem_is_in_cache/g
/* Variable sized pages are not supported */
#ifdef PAGE_SHIFT
-#define CFS_PAGE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SHIFT PAGE_SHIFT
#else
-#define CFS_PAGE_SHIFT 12
+#define PAGE_CACHE_SHIFT 12
#endif
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
+#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE - 1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE - 1))
enum {
XNU_PAGE_RAW,
* - "xll" pages (XNU_PAGE_XLL): these are used by file system to cache
* file data, owned by file system objects, hashed, lrued, etc.
*
- * cfs_page_t has to cover both of them, because core Lustre code is based on
+ * struct page has to cover both of them, because core Lustre code is based on
* the Linux assumption that page is _both_ memory buffer and file system
* caching entity.
*
* To achieve this, all types of pages supported on XNU has to start from
- * common header that contains only "page type". Common cfs_page_t operations
+ * common header that contains only "page type". Common struct page operations
* dispatch through operation vector based on page type.
*
*/
-typedef struct xnu_page {
+struct page {
int type;
-} cfs_page_t;
+};
struct xnu_page_ops {
- void *(*page_map) (cfs_page_t *);
- void (*page_unmap) (cfs_page_t *);
- void *(*page_address) (cfs_page_t *);
+ void *(*page_map) (struct page *);
+ void (*page_unmap) (struct page *);
+ void *(*page_address) (struct page *);
};
void xnu_page_ops_register(int type, struct xnu_page_ops *ops);
/*
* Public interface to lustre
*
- * - cfs_alloc_page(f)
- * - cfs_free_page(p)
- * - cfs_kmap(p)
- * - cfs_kunmap(p)
- * - cfs_page_address(p)
+ * - alloc_page(f)
+ * - __free_page(p)
+ * - kmap(p)
+ * - kunmap(p)
+ * - page_address(p)
*/
/*
- * Of all functions above only cfs_kmap(), cfs_kunmap(), and
- * cfs_page_address() can be called on file system pages. The rest is for raw
+ * Of all functions above only kmap(), kunmap(), and
+ * page_address() can be called on file system pages. The rest is for raw
* pages only.
*/
-cfs_page_t *cfs_alloc_page(u_int32_t flags);
-void cfs_free_page(cfs_page_t *page);
-void cfs_get_page(cfs_page_t *page);
-int cfs_put_page_testzero(cfs_page_t *page);
-int cfs_page_count(cfs_page_t *page);
-#define cfs_page_index(pg) (0)
+struct page *alloc_page(u_int32_t flags);
+void __free_page(struct page *page);
+void get_page(struct page *page);
+int cfs_put_page_testzero(struct page *page);
+int page_count(struct page *page);
+#define page_index(pg) (0)
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
/*
* Memory allocator
*/
-void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-void cfs_free(void *addr);
+void *kmalloc(size_t nr_bytes, u_int32_t flags);
+void kfree(void *addr);
-void *cfs_alloc_large(size_t nr_bytes);
-void cfs_free_large(void *addr);
+void *vmalloc(size_t nr_bytes);
+void vfree(void *addr);
extern int get_preemption_level(void);
-#define CFS_ALLOC_ATOMIC_TRY \
- (get_preemption_level() != 0 ? CFS_ALLOC_ATOMIC : 0)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+ /* allocation is not allowed to block */
+ GFP_ATOMIC = 0x1,
+ /* allocation is allowed to block */
+ __GFP_WAIT = 0x2,
+ /* allocation should return zeroed memory */
+ __GFP_ZERO = 0x4,
+ /* allocation is allowed to call file-system code to free/clean
+ * memory */
+ __GFP_FS = 0x8,
+ /* allocation is allowed to do io to free/clean memory */
+ __GFP_IO = 0x10,
+ /* don't report allocation failure to the console */
+ __GFP_NOWARN = 0x20,
+ /* standard allocator flag combination */
+ GFP_IOFS = __GFP_FS | __GFP_IO,
+ GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+ GFP_NOFS = __GFP_WAIT | __GFP_IO,
+ GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for alloc_page() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+ /* allow to return page beyond KVM. It has to be mapped into KVM by
+ * kmap() and unmapped with kunmap(). */
+ __GFP_HIGHMEM = 0x40,
+ GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+ __GFP_HIGHMEM,
+};
+
+#define ALLOC_ATOMIC_TRY \
+ (get_preemption_level() != 0 ? GFP_ATOMIC : 0)
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
/*
* Slab:
#define MC_NAME_MAX_LEN 64
-typedef struct cfs_mem_cache {
+struct kmem_cache {
int mc_size;
mem_cache_t mc_cache;
struct list_head mc_link;
char mc_name [MC_NAME_MAX_LEN];
-} cfs_mem_cache_t;
+};
#define KMEM_CACHE_MAX_COUNT 64
#define KMEM_MAX_ZONE 8192
-cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *);
+void *kmem_cache_alloc(struct kmem_cache *, int);
+void kmem_cache_free(struct kmem_cache *, void *);
/*
* Misc
*/
/* XXX Liang: num_physpages... fix me */
#define num_physpages (64 * 1024)
-#define CFS_NUM_CACHEPAGES num_physpages
+#define NUM_CACHEPAGES num_physpages
-#define CFS_DECL_MMSPACE
-#define CFS_MMSPACE_OPEN do {} while(0)
-#define CFS_MMSPACE_CLOSE do {} while(0)
+#define DECL_MMSPACE
+#define MMSPACE_OPEN do {} while (0)
+#define MMSPACE_CLOSE do {} while (0)
#define copy_from_user(kaddr, uaddr, size) copyin(CAST_USER_ADDR_T(uaddr), (caddr_t)kaddr, size)
#define copy_to_user(uaddr, kaddr, size) copyout((caddr_t)kaddr, CAST_USER_ADDR_T(uaddr), size)
};
/*
- * Universal memory allocator API
- */
-enum cfs_alloc_flags {
- /* allocation is not allowed to block */
- CFS_ALLOC_ATOMIC = 0x1,
- /* allocation is allowed to block */
- CFS_ALLOC_WAIT = 0x2,
- /* allocation should return zeroed memory */
- CFS_ALLOC_ZERO = 0x4,
- /* allocation is allowed to call file-system code to free/clean
- * memory */
- CFS_ALLOC_FS = 0x8,
- /* allocation is allowed to do io to free/clean memory */
- CFS_ALLOC_IO = 0x10,
- /* don't report allocation failure to the console */
- CFS_ALLOC_NOWARN = 0x20,
- /* standard allocator flag combination */
- CFS_ALLOC_STD = CFS_ALLOC_FS | CFS_ALLOC_IO,
- CFS_ALLOC_USER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO,
- CFS_ALLOC_NOFS = CFS_ALLOC_WAIT | CFS_ALLOC_IO,
- CFS_ALLOC_KERNEL = CFS_ALLOC_WAIT | CFS_ALLOC_IO | CFS_ALLOC_FS,
-};
-
-/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
-enum cfs_alloc_page_flags {
- /* allow to return page beyond KVM. It has to be mapped into KVM by
- * cfs_kmap() and unmapped with cfs_kunmap(). */
- CFS_ALLOC_HIGHMEM = 0x40,
- CFS_ALLOC_HIGHUSER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO |
- CFS_ALLOC_HIGHMEM,
-};
-
-/*
* Drop into debugger, if possible. Implementation is provided by platform.
*/
* @retval 0 for success.
*/
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
- cfs_page_t *page, unsigned int offset,
+ struct page *page, unsigned int offset,
unsigned int len);
/** Update digest by part of data.
/*
* Memory
*/
-#ifndef cfs_memory_pressure_get
-#define cfs_memory_pressure_get() (0)
-#endif
-#ifndef cfs_memory_pressure_set
-#define cfs_memory_pressure_set() do {} while (0)
-#endif
-#ifndef cfs_memory_pressure_clr
-#define cfs_memory_pressure_clr() do {} while (0)
-#endif
-
static inline int cfs_memory_pressure_get_and_set(void)
{
- int old = cfs_memory_pressure_get();
+ int old = memory_pressure_get();
- if (!old)
- cfs_memory_pressure_set();
- return old;
+ if (!old)
+ memory_pressure_set();
+ return old;
}
static inline void cfs_memory_pressure_restore(int old)
{
- if (old)
- cfs_memory_pressure_set();
- else
- cfs_memory_pressure_clr();
- return;
+ if (old)
+ memory_pressure_set();
+ else
+ memory_pressure_clr();
+ return;
}
#endif
#endif /* LIBCFS_DEBUG */
#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << CFS_PAGE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask) \
do { \
LASSERT(!cfs_in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & CFS_ALLOC_ATOMIC)) != 0); \
+ ((mask) & GFP_ATOMIC)) != 0); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
do { \
LIBCFS_ALLOC_PRE((size), (mask)); \
(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
- cfs_alloc((size), (mask)) : cfs_alloc_large(size); \
+ kmalloc((size), (mask)) : vmalloc(size); \
LIBCFS_ALLOC_POST((ptr), (size)); \
} while (0)
* default allocator
*/
#define LIBCFS_ALLOC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_IO)
+ LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)
/**
* non-sleeping allocator
*/
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
- LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_ATOMIC)
+ LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
/**
* allocate memory for specified CPU partition
/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
- LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, CFS_ALLOC_IO)
+ LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
-#define LIBCFS_FREE(ptr, size) \
-do { \
- int s = (size); \
- if (unlikely((ptr) == NULL)) { \
- CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
- "%s:%d\n", s, __FILE__, __LINE__); \
- break; \
- } \
- libcfs_kmem_dec((ptr), s); \
- CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
+#define LIBCFS_FREE(ptr, size) \
+do { \
+ int s = (size); \
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
+ "%s:%d\n", s, __FILE__, __LINE__); \
+ break; \
+ } \
+ libcfs_kmem_dec((ptr), s); \
+ CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
s, (ptr), libcfs_kmem_read()); \
- if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
- cfs_free_large(ptr); \
- else \
- cfs_free(ptr); \
+ if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
+ vfree(ptr); \
+ else \
+ kfree(ptr); \
} while (0)
/******************************************************************************/
struct libcfs_device_userstate
{
- int ldu_memhog_pages;
- cfs_page_t *ldu_memhog_root_page;
+ int ldu_memhog_pages;
+ struct page *ldu_memhog_root_page;
};
/* what used to be in portals_lib.h */
int *oldmask, int minmask, int allmask);
/* Allocate space for and copy an existing string.
- * Must free with cfs_free().
+ * Must free with kfree().
*/
char *cfs_strdup(const char *str, u_int32_t flags);
#define LWTSTR(n) #n
#define LWTWHERE(f,l) f ":" LWTSTR(l)
-#define LWT_EVENTS_PER_PAGE (CFS_PAGE_SIZE / sizeof (lwt_event_t))
+#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lwt_event_t))
#define LWT_EVENT(p1, p2, p3, p4) \
do { \
# include <linux/mm_inline.h>
#endif
-typedef struct page cfs_page_t;
-#define CFS_PAGE_SIZE PAGE_CACHE_SIZE
-#define CFS_PAGE_SHIFT PAGE_CACHE_SHIFT
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
-#define cfs_num_physpages num_physpages
+#define page_index(p) ((p)->index)
-#define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define cfs_copy_to_user(to, from, n) copy_to_user(to, from, n)
-static inline void *cfs_page_address(cfs_page_t *page)
-{
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- return page_address(page);
-}
-
-static inline void *cfs_kmap(cfs_page_t *page)
-{
- return kmap(page);
-}
-
-static inline void cfs_kunmap(cfs_page_t *page)
-{
- kunmap(page);
-}
-
-static inline void cfs_get_page(cfs_page_t *page)
-{
- get_page(page);
-}
-
-static inline int cfs_page_count(cfs_page_t *page)
-{
- return page_count(page);
-}
-
-#define cfs_page_index(p) ((p)->index)
-
-#define cfs_page_pin(page) page_cache_get(page)
-#define cfs_page_unpin(page) page_cache_release(page)
-
-/*
- * Memory allocator
- * XXX Liang: move these declare to public file
- */
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void cfs_free(void *addr);
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void cfs_free_large(void *addr);
-
-extern cfs_page_t *cfs_alloc_page(unsigned int flags);
-extern void cfs_free_page(cfs_page_t *page);
-
-#define cfs_memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define cfs_memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define cfs_memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
+#define memory_pressure_get() (current->flags & PF_MEMALLOC)
+#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
+#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
-#define CFS_NUM_CACHEPAGES \
- min(cfs_num_physpages, 1UL << (30 - CFS_PAGE_SHIFT) * 3 / 4)
+#define NUM_CACHEPAGES \
+ min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
#else
-#define CFS_NUM_CACHEPAGES cfs_num_physpages
+#define NUM_CACHEPAGES num_physpages
#endif
/*
* In Linux there is no way to determine whether current execution context is
* blockable.
*/
-#define CFS_ALLOC_ATOMIC_TRY CFS_ALLOC_ATOMIC
+#define ALLOC_ATOMIC_TRY GFP_ATOMIC
+/* GFP_IOFS was added in 2.6.33 kernel */
+#ifndef GFP_IOFS
+#define GFP_IOFS (__GFP_IO | __GFP_FS)
+#endif
-/*
- * SLAB allocator
- * XXX Liang: move these declare to public file
- */
-typedef struct kmem_cache cfs_mem_cache_t;
-extern cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-extern int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
-extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
-
-#define CFS_DECL_MMSPACE mm_segment_t __oldfs
-#define CFS_MMSPACE_OPEN \
+#define DECL_MMSPACE mm_segment_t __oldfs
+#define MMSPACE_OPEN \
do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
-#define CFS_MMSPACE_CLOSE set_fs(__oldfs)
+#define MMSPACE_CLOSE set_fs(__oldfs)
-#define CFS_SLAB_HWCACHE_ALIGN SLAB_HWCACHE_ALIGN
-#define CFS_SLAB_KERNEL SLAB_KERNEL
-#define CFS_SLAB_NOFS SLAB_NOFS
-/*
- * NUMA allocators
- *
- * NB: we will rename these functions in a separate patch:
- * - rename cfs_alloc to cfs_malloc
- * - rename cfs_alloc/free_page to cfs_page_alloc/free
- * - rename cfs_alloc/free_large to cfs_vmalloc/vfree
- */
extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes, unsigned int flags);
extern void *cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes);
-extern cfs_page_t *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
+extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
int cpt, unsigned int flags);
-extern void *cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep,
+extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
struct cfs_cpt_table *cptab,
int cpt, unsigned int flags);
/*
* Shrinker
*/
-#define cfs_shrinker shrinker
#ifdef HAVE_SHRINK_CONTROL
# define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \
#endif
#ifdef HAVE_REGISTER_SHRINKER
-typedef int (*cfs_shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
+typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
static inline
-struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
+struct shrinker *set_shrinker(int seek, shrinker_t func)
{
struct shrinker *s;
}
static inline
-void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+void remove_shrinker(struct shrinker *shrinker)
{
if (shrinker == NULL)
return;
unregister_shrinker(shrinker);
kfree(shrinker);
}
-#else
-typedef shrinker_t cfs_shrinker_t;
-#define cfs_set_shrinker(s, f) set_shrinker(s, f)
-#define cfs_remove_shrinker(s) remove_shrinker(s)
#endif
-#define CFS_DEFAULT_SEEKS DEFAULT_SEEKS
#endif /* __LINUX_CFS_MEM_H__ */
*
***************************************************************************/
-struct cfs_shrinker {
+struct shrinker {
;
};
-#define CFS_DEFAULT_SEEKS (0)
+#define DEFAULT_SEEKS (0)
-typedef int (*cfs_shrinker_t)(int, unsigned int);
+typedef int (*shrinker_t)(int, unsigned int);
static inline
-struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrinker_t shrink)
+struct shrinker *set_shrinker(int seeks, shrinker_t shrink)
{
- return (struct cfs_shrinker *)0xdeadbea1; // Cannot return NULL here
+ return (struct shrinker *)0xdeadbea1; /* Cannot return NULL here */
}
-static inline void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+static inline void remove_shrinker(struct shrinker *shrinker)
{
}
*/
#define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
-typedef struct page {
+struct page {
void *addr;
unsigned long index;
cfs_list_t list;
int _managed;
#endif
cfs_list_t _node;
-} cfs_page_t;
+};
/* 4K */
-#define CFS_PAGE_SHIFT 12
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
-
-cfs_page_t *cfs_alloc_page(unsigned int flags);
-void cfs_free_page(cfs_page_t *pg);
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
-
-#define cfs_get_page(p) __I_should_not_be_called__(at_all)
-#define cfs_page_count(p) __I_should_not_be_called__(at_all)
-#define cfs_page_index(p) ((p)->index)
-#define cfs_page_pin(page) do {} while (0)
-#define cfs_page_unpin(page) do {} while (0)
+#define PAGE_CACHE_SHIFT 12
+#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+
+struct page *alloc_page(unsigned int flags);
+void __free_page(struct page *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
+
+#define get_page(p) __I_should_not_be_called__(at_all)
+#define page_count(p) __I_should_not_be_called__(at_all)
+#define page_index(p) ((p)->index)
+#define page_cache_get(page) do { } while (0)
+#define page_cache_release(page) do { } while (0)
/*
* Memory allocator
* Inline function, so utils can use them without linking of libcfs
*/
-#define __ALLOC_ZERO (1 << 2)
-static inline void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+ /* allocation is not allowed to block */
+ GFP_ATOMIC = 0x1,
+ /* allocation is allowed to block */
+ __GFP_WAIT = 0x2,
+ /* allocation should return zeroed memory */
+ __GFP_ZERO = 0x4,
+ /* allocation is allowed to call file-system code to free/clean
+ * memory */
+ __GFP_FS = 0x8,
+ /* allocation is allowed to do io to free/clean memory */
+ __GFP_IO = 0x10,
+ /* don't report allocation failure to the console */
+ __GFP_NOWARN = 0x20,
+ /* standard allocator flag combination */
+ GFP_IOFS = __GFP_FS | __GFP_IO,
+ GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+ GFP_NOFS = __GFP_WAIT | __GFP_IO,
+ GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for alloc_page() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+ /* allow to return page beyond KVM. It has to be mapped into KVM by
+ * kmap() and unmapped with kunmap(). */
+ __GFP_HIGHMEM = 0x40,
+ GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+ __GFP_HIGHMEM,
+};
+
+static inline void *kmalloc(size_t nr_bytes, u_int32_t flags)
{
- void *result;
+ void *result;
- result = malloc(nr_bytes);
- if (result != NULL && (flags & __ALLOC_ZERO))
- memset(result, 0, nr_bytes);
- return result;
+ result = malloc(nr_bytes);
+ if (result != NULL && (flags & __GFP_ZERO))
+ memset(result, 0, nr_bytes);
+ return result;
}
-#define cfs_free(addr) free(addr)
-#define cfs_alloc_large(nr_bytes) cfs_alloc(nr_bytes, 0)
-#define cfs_free_large(addr) cfs_free(addr)
+#define kfree(addr) free(addr)
+#define vmalloc(nr_bytes) kmalloc(nr_bytes, 0)
+#define vfree(addr) free(addr)
-#define CFS_ALLOC_ATOMIC_TRY (0)
+#define ALLOC_ATOMIC_TRY (0)
/*
* SLAB allocator
*/
-typedef struct {
+struct kmem_cache {
int size;
-} cfs_mem_cache_t;
+};
-#define CFS_SLAB_HWCACHE_ALIGN 0
+#define SLAB_HWCACHE_ALIGN 0
#define SLAB_DESTROY_BY_RCU 0
-#define CFS_SLAB_KERNEL 0
-#define CFS_SLAB_NOFS 0
+#define SLAB_KERNEL 0
+#define SLAB_NOFS 0
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c);
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp);
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *c);
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp);
+void kmem_cache_free(struct kmem_cache *c, void *addr);
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem);
/*
* NUMA allocators
*/
#define cfs_cpt_malloc(cptab, cpt, bytes, flags) \
- cfs_alloc(bytes, flags)
+ kmalloc(bytes, flags)
#define cfs_cpt_vmalloc(cptab, cpt, bytes) \
- cfs_alloc(bytes)
+ vmalloc(bytes)
#define cfs_page_cpt_alloc(cptab, cpt, mask) \
- cfs_alloc_page(mask)
+ alloc_page(mask)
#define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp) \
- cfs_mem_cache_alloc(cache, gfp)
+ kmem_cache_alloc(cache, gfp)
#define smp_rmb() do {} while (0)
/*
* Copy to/from user
*/
-static inline int cfs_copy_from_user(void *a,void *b, int c)
+static inline int copy_from_user(void *a, void *b, int c)
{
- memcpy(a,b,c);
- return 0;
+ memcpy(a, b, c);
+ return 0;
}
-static inline int cfs_copy_to_user(void *a,void *b, int c)
+static inline int copy_to_user(void *a, void *b, int c)
{
- memcpy(a,b,c);
- return 0;
+ memcpy(a, b, c);
+ return 0;
}
#endif
((unsigned char *)&addr)[1], \
((unsigned char *)&addr)[0]
-static int cfs_copy_from_user(void *to, void *from, int c)
+static int copy_from_user(void *to, void *from, int c)
{
- memcpy(to, from, c);
- return 0;
+ memcpy(to, from, c);
+ return 0;
}
-static int cfs_copy_to_user(void *to, const void *from, int c)
+static int copy_to_user(void *to, const void *from, int c)
{
- memcpy(to, from, c);
- return 0;
+ memcpy(to, from, c);
+ return 0;
}
static unsigned long
0 \
)
-#define cfs_num_physpages (64 * 1024)
-#define CFS_NUM_CACHEPAGES cfs_num_physpages
+#define num_physpages (64 * 1024)
+#define NUM_CACHEPAGES num_physpages
#else
#ifdef __KERNEL__
-typedef struct cfs_mem_cache cfs_mem_cache_t;
-
/*
* page definitions
*/
-#define CFS_PAGE_SIZE PAGE_SIZE
-#define CFS_PAGE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SIZE PAGE_SIZE
+#define PAGE_CACHE_SHIFT PAGE_SHIFT
#define CFS_PAGE_MASK (~(PAGE_SIZE - 1))
-typedef struct cfs_page {
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
+
+struct page {
void * addr;
cfs_atomic_t count;
void * private;
void * mapping;
__u32 index;
__u32 flags;
-} cfs_page_t;
+};
-#define page cfs_page
#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
&(page)->flags)
-#define __GFP_FS (1)
-#define GFP_KERNEL (2)
-#define GFP_ATOMIC (4)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+ /* allocation is not allowed to block */
+ GFP_ATOMIC = 0x1,
+ /* allocation is allowed to block */
+ __GFP_WAIT = 0x2,
+ /* allocation should return zeroed memory */
+ __GFP_ZERO = 0x4,
+ /* allocation is allowed to call file-system code to free/clean
+ * memory */
+ __GFP_FS = 0x8,
+ /* allocation is allowed to do io to free/clean memory */
+ __GFP_IO = 0x10,
+ /* don't report allocation failure to the console */
+ __GFP_NOWARN = 0x20,
+ /* standard allocator flag combination */
+ GFP_IOFS = __GFP_FS | __GFP_IO,
+ GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+ GFP_NOFS = __GFP_WAIT | __GFP_IO,
+ GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for alloc_page() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+ /* allow to return page beyond KVM. It has to be mapped into KVM by
+ * kmap() and unmapped with kunmap(). */
+ __GFP_HIGHMEM = 0x40,
+ GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+ __GFP_HIGHMEM,
+};
-cfs_page_t *cfs_alloc_page(int flags);
-void cfs_free_page(cfs_page_t *pg);
-void cfs_release_page(cfs_page_t *pg);
-cfs_page_t * virt_to_page(void * addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct page *alloc_page(int flags);
+void __free_page(struct page *pg);
+void cfs_release_page(struct page *pg);
+struct page *virt_to_page(void *addr);
#define page_cache_get(a) do {} while (0)
#define page_cache_release(a) do {} while (0)
-static inline void *cfs_page_address(cfs_page_t *page)
+static inline void *page_address(struct page *page)
{
return page->addr;
}
-static inline void *cfs_kmap(cfs_page_t *page)
+static inline void *kmap(struct page *page)
{
return page->addr;
}
-static inline void cfs_kunmap(cfs_page_t *page)
+static inline void kunmap(struct page *page)
{
return;
}
-static inline void cfs_get_page(cfs_page_t *page)
+static inline void get_page(struct page *page)
{
cfs_atomic_inc(&page->count);
}
-static inline void cfs_put_page(cfs_page_t *page)
+static inline void cfs_put_page(struct page *page)
{
cfs_atomic_dec(&page->count);
}
-static inline int cfs_page_count(cfs_page_t *page)
+static inline int page_count(struct page *page)
{
return cfs_atomic_read(&page->count);
}
-#define cfs_page_index(p) ((p)->index)
+#define page_index(p) ((p)->index)
/*
* Memory allocator
*/
-#define CFS_ALLOC_ATOMIC_TRY (0)
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void cfs_free(void *addr);
-
-#define kmalloc cfs_alloc
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void cfs_free_large(void *addr);
+#define ALLOC_ATOMIC_TRY (0)
+extern void *kmalloc(size_t nr_bytes, u_int32_t flags);
+extern void kfree(void *addr);
+extern void *vmalloc(size_t nr_bytes);
+extern void vfree(void *addr);
/*
* SLAB allocator
*/
-#define CFS_SLAB_HWCACHE_ALIGN 0
+#define SLAB_HWCACHE_ALIGN 0
/* The cache name is limited to 20 chars */
-struct cfs_mem_cache {
+struct kmem_cache {
char name[20];
ulong_ptr_t flags;
NPAGED_LOOKASIDE_LIST npll;
};
-extern cfs_mem_cache_t *cfs_mem_cache_create (const char *, size_t, size_t,
- unsigned long);
-extern int cfs_mem_cache_destroy (cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc (cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free (cfs_mem_cache_t *, void *);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long, void *);
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, int);
+extern void kmem_cache_free(struct kmem_cache *, void *);
/*
* shrinker
*/
typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
-struct cfs_shrinker {
+struct shrinker {
shrink_callback cb;
int seeks; /* seeks to recreate an obj */
long nr; /* objs pending delete */
};
-struct cfs_shrinker *cfs_set_shrinker(int seeks, shrink_callback cb);
-void cfs_remove_shrinker(struct cfs_shrinker *s);
+struct shrinker *set_shrinker(int seeks, shrink_callback cb);
+void remove_shrinker(struct shrinker *s);
int start_shrinker_timer();
void stop_shrinker_timer();
* Page allocator slabs
*/
-extern cfs_mem_cache_t *cfs_page_t_slab;
-extern cfs_mem_cache_t *cfs_page_p_slab;
+extern struct kmem_cache *cfs_page_t_slab;
+extern struct kmem_cache *cfs_page_p_slab;
-#define CFS_DECL_MMSPACE
-#define CFS_MMSPACE_OPEN do {} while(0)
-#define CFS_MMSPACE_CLOSE do {} while(0)
+#define DECL_MMSPACE
+#define MMSPACE_OPEN do {} while (0)
+#define MMSPACE_CLOSE do {} while (0)
#define cfs_mb() do {} while(0)
* MM defintions from (linux/mm.h)
*/
-#define CFS_DEFAULT_SEEKS 2 /* shrink seek */
+#define DEFAULT_SEEKS 2 /* shrink seek */
#else /* !__KERNEL__ */
{
cfs_group_info_t * groupinfo;
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
- groupinfo =
- (cfs_group_info_t *)cfs_alloc(sizeof(cfs_group_info_t), 0);
+ groupinfo = kmalloc(sizeof(cfs_group_info_t), 0);
if (groupinfo) {
memset(groupinfo, 0, sizeof(cfs_group_info_t));
}
return groupinfo;
}
+
static __inline void cfs_groups_free(cfs_group_info_t *group_info)
{
- KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
- __FUNCTION__));
- cfs_free(group_info);
+ KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+ __FUNCTION__));
+ kfree(group_info);
}
+
static __inline int
cfs_set_current_groups(cfs_group_info_t *group_info)
{
__FUNCTION__));
return 0;
}
+
static __inline int groups_search(cfs_group_info_t *group_info,
gid_t grp) {
KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
spinlock_t Lock; /* Protection lock */
- cfs_mem_cache_t *slab; /* Memory slab for task slot */
+ struct kmem_cache *slab; /* Memory slab for task slot */
ULONG NumOfTasks; /* Total tasks (threads) */
LIST_ENTRY TaskList; /* List of task slots */
int ksnd_ntconns; /* number of tconns in list */
cfs_list_t ksnd_tconns; /* tdi connections list */
- cfs_mem_cache_t *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/
+ struct kmem_cache *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/
event_t ksnd_tconn_exit; /* event signal by last tconn */
spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
int ksnd_ntsdus; /* number of tsdu buffers allocated */
ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */
- cfs_mem_cache_t *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
+ struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */
cfs_list_t ksnd_freetsdus; /* List of the freed Tsdu buffer. */
static struct cfs_zone_nob cfs_zone_nob;
static spinlock_t cfs_zone_guard;
-cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
+struct kmem_cache *mem_cache_find(const char *name, size_t objsize)
{
- cfs_mem_cache_t *walker = NULL;
+ struct kmem_cache *walker = NULL;
LASSERT(cfs_zone_nob.z_nob != NULL);
* survives kext unloading, so that @name cannot be just static string
* embedded into kext image.
*/
-cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t objsize, const char *name)
{
- cfs_mem_cache_t *mc = NULL;
+ struct kmem_cache *mc = NULL;
char *cname;
- MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+ MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
if (mc == NULL){
CERROR("cfs_mem_cache created fail!\n");
return NULL;
return mc;
}
-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
{
/*
* zone can NOT be destroyed after creating,
#else /* !CFS_INDIVIDUAL_ZONE */
-cfs_mem_cache_t *
+struct kmem_cache *
mem_cache_find(const char *name, size_t objsize)
{
return NULL;
}
-cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t size, const char *name)
{
- cfs_mem_cache_t *mc = NULL;
+ struct kmem_cache *mc = NULL;
- MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+ MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
if (mc == NULL){
CERROR("cfs_mem_cache created fail!\n");
return NULL;
return mc;
}
-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
{
OSMalloc_Tagfree(mc->mc_cache);
FREE(mc, M_TEMP);
#endif /* !CFS_INDIVIDUAL_ZONE */
-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name,
- size_t objsize, size_t off, unsigned long arg1)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+ unsigned long arg1, void *ctor)
{
- cfs_mem_cache_t *mc;
+ struct kmem_cache *mc;
- mc = mem_cache_find(name, objsize);
- if (mc)
- return mc;
- mc = mem_cache_create(objsize, name);
+ mc = mem_cache_find(name, objsize);
+ if (mc)
+ return mc;
+ mc = mem_cache_create(objsize, name);
return mc;
}
-int cfs_mem_cache_destroy (cfs_mem_cache_t *cachep)
+void kmem_cache_destroy(struct kmem_cache *cachep)
{
- mem_cache_destroy(cachep);
- return 0;
+ mem_cache_destroy(cachep);
}
-void *cfs_mem_cache_alloc (cfs_mem_cache_t *cachep, int flags)
+void *kmem_cache_alloc (struct kmem_cache *cachep, int flags)
{
- void *result;
+ void *result;
- /* zalloc_canblock() is not exported... Emulate it. */
- if (flags & CFS_ALLOC_ATOMIC) {
- result = (void *)mem_cache_alloc_nb(cachep);
- } else {
- LASSERT(get_preemption_level() == 0);
- result = (void *)mem_cache_alloc(cachep);
- }
- if (result != NULL && (flags & CFS_ALLOC_ZERO))
- memset(result, 0, cachep->mc_size);
+ /* zalloc_canblock() is not exported... Emulate it. */
+ if (flags & GFP_ATOMIC) {
+ result = (void *)mem_cache_alloc_nb(cachep);
+ } else {
+ LASSERT(get_preemption_level() == 0);
+ result = (void *)mem_cache_alloc(cachep);
+ }
+ if (result != NULL && (flags & __GFP_ZERO))
+ memset(result, 0, cachep->mc_size);
- return result;
+ return result;
}
-void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp)
+void kmem_cache_free (struct kmem_cache *cachep, void *objp)
{
- mem_cache_free(cachep, objp);
+ mem_cache_free(cachep, objp);
}
/* ---------------------------------------------------------------------------
* "Raw" pages
*/
-static unsigned int raw_pages = 0;
-static cfs_mem_cache_t *raw_page_cache = NULL;
+static unsigned int raw_pages;
+static struct kmem_cache *raw_page_cache;
static struct xnu_page_ops raw_page_ops;
static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = {
};
#if defined(LIBCFS_DEBUG)
-static int page_type_is_valid(cfs_page_t *page)
+static int page_type_is_valid(struct page *page)
{
- LASSERT(page != NULL);
- return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
+ LASSERT(page != NULL);
+ return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
}
-static int page_is_raw(cfs_page_t *page)
+static int page_is_raw(struct page *page)
{
- return page->type == XNU_PAGE_RAW;
+ return page->type == XNU_PAGE_RAW;
}
#endif
-static struct xnu_raw_page *as_raw(cfs_page_t *page)
+static struct xnu_raw_page *as_raw(struct page *page)
{
- LASSERT(page_is_raw(page));
- return list_entry(page, struct xnu_raw_page, header);
+ LASSERT(page_is_raw(page));
+ return list_entry(page, struct xnu_raw_page, header);
}
-static void *raw_page_address(cfs_page_t *pg)
+static void *raw_page_address(struct page *pg)
{
- return (void *)as_raw(pg)->virtual;
+ return (void *)as_raw(pg)->virtual;
}
-static void *raw_page_map(cfs_page_t *pg)
+static void *raw_page_map(struct page *pg)
{
- return (void *)as_raw(pg)->virtual;
+ return (void *)as_raw(pg)->virtual;
}
-static void raw_page_unmap(cfs_page_t *pg)
+static void raw_page_unmap(struct page *pg)
{
}
static void raw_page_finish(struct xnu_raw_page *pg)
{
- -- raw_pages;
- if (pg->virtual != NULL)
- cfs_mem_cache_free(raw_page_cache, pg->virtual);
- cfs_free(pg);
+ --raw_pages;
+ if (pg->virtual != NULL)
+ kmem_cache_free(raw_page_cache, pg->virtual);
+ kfree(pg);
}
void raw_page_death_row_clean(void)
/*
* kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
* block. (raw_page_done()->upl_abort() can block too) On the other
- * hand, cfs_free_page() may be called in non-blockable context. To
+ * hand, __free_page() may be called in non-blockable context. To
* work around this, park pages on global list when cannot block.
*/
if (get_preemption_level() > 0) {
}
}
-cfs_page_t *cfs_alloc_page(u_int32_t flags)
+struct page *alloc_page(u_int32_t flags)
{
- struct xnu_raw_page *page;
+ struct xnu_raw_page *page;
- /*
- * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
+ /*
+ * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
- page = cfs_alloc(sizeof *page, flags);
- if (page != NULL) {
- page->virtual = cfs_mem_cache_alloc(raw_page_cache, flags);
- if (page->virtual != NULL) {
- ++ raw_pages;
- page->header.type = XNU_PAGE_RAW;
- atomic_set(&page->count, 1);
- } else {
- cfs_free(page);
- page = NULL;
- }
- }
- return page != NULL ? &page->header : NULL;
+ page = kmalloc(sizeof *page, flags);
+ if (page != NULL) {
+ page->virtual = kmem_cache_alloc(raw_page_cache, flags);
+ if (page->virtual != NULL) {
+ ++raw_pages;
+ page->header.type = XNU_PAGE_RAW;
+ atomic_set(&page->count, 1);
+ } else {
+ kfree(page);
+ page = NULL;
+ }
+ }
+ return page != NULL ? &page->header : NULL;
}
-void cfs_free_page(cfs_page_t *pages)
+void __free_page(struct page *pages)
{
- free_raw_page(as_raw(pages));
+ free_raw_page(as_raw(pages));
}
-void cfs_get_page(cfs_page_t *p)
+void get_page(struct page *p)
{
- atomic_inc(&as_raw(p)->count);
+ atomic_inc(&as_raw(p)->count);
}
-int cfs_put_page_testzero(cfs_page_t *p)
+int cfs_put_page_testzero(struct page *p)
{
return atomic_dec_and_test(&as_raw(p)->count);
}
-int cfs_page_count(cfs_page_t *p)
+int page_count(struct page *p)
{
- return atomic_read(&as_raw(p)->count);
+ return atomic_read(&as_raw(p)->count);
}
/*
* Generic page operations
*/
-void *cfs_page_address(cfs_page_t *pg)
+void *page_address(struct page *pg)
{
- /*
- * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- LASSERT(page_type_is_valid(pg));
- return page_ops[pg->type]->page_address(pg);
+ /*
+ * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
+ LASSERT(page_type_is_valid(pg));
+ return page_ops[pg->type]->page_address(pg);
}
-void *cfs_kmap(cfs_page_t *pg)
+void *kmap(struct page *pg)
{
- LASSERT(page_type_is_valid(pg));
- return page_ops[pg->type]->page_map(pg);
+ LASSERT(page_type_is_valid(pg));
+ return page_ops[pg->type]->page_map(pg);
}
-void cfs_kunmap(cfs_page_t *pg)
+void kunmap(struct page *pg)
{
- LASSERT(page_type_is_valid(pg));
- page_ops[pg->type]->page_unmap(pg);
+ LASSERT(page_type_is_valid(pg));
+ page_ops[pg->type]->page_unmap(pg);
}
void xnu_page_ops_register(int type, struct xnu_page_ops *ops)
#define get_preemption_level() (0)
#endif
-void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+void *kmalloc(size_t nr_bytes, u_int32_t flags)
{
- int mflags;
+ int mflags;
- mflags = 0;
- if (flags & CFS_ALLOC_ATOMIC) {
- mflags |= M_NOWAIT;
- } else {
- LASSERT(get_preemption_level() == 0);
- mflags |= M_WAITOK;
- }
+ mflags = 0;
+ if (flags & GFP_ATOMIC) {
+ mflags |= M_NOWAIT;
+ } else {
+ LASSERT(get_preemption_level() == 0);
+ mflags |= M_WAITOK;
+ }
- if (flags & CFS_ALLOC_ZERO)
- mflags |= M_ZERO;
+ if (flags & __GFP_ZERO)
+ mflags |= M_ZERO;
- return _MALLOC(nr_bytes, M_TEMP, mflags);
+ return _MALLOC(nr_bytes, M_TEMP, mflags);
}
-void cfs_free(void *addr)
+void kfree(void *addr)
{
- return _FREE(addr, M_TEMP);
+ return _FREE(addr, M_TEMP);
}
-void *cfs_alloc_large(size_t nr_bytes)
+void *vmalloc(size_t nr_bytes)
{
- LASSERT(get_preemption_level() == 0);
- return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
+ LASSERT(get_preemption_level() == 0);
+ return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
}
-void cfs_free_large(void *addr)
+void vfree(void *addr)
{
- LASSERT(get_preemption_level() == 0);
- return _FREE(addr, M_TEMP);
+ LASSERT(get_preemption_level() == 0);
+ return _FREE(addr, M_TEMP);
}
/*
#endif
CFS_INIT_LIST_HEAD(&page_death_row);
spin_lock_init(&page_death_row_phylax);
- raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+ raw_page_cache = kmem_cache_create("raw-page", PAGE_CACHE_SIZE,
+ 0, 0, NULL);
return 0;
}
{
raw_page_death_row_clean();
spin_lock_done(&page_death_row_phylax);
- cfs_mem_cache_destroy(raw_page_cache);
+ kmem_cache_destroy(raw_page_cache);
#if CFS_INDIVIDUAL_ZONE
cfs_zone_nob.z_nob = NULL;
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
toobig = 1;
- nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
toobig = 1;
- nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}
tcd = &trace_data[0].tcd;
CFS_INIT_LIST_HEAD(&pages);
if (get_preemption_level() == 0)
- nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
+ nr_pages = trace_refill_stock(tcd, GFP_IOFS, &pages);
else
nr_pages = 0;
spin_lock(&trace_cpu_serializer);
max = TCD_MAX_PAGES;
} else {
max = (max / cfs_num_possible_cpus());
- max = (max << (20 - CFS_PAGE_SHIFT));
+ max = (max << (20 - PAGE_CACHE_SHIFT));
}
rc = cfs_tracefile_init(max);
do { \
if ((h)->cbh_flags & CBH_FLAG_ATOMIC_GROW) \
LIBCFS_CPT_ALLOC_GFP((ptr), h->cbh_cptab, h->cbh_cptid, \
- CBH_NOB, CFS_ALLOC_ATOMIC); \
+ CBH_NOB, GFP_ATOMIC); \
else \
LIBCFS_CPT_ALLOC((ptr), h->cbh_cptab, h->cbh_cptid, \
CBH_NOB); \
return -EBADF;
/* freed in group_rem */
- reg = cfs_alloc(sizeof(*reg), 0);
+ reg = kmalloc(sizeof(*reg), 0);
if (reg == NULL)
return -ENOMEM;
reg->kr_uid, reg->kr_fp, group);
if (reg->kr_fp != NULL)
fput(reg->kr_fp);
- cfs_free(reg);
+ kfree(reg);
}
}
up_write(&kg_sem);
lenz = strlen(str) + 1;
- dup_str = cfs_alloc(lenz, flags);
+ dup_str = kmalloc(lenz, flags);
if (dup_str == NULL)
return NULL;
int err;
const struct cfs_crypto_hash_type *type;
- hdesc = cfs_alloc(sizeof(*hdesc), 0);
+ hdesc = kmalloc(sizeof(*hdesc), 0);
if (hdesc == NULL)
return ERR_PTR(-ENOMEM);
err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
if (err) {
- cfs_free(hdesc);
+ kfree(hdesc);
return ERR_PTR(err);
}
return (struct cfs_crypto_hash_desc *)hdesc;
EXPORT_SYMBOL(cfs_crypto_hash_init);
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
- cfs_page_t *page, unsigned int offset,
+ struct page *page, unsigned int offset,
unsigned int len)
{
struct scatterlist sl;
if (hash_len == NULL) {
crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- cfs_free(hdesc);
+ kfree(hdesc);
return 0;
}
if (hash == NULL || *hash_len < size) {
return err;
}
crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- cfs_free(hdesc);
+ kfree(hdesc);
return err;
}
EXPORT_SYMBOL(cfs_crypto_hash_final);
* kmalloc size for 2.6.18 kernel is 128K */
unsigned int data_len = 1 * 128 * 1024;
- data = cfs_alloc(data_len, 0);
+ data = kmalloc(data_len, 0);
if (data == NULL) {
CERROR("Failed to allocate mem\n");
return -ENOMEM;
for (i = 0; i < CFS_HASH_ALG_MAX; i++)
cfs_crypto_performance_test(i, data, data_len);
- cfs_free(data);
+ kfree(data);
return 0;
}
{
struct mm_struct *mm;
char *buffer, *tmp_buf = NULL;
- int buf_len = CFS_PAGE_SIZE;
+ int buf_len = PAGE_CACHE_SIZE;
int key_len = strlen(key);
unsigned long addr;
int rc;
ENTRY;
- buffer = cfs_alloc(buf_len, CFS_ALLOC_USER);
+ buffer = kmalloc(buf_len, GFP_USER);
if (!buffer)
RETURN(-ENOMEM);
mm = get_task_mm(current);
if (!mm) {
- cfs_free(buffer);
+ kfree(buffer);
RETURN(-EINVAL);
}
out:
mmput(mm);
- cfs_free((void *)buffer);
+ kfree((void *)buffer);
if (tmp_buf)
- cfs_free((void *)tmp_buf);
+ kfree((void *)tmp_buf);
return rc;
}
EXPORT_SYMBOL(cfs_get_environ);
#include <linux/highmem.h>
#include <libcfs/libcfs.h>
-static unsigned int cfs_alloc_flags_to_gfp(u_int32_t flags)
-{
- unsigned int mflags = 0;
-
- if (flags & CFS_ALLOC_ATOMIC)
- mflags |= __GFP_HIGH;
- else
- mflags |= __GFP_WAIT;
- if (flags & CFS_ALLOC_NOWARN)
- mflags |= __GFP_NOWARN;
- if (flags & CFS_ALLOC_IO)
- mflags |= __GFP_IO;
- if (flags & CFS_ALLOC_FS)
- mflags |= __GFP_FS;
- if (flags & CFS_ALLOC_HIGHMEM)
- mflags |= __GFP_HIGHMEM;
- return mflags;
-}
-
-void *
-cfs_alloc(size_t nr_bytes, u_int32_t flags)
-{
- void *ptr = NULL;
-
- ptr = kmalloc(nr_bytes, cfs_alloc_flags_to_gfp(flags));
- if (ptr != NULL && (flags & CFS_ALLOC_ZERO))
- memset(ptr, 0, nr_bytes);
- return ptr;
-}
-
-void
-cfs_free(void *addr)
-{
- kfree(addr);
-}
-
-void *
-cfs_alloc_large(size_t nr_bytes)
-{
- return vmalloc(nr_bytes);
-}
-
-void
-cfs_free_large(void *addr)
-{
- vfree(addr);
-}
-
-cfs_page_t *cfs_alloc_page(unsigned int flags)
-{
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- return alloc_page(cfs_alloc_flags_to_gfp(flags));
-}
-
-void cfs_free_page(cfs_page_t *page)
-{
- __free_page(page);
-}
-
-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name, size_t size, size_t offset,
- unsigned long flags)
-{
-#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
- return kmem_cache_create(name, size, offset, flags, NULL, NULL);
-#else
- return kmem_cache_create(name, size, offset, flags, NULL);
-#endif
-}
-
-int
-cfs_mem_cache_destroy (cfs_mem_cache_t * cachep)
-{
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
- return kmem_cache_destroy(cachep);
-#else
- kmem_cache_destroy(cachep);
- return 0;
-#endif
-}
-
-void *
-cfs_mem_cache_alloc(cfs_mem_cache_t *cachep, int flags)
-{
- return kmem_cache_alloc(cachep, cfs_alloc_flags_to_gfp(flags));
-}
-
-void
-cfs_mem_cache_free(cfs_mem_cache_t *cachep, void *objp)
-{
- return kmem_cache_free(cachep, objp);
-}
-
-/**
- * Returns true if \a addr is an address of an allocated object in a slab \a
- * kmem. Used in assertions. This check is optimistically imprecise, i.e., it
- * occasionally returns true for the incorrect addresses, but if it returns
- * false, then the addresses is guaranteed to be incorrect.
- */
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
-{
-#ifdef CONFIG_SLAB
- struct page *page;
-
- /*
- * XXX Copy of mm/slab.c:virt_to_cache(). It won't work with other
- * allocators, like slub and slob.
- */
- page = virt_to_page(addr);
- if (unlikely(PageCompound(page)))
- page = (struct page *)page->private;
- return PageSlab(page) && ((void *)page->lru.next) == kmem;
-#else
- return 1;
-#endif
-}
-EXPORT_SYMBOL(cfs_mem_is_in_cache);
-
-
-EXPORT_SYMBOL(cfs_alloc);
-EXPORT_SYMBOL(cfs_free);
-EXPORT_SYMBOL(cfs_alloc_large);
-EXPORT_SYMBOL(cfs_free_large);
-EXPORT_SYMBOL(cfs_alloc_page);
-EXPORT_SYMBOL(cfs_free_page);
-EXPORT_SYMBOL(cfs_mem_cache_create);
-EXPORT_SYMBOL(cfs_mem_cache_destroy);
-EXPORT_SYMBOL(cfs_mem_cache_alloc);
-EXPORT_SYMBOL(cfs_mem_cache_free);
-
-/*
- * NB: we will rename some of above functions in another patch:
- * - rename cfs_alloc to cfs_malloc
- * - rename cfs_alloc/free_page to cfs_page_alloc/free
- * - rename cfs_alloc/free_large to cfs_vmalloc/vfree
- */
-
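For out-of-tree callers doing the same conversion by hand, a minimal
before/after sketch (variable names are illustrative; the GFP mappings
follow the cfs_alloc_flags_to_gfp() table removed above and the
substitutions used elsewhere in this patch):

	/* before */                             /* after */
	ptr = cfs_alloc(nob, CFS_ALLOC_ZERO);    ptr = kmalloc(nob, __GFP_WAIT | __GFP_ZERO);
	ptr = cfs_alloc(nob, CFS_ALLOC_ATOMIC);  ptr = kmalloc(nob, GFP_ATOMIC);
	big = cfs_alloc_large(nob);              big = vmalloc(nob);
	pg  = cfs_alloc_page(CFS_ALLOC_STD);     pg  = alloc_page(GFP_IOFS);
	cfs_free(ptr);                           kfree(ptr);
	cfs_free_large(big);                     vfree(big);
	cfs_free_page(pg);                       __free_page(pg);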
void *
cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
size_t nr_bytes, unsigned int flags)
{
void *ptr;
- ptr = kmalloc_node(nr_bytes, cfs_alloc_flags_to_gfp(flags),
+ ptr = kmalloc_node(nr_bytes, flags,
cfs_cpt_spread_node(cptab, cpt));
- if (ptr != NULL && (flags & CFS_ALLOC_ZERO) != 0)
+ if (ptr != NULL && (flags & __GFP_ZERO) != 0)
memset(ptr, 0, nr_bytes);
return ptr;
}
EXPORT_SYMBOL(cfs_cpt_vmalloc);
-cfs_page_t *
+struct page *
cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags)
{
- return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt),
- cfs_alloc_flags_to_gfp(flags), 0);
+ return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
}
EXPORT_SYMBOL(cfs_page_cpt_alloc);
void *
-cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, struct cfs_cpt_table *cptab,
+cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
int cpt, unsigned int flags)
{
- return kmem_cache_alloc_node(cachep, cfs_alloc_flags_to_gfp(flags),
+ return kmem_cache_alloc_node(cachep, flags,
cfs_cpt_spread_node(cptab, cpt));
}
EXPORT_SYMBOL(cfs_mem_cache_cpt_alloc);
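As noted in the commit message, the NUMA-aware cfs_cpt_* allocators keep
their wrapper names; only their flags argument becomes a raw kernel GFP
mask. A hypothetical call site after this change (foo_cache, foo and cpt
are invented for illustration):

	struct foo *obj;

	obj = cfs_mem_cache_cpt_alloc(foo_cache, lnet_cpt_table(), cpt,
				      GFP_NOFS | __GFP_ZERO);
	if (obj == NULL)
		return -ENOMEM;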
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
- toobig = 1;
- nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
- CWARN("Too many interfaces: only enumerating first %d\n",
- nalloc);
- }
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+ toobig = 1;
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+ CWARN("Too many interfaces: only enumerating first %d\n",
+ nalloc);
+ }
LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
if (ifr == NULL) {
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT));
+ int total_mb = (num_physpages >> (20 - PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
*size = strnlen (knl_ptr, maxsize - 1) + 1;
- if (user_ptr != NULL) {
- if (user_size < 4)
- return (-EINVAL);
+ if (user_ptr != NULL) {
+ if (user_size < 4)
+ return -EINVAL;
- if (cfs_copy_to_user (user_ptr, knl_ptr, *size))
- return (-EFAULT);
+ if (copy_to_user(user_ptr, knl_ptr, *size))
+ return -EFAULT;
- /* Did I truncate the string? */
- if (knl_ptr[*size - 1] != 0)
- cfs_copy_to_user (user_ptr + *size - 4, "...", 4);
- }
+ /* Did I truncate the string? */
+ if (knl_ptr[*size - 1] != 0)
+ copy_to_user(user_ptr + *size - 4, "...", 4);
+ }
- return (0);
+ return 0;
}
int
continue;
for (j = 0; j < lwt_pages_per_cpu; j++) {
- memset (p->lwtp_events, 0, CFS_PAGE_SIZE);
+ memset(p->lwtp_events, 0, PAGE_CACHE_SIZE);
p = cfs_list_entry (p->lwtp_list.next,
lwt_page_t, lwtp_list);
}
int
-lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
- void *user_ptr, int user_size)
+lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size,
+ void *user_ptr, int user_size)
{
- const int events_per_page = CFS_PAGE_SIZE / sizeof(lwt_event_t);
- const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
- lwt_page_t *p;
- int i;
- int j;
+ const int events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
+ const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
+ lwt_page_t *p;
+ int i;
+ int j;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
return (-EPERM);
p = lwt_cpus[i].lwtc_current_page;
if (p == NULL)
- return (-ENODATA);
+ return -ENODATA;
- for (j = 0; j < lwt_pages_per_cpu; j++) {
- if (cfs_copy_to_user(user_ptr, p->lwtp_events,
- bytes_per_page))
- return (-EFAULT);
+ for (j = 0; j < lwt_pages_per_cpu; j++) {
+ if (copy_to_user(user_ptr, p->lwtp_events,
+ bytes_per_page))
+ return -EFAULT;
user_ptr = ((char *)user_ptr) + bytes_per_page;
p = cfs_list_entry(p->lwtp_list.next,
/* NULL pointers, zero scalars */
memset (lwt_cpus, 0, sizeof (lwt_cpus));
- lwt_pages_per_cpu =
- LWT_MEMORY / (cfs_num_online_cpus() * CFS_PAGE_SIZE);
+ lwt_pages_per_cpu =
+ LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);
for (i = 0; i < cfs_num_online_cpus(); i++)
for (j = 0; j < lwt_pages_per_cpu; j++) {
- struct page *page = alloc_page (GFP_KERNEL);
+ struct page *page = alloc_page(GFP_KERNEL);
lwt_page_t *lwtp;
if (page == NULL) {
lwtp->lwtp_page = page;
lwtp->lwtp_events = page_address(page);
- memset (lwtp->lwtp_events, 0, CFS_PAGE_SIZE);
+ memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
if (j == 0) {
CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
void
kportal_memhog_free (struct libcfs_device_userstate *ldu)
{
- cfs_page_t **level0p = &ldu->ldu_memhog_root_page;
- cfs_page_t **level1p;
- cfs_page_t **level2p;
- int count1;
- int count2;
+ struct page **level0p = &ldu->ldu_memhog_root_page;
+ struct page **level1p;
+ struct page **level2p;
+ int count1;
+ int count2;
- if (*level0p != NULL) {
+ if (*level0p != NULL) {
+ level1p = (struct page **)page_address(*level0p);
+ count1 = 0;
- level1p = (cfs_page_t **)cfs_page_address(*level0p);
- count1 = 0;
+ while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+ *level1p != NULL) {
- while (count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) &&
- *level1p != NULL) {
+ level2p = (struct page **)page_address(*level1p);
+ count2 = 0;
- level2p = (cfs_page_t **)cfs_page_address(*level1p);
- count2 = 0;
+ while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+ *level2p != NULL) {
- while (count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) &&
- *level2p != NULL) {
+ __free_page(*level2p);
+ ldu->ldu_memhog_pages--;
+ level2p++;
+ count2++;
+ }
- cfs_free_page(*level2p);
- ldu->ldu_memhog_pages--;
- level2p++;
- count2++;
- }
-
- cfs_free_page(*level1p);
- ldu->ldu_memhog_pages--;
- level1p++;
- count1++;
- }
+ __free_page(*level1p);
+ ldu->ldu_memhog_pages--;
+ level1p++;
+ count1++;
+ }
- cfs_free_page(*level0p);
- ldu->ldu_memhog_pages--;
+ __free_page(*level0p);
+ ldu->ldu_memhog_pages--;
- *level0p = NULL;
- }
+ *level0p = NULL;
+ }
- LASSERT (ldu->ldu_memhog_pages == 0);
+ LASSERT(ldu->ldu_memhog_pages == 0);
}
int
kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags)
{
- cfs_page_t **level0p;
- cfs_page_t **level1p;
- cfs_page_t **level2p;
- int count1;
- int count2;
+ struct page **level0p;
+ struct page **level1p;
+ struct page **level2p;
+ int count1;
+ int count2;
- LASSERT (ldu->ldu_memhog_pages == 0);
- LASSERT (ldu->ldu_memhog_root_page == NULL);
+ LASSERT(ldu->ldu_memhog_pages == 0);
+ LASSERT(ldu->ldu_memhog_root_page == NULL);
- if (npages < 0)
- return -EINVAL;
+ if (npages < 0)
+ return -EINVAL;
- if (npages == 0)
- return 0;
+ if (npages == 0)
+ return 0;
- level0p = &ldu->ldu_memhog_root_page;
- *level0p = cfs_alloc_page(flags);
- if (*level0p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
+ level0p = &ldu->ldu_memhog_root_page;
+ *level0p = alloc_page(flags);
+ if (*level0p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
- level1p = (cfs_page_t **)cfs_page_address(*level0p);
- count1 = 0;
- memset(level1p, 0, CFS_PAGE_SIZE);
+ level1p = (struct page **)page_address(*level0p);
+ count1 = 0;
+ memset(level1p, 0, PAGE_CACHE_SIZE);
- while (ldu->ldu_memhog_pages < npages &&
- count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) {
+ while (ldu->ldu_memhog_pages < npages &&
+ count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
- if (cfs_signal_pending())
- return (-EINTR);
+ if (cfs_signal_pending())
+ return -EINTR;
- *level1p = cfs_alloc_page(flags);
- if (*level1p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
+ *level1p = alloc_page(flags);
+ if (*level1p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
- level2p = (cfs_page_t **)cfs_page_address(*level1p);
- count2 = 0;
- memset(level2p, 0, CFS_PAGE_SIZE);
+ level2p = (struct page **)page_address(*level1p);
+ count2 = 0;
+ memset(level2p, 0, PAGE_CACHE_SIZE);
- while (ldu->ldu_memhog_pages < npages &&
- count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) {
+ while (ldu->ldu_memhog_pages < npages &&
+ count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
- if (cfs_signal_pending())
- return (-EINTR);
+ if (cfs_signal_pending())
+ return -EINTR;
- *level2p = cfs_alloc_page(flags);
- if (*level2p == NULL)
- return (-ENOMEM);
- ldu->ldu_memhog_pages++;
+ *level2p = alloc_page(flags);
+ if (*level2p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
- level2p++;
- count2++;
- }
+ level2p++;
+ count2++;
+ }
- level1p++;
- count1++;
- }
+ level1p++;
+ count1++;
+ }
- return 0;
+ return 0;
}
/* called when opening /dev/device */
RETURN(err);
}
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
+static int libcfs_ioctl(struct cfs_psdev_file *pfile,
+ unsigned long cmd, void *arg)
{
- char *buf;
- struct libcfs_ioctl_data *data;
- int err = 0;
- ENTRY;
-
- LIBCFS_ALLOC_GFP(buf, 1024, CFS_ALLOC_STD);
- if (buf == NULL)
- RETURN(-ENOMEM);
+ char *buf;
+ struct libcfs_ioctl_data *data;
+ int err = 0;
+ ENTRY;
+
+ LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
+ if (buf == NULL)
+ RETURN(-ENOMEM);
/* 'cmd' and permissions get checked in our arch-specific caller */
if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
const char *format1, va_list args,
const char *format2, ...)
{
- struct timeval tv;
- int nob;
- int remain;
- va_list ap;
- char buf[CFS_PAGE_SIZE]; /* size 4096 used for compatimble
- * with linux, where message can`t
- * be exceed PAGE_SIZE */
+ struct timeval tv;
+ int nob;
+ int remain;
+ va_list ap;
+	char		buf[PAGE_CACHE_SIZE]; /* size 4096 used for
+					       * compatibility with Linux,
+					       * where a message can't
+					       * exceed PAGE_SIZE */
int console = 0;
char *prefix = "Lustre";
static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
- cfs_page_t *page;
- struct cfs_trace_page *tage;
-
- /* My caller is trying to free memory */
- if (!cfs_in_interrupt() && cfs_memory_pressure_get())
- return NULL;
-
- /*
- * Don't spam console with allocation failures: they will be reported
- * by upper layer anyway.
- */
- gfp |= CFS_ALLOC_NOWARN;
- page = cfs_alloc_page(gfp);
- if (page == NULL)
- return NULL;
+ struct page *page;
+ struct cfs_trace_page *tage;
- tage = cfs_alloc(sizeof(*tage), gfp);
- if (tage == NULL) {
- cfs_free_page(page);
- return NULL;
- }
+ /* My caller is trying to free memory */
+ if (!cfs_in_interrupt() && memory_pressure_get())
+ return NULL;
+
+ /*
+ * Don't spam console with allocation failures: they will be reported
+ * by upper layer anyway.
+ */
+ gfp |= __GFP_NOWARN;
+ page = alloc_page(gfp);
+ if (page == NULL)
+ return NULL;
+
+ tage = kmalloc(sizeof(*tage), gfp);
+ if (tage == NULL) {
+ __free_page(page);
+ return NULL;
+ }
- tage->page = page;
- cfs_atomic_inc(&cfs_tage_allocated);
- return tage;
+ tage->page = page;
+ cfs_atomic_inc(&cfs_tage_allocated);
+ return tage;
}
static void cfs_tage_free(struct cfs_trace_page *tage)
{
- __LASSERT(tage != NULL);
- __LASSERT(tage->page != NULL);
+ __LASSERT(tage != NULL);
+ __LASSERT(tage->page != NULL);
- cfs_free_page(tage->page);
- cfs_free(tage);
- cfs_atomic_dec(&cfs_tage_allocated);
+ __free_page(tage->page);
+ kfree(tage);
+ cfs_atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!cfs_list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= CFS_PAGE_SIZE)
+ if (tage->used + len <= PAGE_CACHE_SIZE)
return tage;
}
--tcd->tcd_cur_stock_pages;
cfs_list_del_init(&tage->linkage);
} else {
- tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
+ tage = cfs_tage_alloc(GFP_ATOMIC);
if (unlikely(tage == NULL)) {
- if ((!cfs_memory_pressure_get() ||
+ if ((!memory_pressure_get() ||
cfs_in_interrupt()) && printk_ratelimit())
printk(CFS_KERN_WARNING
"cannot allocate a tage (%ld)\n",
* from here: this will lead to infinite recursion.
*/
- if (len > CFS_PAGE_SIZE) {
+ if (len > PAGE_CACHE_SIZE) {
printk(CFS_KERN_ERR
"cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (tage == NULL) {
- if (needed + known_size > CFS_PAGE_SIZE)
+ if (needed + known_size > PAGE_CACHE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
goto console;
}
- string_buf = (char *)cfs_page_address(tage->page) +
+ string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
- max_nob = CFS_PAGE_SIZE - tage->used - known_size;
+ max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(CFS_KERN_EMERG "negative max_nob: %d\n",
max_nob);
"newline\n", file, msgdata->msg_line, msgdata->msg_fn);
header.ph_len = known_size + needed;
- debug_buf = (char *)cfs_page_address(tage->page) + tage->used;
+ debug_buf = (char *)page_address(tage->page) + tage->used;
if (libcfs_debug_binary) {
memcpy(debug_buf, &header, sizeof(header));
__LASSERT(debug_buf == string_buf);
tage->used += needed;
- __LASSERT (tage->used <= CFS_PAGE_SIZE);
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
collect_pages(&pc);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page, linkage) {
- char *p, *file, *fn;
- cfs_page_t *page;
+ char *p, *file, *fn;
+ struct page *page;
- __LASSERT_TAGE_INVARIANT(tage);
+ __LASSERT_TAGE_INVARIANT(tage);
- page = tage->page;
- p = cfs_page_address(page);
- while (p < ((char *)cfs_page_address(page) + tage->used)) {
+ page = tage->page;
+ p = page_address(page);
+ while (p < ((char *)page_address(page) + tage->used)) {
struct ptldebug_header *hdr;
int len;
hdr = (void *)p;
struct cfs_trace_page *tmp;
int rc;
- CFS_DECL_MMSPACE;
+ DECL_MMSPACE;
cfs_tracefile_write_lock();
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO */
- CFS_MMSPACE_OPEN;
+ MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
- rc = filp_write(filp, cfs_page_address(tage->page),
+ rc = filp_write(filp, page_address(tage->page),
tage->used, filp_poff(filp));
if (rc != (int)tage->used) {
printk(CFS_KERN_WARNING "wanted to write %u but wrote "
cfs_list_del(&tage->linkage);
cfs_tage_free(tage);
}
- CFS_MMSPACE_CLOSE;
+ MMSPACE_CLOSE;
rc = filp_fsync(filp);
if (rc)
printk(CFS_KERN_ERR "sync returns %d\n", rc);
if (usr_buffer_nob > knl_buffer_nob)
return -EOVERFLOW;
- if (cfs_copy_from_user((void *)knl_buffer,
+ if (copy_from_user((void *)knl_buffer,
(void *)usr_buffer, usr_buffer_nob))
return -EFAULT;
if (nob > usr_buffer_nob)
nob = usr_buffer_nob;
- if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
+ if (copy_to_user(usr_buffer, knl_buffer, nob))
return -EFAULT;
if (append != NULL && nob < usr_buffer_nob) {
- if (cfs_copy_to_user(usr_buffer + nob, append, 1))
+ if (copy_to_user(usr_buffer + nob, append, 1))
return -EFAULT;
nob++;
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
- if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */
+ if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
return -EINVAL;
- *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
+ *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
if (*str == NULL)
return -ENOMEM;
void cfs_trace_free_string_buffer(char *str, int nob)
{
- cfs_free(str);
+ kfree(str);
}
int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
}
mb /= cfs_num_possible_cpus();
- pages = mb << (20 - CFS_PAGE_SHIFT);
+ pages = mb << (20 - PAGE_CACHE_SHIFT);
cfs_tracefile_write_lock();
cfs_tracefile_read_unlock();
- return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
+ return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
int last_loop = 0;
int rc;
- CFS_DECL_MMSPACE;
+ DECL_MMSPACE;
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
goto end_loop;
}
- CFS_MMSPACE_OPEN;
+ MMSPACE_OPEN;
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
struct cfs_trace_page,
else if (f_pos > (off_t)filp_size(filp))
f_pos = filp_size(filp);
- rc = filp_write(filp, cfs_page_address(tage->page),
+ rc = filp_write(filp, page_address(tage->page),
tage->used, &f_pos);
if (rc != (int)tage->used) {
printk(CFS_KERN_WARNING "wanted to write %u "
__LASSERT(cfs_list_empty(&pc.pc_pages));
}
}
- CFS_MMSPACE_CLOSE;
+ MMSPACE_CLOSE;
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
extern int libcfs_panic_in_progress;
extern int cfs_trace_max_debug_mb(void);
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
/*
* Private declare for tracefile
*/
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
/*
* page itself
*/
- cfs_page_t *page;
+ struct page *page;
/*
* linkage into one of the lists in trace_data_union or
* page_collection
do { \
__LASSERT(tage != NULL); \
__LASSERT(tage->page != NULL); \
- __LASSERT(tage->used <= CFS_PAGE_SIZE); \
- __LASSERT(cfs_page_count(tage->page) > 0); \
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+ __LASSERT(page_count(tage->page) > 0); \
} while (0)
#endif /* LUSTRE_TRACEFILE_PRIVATE */
return ERR_PTR(-ENODEV);
}
- hdesc = cfs_alloc(sizeof(*hdesc) + ha->ha_ctx_size, 0);
+ hdesc = kmalloc(sizeof(*hdesc) + ha->ha_ctx_size, 0);
if (hdesc == NULL)
return ERR_PTR(-ENOMEM);
if (err == 0) {
return (struct cfs_crypto_hash_desc *) hdesc;
} else {
- cfs_free(hdesc);
+ kfree(hdesc);
return ERR_PTR(err);
}
}
}
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
- cfs_page_t *page, unsigned int offset,
+ struct page *page, unsigned int offset,
unsigned int len)
{
const void *p = page->addr + offset;
int err;
if (hash_len == NULL) {
- cfs_free(d);
+ kfree(d);
return 0;
}
if (hash == NULL || *hash_len < size) {
err = d->hd_hash->final(d->hd_ctx, hash, *hash_len);
if (err == 0) {
/* If get final digest success free hash descriptor */
- cfs_free(d);
+ kfree(d);
}
return err;
unsigned char *data;
unsigned int j, data_len = 1024 * 1024;
- data = cfs_alloc(data_len, 0);
+ data = kmalloc(data_len, 0);
if (data == NULL) {
CERROR("Failed to allocate mem\n");
return -ENOMEM;
for (i = 0; i < CFS_HASH_ALG_MAX; i++)
cfs_crypto_performance_test(i, data, data_len);
- cfs_free(data);
+ kfree(data);
return 0;
}
* Allocator
*/
-cfs_page_t *cfs_alloc_page(unsigned int flags)
+struct page *alloc_page(unsigned int flags)
{
- cfs_page_t *pg = malloc(sizeof(*pg));
+ struct page *pg = malloc(sizeof(*pg));
int rc = 0;
if (!pg)
pg->addr = NULL;
#if defined (__DARWIN__)
- pg->addr = valloc(CFS_PAGE_SIZE);
+ pg->addr = valloc(PAGE_CACHE_SIZE);
#elif defined (__WINNT__)
pg->addr = pgalloc(0);
#else
- rc = posix_memalign(&pg->addr, CFS_PAGE_SIZE, CFS_PAGE_SIZE);
+ rc = posix_memalign(&pg->addr, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE);
#endif
if (rc != 0 || pg->addr == NULL) {
free(pg);
return pg;
}
-void cfs_free_page(cfs_page_t *pg)
+void __free_page(struct page *pg)
{
#if defined (__WINNT__)
pgfree(pg->addr);
free(pg);
}
-void *cfs_page_address(cfs_page_t *pg)
+void *page_address(struct page *pg)
{
return pg->addr;
}
-void *cfs_kmap(cfs_page_t *pg)
+void *kmap(struct page *pg)
{
return pg->addr;
}
-void cfs_kunmap(cfs_page_t *pg)
+void kunmap(struct page *pg)
{
}
* SLAB allocator
*/
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *name, size_t objsize, size_t off, unsigned long flags)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+ unsigned long flags, void *ctor)
{
- cfs_mem_cache_t *c;
+ struct kmem_cache *c;
c = malloc(sizeof(*c));
if (!c)
return c;
}
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c)
+void kmem_cache_destroy(struct kmem_cache *c)
{
CDEBUG(D_MALLOC, "destroy slab cache %p, objsize %u\n", c, c->size);
free(c);
- return 0;
}
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp)
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp)
{
- return cfs_alloc(c->size, gfp);
+ return kmalloc(c->size, gfp);
}
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr)
+void kmem_cache_free(struct kmem_cache *c, void *addr)
{
- cfs_free(addr);
+ kfree(addr);
}
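These userland shims now carry the kernel prototypes (manual change 5 in
the commit message), so shared code compiles unchanged; the ctor argument
is accepted as a plain void * and ignored. For comparison, the kernel
signatures being emulated, as of the kernel versions this patch targets,
are:

	struct kmem_cache *kmem_cache_create(const char *name, size_t size,
					     size_t align, unsigned long flags,
					     void (*ctor)(void *));
	void kmem_cache_destroy(struct kmem_cache *cachep);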
/**
* occasionally returns true for the incorrect addresses, but if it returns
* false, then the addresses is guaranteed to be incorrect.
*/
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
{
return 1;
}
 * task slot routines
*/
-PTASK_SLOT
-alloc_task_slot()
+PTASK_SLOT alloc_task_slot()
{
- PTASK_SLOT task = NULL;
-
- if (cfs_win_task_manger.slab) {
- task = cfs_mem_cache_alloc(cfs_win_task_manger.slab, 0);
- } else {
- task = cfs_alloc(sizeof(TASK_SLOT), 0);
- }
-
- return task;
+ if (cfs_win_task_manger.slab)
+ return kmem_cache_alloc(cfs_win_task_manger.slab, 0);
+ else
+ return kmalloc(sizeof(TASK_SLOT), 0);
}
void
cfs_init_event(&task->Event, TRUE, FALSE);
}
-void
-cleanup_task_slot(PTASK_SLOT task)
+void cleanup_task_slot(PTASK_SLOT task)
{
- if (task->task.pid) {
- cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid);
- }
+ if (task->task.pid)
+ cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid);
- if (cfs_win_task_manger.slab) {
- cfs_mem_cache_free(cfs_win_task_manger.slab, task);
- } else {
- cfs_free(task);
- }
+ if (cfs_win_task_manger.slab)
+ kmem_cache_free(cfs_win_task_manger.slab, task);
+ else
+ kfree(task);
}
/*
/* initialize the spinlock protection */
spin_lock_init(&cfs_win_task_manger.Lock);
- /* create slab memory cache */
- cfs_win_task_manger.slab = cfs_mem_cache_create(
- "TSLT", sizeof(TASK_SLOT), 0, 0);
+ /* create slab memory cache */
+ cfs_win_task_manger.slab = kmem_cache_create("TSLT", sizeof(TASK_SLOT),
+ 0, 0, NULL);
/* intialize the list header */
InitializeListHead(&(cfs_win_task_manger.TaskList));
spin_unlock(&cfs_win_task_manger.Lock);
- /* destroy the taskslot cache slab */
- cfs_mem_cache_destroy(cfs_win_task_manger.slab);
- memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN));
+ /* destroy the taskslot cache slab */
+ kmem_cache_destroy(cfs_win_task_manger.slab);
+ memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN));
}
return ERR_PTR(-EINVAL);
}
- AnsiString = cfs_alloc(sizeof(CHAR) * (NameLength + PrefixLength + 1),
- CFS_ALLOC_ZERO);
+ AnsiString = kmalloc(sizeof(CHAR) * (NameLength + PrefixLength + 1),
+ __GFP_ZERO);
if (NULL == AnsiString)
return ERR_PTR(-ENOMEM);
UnicodeString =
- cfs_alloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1),
- CFS_ALLOC_ZERO);
+ kmalloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1),
+ __GFP_ZERO);
if (NULL == UnicodeString) {
- cfs_free(AnsiString);
+ kfree(AnsiString);
return ERR_PTR(-ENOMEM);
}
/* Check the returned status of IoStatus... */
if (!NT_SUCCESS(IoStatus.Status)) {
- cfs_free(UnicodeString);
- cfs_free(AnsiString);
+ kfree(UnicodeString);
+ kfree(AnsiString);
return ERR_PTR(cfs_error_code(IoStatus.Status));
}
/* Allocate the file_t: libcfs file object */
- fp = cfs_alloc(sizeof(*fp) + NameLength, CFS_ALLOC_ZERO);
+ fp = kmalloc(sizeof(*fp) + NameLength, __GFP_ZERO);
if (NULL == fp) {
Status = ZwClose(FileHandle);
ASSERT(NT_SUCCESS(Status));
- cfs_free(UnicodeString);
- cfs_free(AnsiString);
+ kfree(UnicodeString);
+ kfree(AnsiString);
return ERR_PTR(-ENOMEM);
}
fp->f_mode = (mode_t)mode;
fp->f_count = 1;
- /* free the memory of temporary name strings */
- cfs_free(UnicodeString);
- cfs_free(AnsiString);
+ /* free the memory of temporary name strings */
+ kfree(UnicodeString);
+ kfree(AnsiString);
- return fp;
+ return fp;
}
Status = ZwClose(fp->f_handle);
ASSERT(NT_SUCCESS(Status));
- /* free the file flip structure */
- cfs_free(fp);
- return 0;
+ /* free the file flip structure */
+ kfree(fp);
+ return 0;
}
return;
}
if (cfs_atomic_dec_and_test(&de->d_count)) {
- cfs_free(de);
+ kfree(de);
}
}
#include <libcfs/libcfs.h>
-cfs_mem_cache_t *cfs_page_t_slab = NULL;
-cfs_mem_cache_t *cfs_page_p_slab = NULL;
+struct kmem_cache *cfs_page_t_slab;
+struct kmem_cache *cfs_page_p_slab;
-cfs_page_t * virt_to_page(void * addr)
+struct page *virt_to_page(void *addr)
{
- cfs_page_t *pg;
- pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
-
- if (NULL == pg) {
- cfs_enter_debugger();
- return NULL;
- }
+ struct page *pg;
+ pg = kmem_cache_alloc(cfs_page_t_slab, 0);
+
+ if (NULL == pg) {
+ cfs_enter_debugger();
+ return NULL;
+ }
- memset(pg, 0, sizeof(cfs_page_t));
- pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
- pg->mapping = addr;
- cfs_atomic_set(&pg->count, 1);
+ memset(pg, 0, sizeof(struct page));
+ pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
+ pg->mapping = addr;
+ cfs_atomic_set(&pg->count, 1);
set_bit(PG_virt, &(pg->flags));
- cfs_enter_debugger();
- return pg;
+ cfs_enter_debugger();
+ return pg;
}
/*
- * cfs_alloc_page
- * To allocate the cfs_page_t and also 1 page of memory
+ * alloc_page
+ * To allocate the struct page and also 1 page of memory
*
* Arguments:
* flags: the allocation options
*
* Return Value:
- * pointer to the cfs_page_t strcture in success or
+ *   pointer to the struct page structure on success or
* NULL in failure case
*
* Notes:
cfs_atomic_t libcfs_total_pages;
-cfs_page_t * cfs_alloc_page(int flags)
+struct page *alloc_page(int flags)
{
- cfs_page_t *pg;
- pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
-
- if (NULL == pg) {
- cfs_enter_debugger();
- return NULL;
- }
+ struct page *pg;
+ pg = kmem_cache_alloc(cfs_page_t_slab, 0);
- memset(pg, 0, sizeof(cfs_page_t));
- pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0);
- cfs_atomic_set(&pg->count, 1);
-
- if (pg->addr) {
- if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
- memset(pg->addr, 0, CFS_PAGE_SIZE);
- }
- cfs_atomic_inc(&libcfs_total_pages);
- } else {
- cfs_enter_debugger();
- cfs_mem_cache_free(cfs_page_t_slab, pg);
- pg = NULL;
- }
+ if (NULL == pg) {
+ cfs_enter_debugger();
+ return NULL;
+ }
+
+ memset(pg, 0, sizeof(struct page));
+ pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
+ cfs_atomic_set(&pg->count, 1);
+
+ if (pg->addr) {
+ if (cfs_is_flag_set(flags, __GFP_ZERO))
+ memset(pg->addr, 0, PAGE_CACHE_SIZE);
+ cfs_atomic_inc(&libcfs_total_pages);
+ } else {
+ cfs_enter_debugger();
+ kmem_cache_free(cfs_page_t_slab, pg);
+ pg = NULL;
+ }
- return pg;
+ return pg;
}
/*
- * cfs_free_page
- * To free the cfs_page_t including the page
+ * __free_page
+ * To free the struct page including the page
*
* Arguments:
- * pg: pointer to the cfs_page_t strcture
+ *   pg:  pointer to the struct page structure
*
* Return Value:
* N/A
* Notes:
* N/A
*/
-void cfs_free_page(cfs_page_t *pg)
+void __free_page(struct page *pg)
{
- ASSERT(pg != NULL);
- ASSERT(pg->addr != NULL);
- ASSERT(cfs_atomic_read(&pg->count) <= 1);
+ ASSERT(pg != NULL);
+ ASSERT(pg->addr != NULL);
+ ASSERT(cfs_atomic_read(&pg->count) <= 1);
if (!test_bit(PG_virt, &pg->flags)) {
- cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
- cfs_atomic_dec(&libcfs_total_pages);
- } else {
- cfs_enter_debugger();
- }
- cfs_mem_cache_free(cfs_page_t_slab, pg);
+ kmem_cache_free(cfs_page_p_slab, pg->addr);
+ cfs_atomic_dec(&libcfs_total_pages);
+ } else {
+ cfs_enter_debugger();
+ }
+ kmem_cache_free(cfs_page_t_slab, pg);
}
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
{
- KdPrint(("cfs_mem_is_in_cache: not implemented. (should maintain a"
- "chain to keep all allocations traced.)\n"));
- return 1;
+	KdPrint(("kmem_is_in_cache: not implemented. (should maintain a "
+		 "chain to keep all allocations traced.)\n"));
+ return 1;
}
/*
- * cfs_alloc
+ * kmalloc
* To allocate memory from system pool
*
* Arguments:
*/
void *
-cfs_alloc(size_t nr_bytes, u_int32_t flags)
+kmalloc(size_t nr_bytes, u_int32_t flags)
{
- void *ptr;
+ void *ptr;
- /* Ignore the flags: always allcoate from NonPagedPool */
- ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
- if (ptr != NULL && (flags & CFS_ALLOC_ZERO)) {
- memset(ptr, 0, nr_bytes);
- }
+	/* Ignore the flags: always allocate from NonPagedPool */
+ ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
+ if (ptr != NULL && (flags & __GFP_ZERO))
+ memset(ptr, 0, nr_bytes);
- if (!ptr) {
- cfs_enter_debugger();
- }
+ if (!ptr)
+ cfs_enter_debugger();
- return ptr;
+ return ptr;
}
/*
- * cfs_free
+ * kfree
 * To free the specified memory to system pool
*
* Arguments:
*/
void
-cfs_free(void *addr)
+kfree(void *addr)
{
- ExFreePool(addr);
+ ExFreePool(addr);
}
/*
- * cfs_alloc_large
+ * vmalloc
* To allocate large block of memory from system pool
*
* Arguments:
*/
void *
-cfs_alloc_large(size_t nr_bytes)
+vmalloc(size_t nr_bytes)
{
- return cfs_alloc(nr_bytes, 0);
+ return kmalloc(nr_bytes, 0);
}
/*
- * cfs_free_large
+ * vfree
 * To free the specified memory to system pool
*
* Arguments:
* N/A
*/
-void
-cfs_free_large(void *addr)
+void vfree(void *addr)
{
- cfs_free(addr);
+ kfree(addr);
}
/*
- * cfs_mem_cache_create
+ * kmem_cache_create
* To create a SLAB cache
*
* Arguments:
* 3, parameters C/D are removed.
*/
-cfs_mem_cache_t *
-cfs_mem_cache_create(
- const char * name,
- size_t size,
- size_t offset,
- unsigned long flags
- )
+struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ size_t offset, unsigned long flags,
+ void *ctor)
{
- cfs_mem_cache_t * kmc = NULL;
+ struct kmem_cache *kmc = NULL;
- /* The name of the SLAB could not exceed 20 chars */
+	/* The name of the SLAB must not exceed 20 chars */
- if (name && strlen(name) >= 20) {
- goto errorout;
- }
+ if (name && strlen(name) >= 20)
+ goto errorout;
- /* Allocate and initialize the SLAB strcture */
+	/* Allocate and initialize the SLAB structure */
- kmc = cfs_alloc (sizeof(cfs_mem_cache_t), 0);
+ kmc = kmalloc(sizeof(struct kmem_cache), 0);
- if (NULL == kmc) {
- goto errorout;
- }
+ if (NULL == kmc)
+ goto errorout;
- memset(kmc, 0, sizeof(cfs_mem_cache_t));
- kmc->flags = flags;
+ memset(kmc, 0, sizeof(struct kmem_cache));
+ kmc->flags = flags;
if (name) {
strcpy(&kmc->name[0], name);
}
/*
- * cfs_mem_cache_destroy
+ * kmem_cache_destroy
* To destroy the unused SLAB cache
*
* Arguments:
* N/A
*/
-int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc)
+void kmem_cache_destroy(struct kmem_cache *kmc)
{
- ASSERT(kmc != NULL);
+ ASSERT(kmc != NULL);
- ExDeleteNPagedLookasideList(&(kmc->npll));
+ ExDeleteNPagedLookasideList(&(kmc->npll));
- cfs_free(kmc);
+ kfree(kmc);
-	return 0;
}
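Because the kernel's kmem_cache_destroy() returns void, converted callers
must not test a return value; the idiom used throughout this patch is
simply (cache name illustrative):

	if (foo_cache != NULL) {
		kmem_cache_destroy(foo_cache);
		foo_cache = NULL;
	}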
/*
- * cfs_mem_cache_alloc
+ * kmem_cache_alloc
* To allocate an object (LookAside entry) from the SLAB
*
* Arguments:
* N/A
*/
-void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags)
+void *kmem_cache_alloc(struct kmem_cache *kmc, int flags)
{
- void *buf = NULL;
+ void *buf = NULL;
- buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
+ buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
- return buf;
+ return buf;
}
/*
- * cfs_mem_cache_free
+ * kmem_cache_free
* To free an object (LookAside entry) to the SLAB cache
*
* Arguments:
* N/A
*/
-void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
+void kmem_cache_free(struct kmem_cache *kmc, void *buf)
{
ExFreeToNPagedLookasideList(&(kmc->npll), buf);
}
CFS_LIST_HEAD(shrinker_hdr);
cfs_timer_t shrinker_timer = {0};
-struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
+struct shrinker *set_shrinker(int seeks, shrink_callback cb)
{
- struct cfs_shrinker * s = (struct cfs_shrinker *)
- cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
+ struct shrinker *s = (struct shrinker *)
+ kmalloc(sizeof(struct shrinker), __GFP_ZERO);
if (s) {
s->cb = cb;
s->seeks = seeks;
return s;
}
-void cfs_remove_shrinker(struct cfs_shrinker *s)
+void remove_shrinker(struct shrinker *s)
{
- struct cfs_shrinker *tmp;
+ struct shrinker *tmp;
spin_lock(&shrinker_guard);
#if TRUE
- cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
- struct cfs_shrinker, list) {
- if (tmp == s) {
- cfs_list_del(&tmp->list);
- break;
- }
- }
+ cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
+ struct shrinker, list) {
+ if (tmp == s) {
+ cfs_list_del(&tmp->list);
+ break;
+ }
+ }
#else
- cfs_list_del(&s->list);
+ cfs_list_del(&s->list);
#endif
spin_unlock(&shrinker_guard);
- cfs_free(s);
+ kfree(s);
}
/* time ut test proc */
void shrinker_timer_proc(ulong_ptr_t arg)
{
- struct cfs_shrinker *s;
+ struct shrinker *s;
spin_lock(&shrinker_guard);
cfs_list_for_each_entry_typed(s, &shrinker_hdr,
- struct cfs_shrinker, list) {
+ struct shrinker, list) {
s->cb(s->nr, __GFP_FS);
}
spin_unlock(&shrinker_guard);
hdr = (struct libcfs_ioctl_hdr *)buf;
data = (struct libcfs_ioctl_data *)buf;
- err = cfs_copy_from_user(buf, (void *)arg, sizeof(*hdr));
+ err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
if (err)
RETURN(err);
RETURN(-EINVAL);
}
- err = cfs_copy_from_user(buf, (void *)arg, hdr->ioc_len);
+ err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
if (err)
RETURN(err);
int libcfs_ioctl_popdata(void *arg, void *data, int size)
{
- if (cfs_copy_to_user((char *)arg, data, size))
+ if (copy_to_user((char *)arg, data, size))
return -EFAULT;
return 0;
}
*/
void
-cfs_thread_proc(
- void * context
- )
+cfs_thread_proc(void *context)
{
cfs_thread_context_t * thread_context =
(cfs_thread_context_t *) context;
/* Free the context memory */
- cfs_free(context);
+ kfree(context);
/* Terminate this system thread */
{
cfs_handle_t thread = NULL;
NTSTATUS status;
- cfs_thread_context_t * context = NULL;
+ cfs_thread_context_t *context = NULL;
/* Allocate the context to be transferred to system thread */
- context = cfs_alloc(sizeof(cfs_thread_context_t), CFS_ALLOC_ZERO);
+ context = kmalloc(sizeof(cfs_thread_context_t), __GFP_ZERO);
if (!context) {
return ERR_PTR(-ENOMEM);
if (!NT_SUCCESS(status)) {
- cfs_free(context);
+ kfree(context);
/* We need translate the nt status to linux error code */
struct cfs_symbol *sym = NULL;
struct cfs_symbol *new = NULL;
- new = cfs_alloc(sizeof(struct cfs_symbol), CFS_ALLOC_ZERO);
- if (!new) {
- return (-ENOMEM);
- }
+ new = kmalloc(sizeof(struct cfs_symbol), __GFP_ZERO);
+ if (!new)
+ return -ENOMEM;
+
strncpy(new->name, name, CFS_SYMBOL_LEN);
new->value = (void *)value;
new->ref = 0;
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
up_write(&cfs_symbol_lock);
- cfs_free(new);
+ kfree(new);
			return 0; /* already registered */
}
}
if (!strcmp(sym->name, name)) {
LASSERT(sym->ref == 0);
cfs_list_del (&sym->sym_list);
- cfs_free(sym);
+ kfree(sym);
break;
}
}
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
LASSERT(sym->ref == 0);
cfs_list_del (&sym->sym_list);
- cfs_free(sym);
+ kfree(sym);
}
up_write(&cfs_symbol_lock);
return;
and kernel ntoskrnl.lib) */
cfs_libc_init();
- /* create slab memory caches for page alloctors */
- cfs_page_t_slab = cfs_mem_cache_create(
- "CPGT", sizeof(cfs_page_t), 0, 0 );
+	/* create slab memory caches for page allocators */
+ cfs_page_t_slab = kmem_cache_create("CPGT", sizeof(struct page),
+ 0, 0, NULL);
- cfs_page_p_slab = cfs_mem_cache_create(
- "CPGP", CFS_PAGE_SIZE, 0, 0 );
+ cfs_page_p_slab = kmem_cache_create("CPGP", PAGE_CACHE_SIZE,
+ 0, 0, NULL);
if ( cfs_page_t_slab == NULL ||
cfs_page_p_slab == NULL ){
errorout:
- if (rc != 0) {
- /* destroy the taskslot cache slab */
- if (cfs_page_t_slab) {
- cfs_mem_cache_destroy(cfs_page_t_slab);
- }
- if (cfs_page_p_slab) {
- cfs_mem_cache_destroy(cfs_page_p_slab);
- }
- }
+ if (rc != 0) {
+ /* destroy the taskslot cache slab */
+ if (cfs_page_t_slab)
+ kmem_cache_destroy(cfs_page_t_slab);
+ if (cfs_page_p_slab)
+ kmem_cache_destroy(cfs_page_p_slab);
+ }
return rc;
}
/* destroy the taskslot cache slab */
if (cfs_page_t_slab) {
- cfs_mem_cache_destroy(cfs_page_t_slab);
+		kmem_cache_destroy(cfs_page_t_slab);
}
if (cfs_page_p_slab) {
- cfs_mem_cache_destroy(cfs_page_p_slab);
+		kmem_cache_destroy(cfs_page_p_slab);
}
return;
/* SLAB object for cfs_proc_entry_t allocation */
-cfs_mem_cache_t * proc_entry_cache = NULL;
+struct kmem_cache *proc_entry_cache;
/* root node for sysctl table */
cfs_sysctl_table_header_t root_table_header;
char *start;
cfs_proc_entry_t * dp;
- dp = (cfs_proc_entry_t *) file->f_inode->i_priv;
- if (!(page = (char*) cfs_alloc(CFS_PAGE_SIZE, 0)))
- return -ENOMEM;
+ dp = (cfs_proc_entry_t *) file->f_inode->i_priv;
+ page = (char *) kmalloc(PAGE_CACHE_SIZE, 0);
+ if (page == NULL)
+ return -ENOMEM;
while ((nbytes > 0) && !eof) {
break;
}
- n -= cfs_copy_to_user((void *)buf, start, n);
+ n -= copy_to_user((void *)buf, start, n);
if (n == 0) {
if (retval == 0)
retval = -EFAULT;
buf += n;
retval += n;
}
- cfs_free(page);
+ kfree(page);
- return retval;
+ return retval;
}
static ssize_t
{
cfs_proc_entry_t * entry = NULL;
- entry = cfs_mem_cache_alloc(proc_entry_cache, 0);
- if (!entry) {
- return NULL;
- }
+ entry = kmem_cache_alloc(proc_entry_cache, 0);
+ if (!entry)
+ return NULL;
memset(entry, 0, sizeof(cfs_proc_entry_t));
void
proc_free_entry(cfs_proc_entry_t * entry)
-
{
- ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC);
-
- cfs_mem_cache_free(proc_entry_cache, entry);
+ ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC);
+ kmem_cache_free(proc_entry_cache, entry);
}
/* dissect the path string for a given full proc path */
parent = root;
entry = NULL;
- ename = cfs_alloc(0x21, CFS_ALLOC_ZERO);
+ ename = kmalloc(0x21, __GFP_ZERO);
- if (ename == NULL) {
- goto errorout;
- }
+ if (ename == NULL)
+ goto errorout;
again:
errorout:
if (ename) {
- cfs_free(ename);
+ kfree(ename);
}
return entry;
entry = proc_alloc_entry();
memcpy(entry->name, ename, flen);
- if (entry) {
- if(!proc_insert_splay(parent, entry)) {
- proc_free_entry(entry);
- entry = NULL;
- }
- }
+ if (entry && !proc_insert_splay(parent, entry)) {
+ proc_free_entry(entry);
+ entry = NULL;
+ }
}
if (!entry) {
void proc_destroy_fs()
{
- LOCK_PROCFS();
+ LOCK_PROCFS();
- if (cfs_proc_root) {
- proc_destroy_splay(cfs_proc_root);
- }
+ if (cfs_proc_root)
+ proc_destroy_splay(cfs_proc_root);
- if (proc_entry_cache) {
- cfs_mem_cache_destroy(proc_entry_cache);
- }
+ if (proc_entry_cache)
+ kmem_cache_destroy(proc_entry_cache);
- UNLOCK_PROCFS();
+ UNLOCK_PROCFS();
}
static char proc_item_path[512];
CFS_INIT_LIST_HEAD(&(root_table_header.ctl_entry));
INIT_PROCFS_LOCK();
- proc_entry_cache = cfs_mem_cache_create(
- NULL,
- sizeof(cfs_proc_entry_t),
- 0,
- 0
- );
+ proc_entry_cache = kmem_cache_create(NULL, sizeof(cfs_proc_entry_t),
+ 0, 0, NULL);
if (!proc_entry_cache) {
return (-ENOMEM);
return -ENOTDIR;
if (oldval && oldlenp) {
- if(get_user(len, oldlenp))
+ if (get_user(len, oldlenp))
return -EFAULT;
- if (len) {
- l = strlen(table->data);
- if (len > l) len = l;
- if (len >= table->maxlen)
- len = table->maxlen;
- if(cfs_copy_to_user(oldval, table->data, len))
- return -EFAULT;
- if(put_user(0, ((char *) oldval) + len))
- return -EFAULT;
- if(put_user(len, oldlenp))
- return -EFAULT;
- }
+ if (len) {
+ l = strlen(table->data);
+ if (len > l)
+ len = l;
+ if (len >= table->maxlen)
+ len = table->maxlen;
+ if (copy_to_user(oldval, table->data, len))
+ return -EFAULT;
+ if (put_user(0, ((char *) oldval) + len))
+ return -EFAULT;
+ if (put_user(len, oldlenp))
+ return -EFAULT;
+ }
}
if (newval && newlen) {
len = newlen;
if (len > table->maxlen)
len = table->maxlen;
- if(cfs_copy_from_user(table->data, newval, len))
+ if (copy_from_user(table->data, newval, len))
return -EFAULT;
if (len == table->maxlen)
len--;
if (write) {
while (left) {
char c;
- if(get_user(c,(char *) buffer))
- return -EFAULT;
+ if (get_user(c, (char *)buffer))
+ return -EFAULT;
if (!isspace(c))
- break;
+ break;
left--;
- ((char *) buffer)++;
+ ((char *)buffer)++;
}
if (!left)
break;
len = left;
if (len > TMPBUFLEN-1)
len = TMPBUFLEN-1;
- if(cfs_copy_from_user(buf, buffer, len))
+ if (copy_from_user(buf, buffer, len))
return -EFAULT;
buf[len] = 0;
p = buf;
val = -val;
(char *)buffer += len;
left -= len;
- switch(op) {
- case OP_SET: *i = val; break;
- case OP_AND: *i &= val; break;
- case OP_OR: *i |= val; break;
- case OP_MAX: if(*i < val)
- *i = val;
- break;
- case OP_MIN: if(*i > val)
- *i = val;
- break;
- }
+ switch(op) {
+ case OP_SET:
+ *i = val;
+ break;
+ case OP_AND:
+ *i &= val;
+ break;
+ case OP_OR:
+ *i |= val;
+ break;
+ case OP_MAX:
+ if (*i < val)
+ *i = val;
+ break;
+ case OP_MIN:
+ if (*i > val)
+ *i = val;
+ break;
+ }
} else {
p = buf;
if (!first)
len = strlen(buf);
if (len > left)
len = left;
- if(cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
left -= len;
(char *)buffer += len;
}
if (!write && !first && left) {
- if(put_user('\n', (char *) buffer))
+ if (put_user('\n', (char *) buffer))
return -EFAULT;
left--, ((char *)buffer)++;
}
p = (char *) buffer;
while (left) {
char c;
- if(get_user(c, p++))
+ if (get_user(c, p++))
return -EFAULT;
if (!isspace(c))
break;
len = 0;
p = buffer;
while (len < *lenp) {
- if(get_user(c, p++))
+ if (get_user(c, p++))
return -EFAULT;
if (c == 0 || c == '\n')
break;
}
if (len >= (size_t)table->maxlen)
len = (size_t)table->maxlen-1;
- if(cfs_copy_from_user(table->data, buffer, len))
+ if (copy_from_user(table->data, buffer, len))
return -EFAULT;
((char *) table->data)[len] = 0;
filp->f_pos += *lenp;
if (len > *lenp)
len = *lenp;
if (len)
- if(cfs_copy_to_user(buffer, table->data, len))
+ if (copy_to_user(buffer, table->data, len))
return -EFAULT;
if (len < *lenp) {
- if(put_user('\n', ((char *) buffer) + len))
+ if (put_user('\n', ((char *) buffer) + len))
return -EFAULT;
len++;
}
if (len) {
if (len > (size_t)table->maxlen)
len = (size_t)table->maxlen;
- if(cfs_copy_to_user(oldval, table->data, len))
+ if (copy_to_user(oldval, table->data, len))
return -EFAULT;
- if(put_user(len, oldlenp))
+ if (put_user(len, oldlenp))
return -EFAULT;
}
}
len = newlen;
if (len > (size_t)table->maxlen)
len = (size_t)table->maxlen;
- if(cfs_copy_from_user(table->data, newval, len))
+ if (copy_from_user(table->data, newval, len))
return -EFAULT;
}
}
newval, newlen, head->ctl_table,
&context);
if (context)
- cfs_free(context);
+ kfree(context);
if (error != -ENOTDIR)
return error;
tmp = tmp->next;
int insert_at_head)
{
struct ctl_table_header *tmp;
- tmp = cfs_alloc(sizeof(struct ctl_table_header), 0);
+ tmp = kmalloc(sizeof(struct ctl_table_header), 0);
if (!tmp)
return NULL;
tmp->ctl_table = table;
#ifdef CONFIG_PROC_FS
unregister_proc_table(header->ctl_table, cfs_proc_sys);
#endif
- cfs_free(header);
+ kfree(header);
}
if (fp == NULL)
return NULL;
- fh = cfs_alloc(sizeof(*fh), CFS_ALLOC_ZERO);
+ fh = kmalloc(sizeof(*fh), __GFP_ZERO);
if (fh == NULL)
return NULL;
- fh->f_inode = cfs_alloc(sizeof(struct inode), CFS_ALLOC_ZERO);
+ fh->f_inode = kmalloc(sizeof(struct inode), __GFP_ZERO);
if (!fh->f_inode) {
- cfs_free(fh);
+ kfree(fh);
return NULL;
}
}
if (0 != rc) {
- cfs_free(fh->f_inode);
- cfs_free(fh);
+ kfree(fh->f_inode);
+ kfree(fh);
return NULL;
}
fp->nlink--;
}
- cfs_free(fh->f_inode);
- cfs_free(fh);
+ kfree(fh->f_inode);
+ kfree(fh);
return rc;
}
/* if not empty - flush it first */
if (m->count) {
n = min(m->count, size);
- err = cfs_copy_to_user(buf, m->buf + m->from, n);
+ err = copy_to_user(buf, m->buf + m->from, n);
if (err)
goto Efault;
m->count -= n;
if (m->count < m->size)
goto Fill;
m->op->stop(m, p);
- cfs_free(m->buf);
+ kfree(m->buf);
m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
if (!m->buf)
goto Enomem;
}
m->op->stop(m, p);
n = min(m->count, size);
- err = cfs_copy_to_user(buf, m->buf, n);
+ err = copy_to_user(buf, m->buf, n);
if (err)
goto Efault;
copied += n;
Eoverflow:
m->op->stop(m, p);
- cfs_free(m->buf);
- m->buf = cfs_alloc(m->size <<= 1, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
+ kfree(m->buf);
+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | __GFP_ZERO);
return !m->buf ? -ENOMEM : -EAGAIN;
}
struct seq_file *m = (struct seq_file *)file->private_data;
if (m) {
if (m->buf)
- cfs_free(m->buf);
- cfs_free(m);
+ kfree(m->buf);
+ kfree(m);
}
return 0;
}
if (!res)
((struct seq_file *)file->private_data)->private = data;
else
- cfs_free(op);
+ kfree(op);
}
return res;
}
{
const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
int res = seq_release(inode, file);
- cfs_free((void *)op);
+ kfree((void *)op);
return res;
}
EXPORT_SYMBOL(single_release);
{
struct seq_file *seq = file->private_data;
- cfs_free(seq->private);
+ kfree(seq->private);
seq->private = NULL;
return seq_release(inode, file);
}
void *private;
struct seq_file *seq;
- private = cfs_alloc(psize, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
+ private = kmalloc(psize, GFP_KERNEL | __GFP_ZERO);
if (private == NULL)
goto out;
return private;
out_free:
- cfs_free(private);
+ kfree(private);
out:
return NULL;
}
} else {
- KsTsdu = (PKS_TSDU) cfs_mem_cache_alloc(
+ KsTsdu = (PKS_TSDU) kmem_cache_alloc(
ks_data.ksnd_tsdu_slab, 0);
}
PKS_TSDU KsTsdu
)
{
- cfs_mem_cache_free(
+ kmem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
list = RemoveHeadList(&ks_data.ksnd_addrs_list);
slot = CONTAINING_RECORD(list, ks_addr_slot_t, link);
- cfs_free(slot);
+ kfree(slot);
ks_data.ksnd_naddrs--;
}
return;
}
- slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
+ slot = kmalloc(sizeof(ks_addr_slot_t) + DeviceName->Length, __GFP_ZERO);
if (slot != NULL) {
spin_lock(&ks_data.ksnd_addrs_lock);
InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
/* free the Context structure... */
ASSERT(Context->Magic == KS_TCP_CONTEXT_MAGIC);
Context->Magic = 'CDAB';
- cfs_free(Context);
+ kfree(Context);
}
/* free the Irp */
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
/* free the Irp structure */
/* there's still data in tdi internal queue, we need issue a new
Irp to receive all of them. first allocate the tcp context */
- context = cfs_alloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
+ context = kmalloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
if (!context) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto errorout;
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
ks_abort_tconn(tconn);
ks_tconn_t * tconn = NULL;
/* allocate ksoc_tconn_t from the slab cache memory */
- tconn = (ks_tconn_t *)cfs_mem_cache_alloc(
- ks_data.ksnd_tconn_slab, CFS_ALLOC_ZERO);
+ tconn = (ks_tconn_t *)kmem_cache_alloc(
+ ks_data.ksnd_tconn_slab, __GFP_ZERO);
if (tconn) {
spin_unlock(&(ks_data.ksnd_tconn_lock));
/* free the structure memory */
- cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
+ kmem_cache_free(ks_data.ksnd_tconn_slab, tconn);
KsPrint((3, "ks_free_tconn: tconn %p is freed.\n", tconn));
}
length = KsQueryMdlsSize(mdl);
/* we need allocate the ks_tx_t structure from memory pool. */
- context = cfs_alloc(sizeof(ks_tdi_tx_t), 0);
+ context = kmalloc(sizeof(ks_tdi_tx_t), 0);
if (!context) {
status = STATUS_INSUFFICIENT_RESOURCES;
goto errorout;
if (context) {
ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
context->Magic = 'CDAB';
- cfs_free(context);
+ kfree(context);
}
/* here need free the Irp. */
CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
- ks_data.ksnd_tconn_slab = cfs_mem_cache_create(
- "tcon", sizeof(ks_tconn_t) , 0, 0);
+ ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t),
+ 0, 0, NULL);
if (!ks_data.ksnd_tconn_slab) {
rc = -ENOMEM;
spin_lock_init(&ks_data.ksnd_tsdu_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
- ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
- "tsdu", ks_data.ksnd_tsdu_size, 0, 0);
+ ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size,
+ 0, 0, NULL);
if (!ks_data.ksnd_tsdu_slab) {
rc = -ENOMEM;
if (ks_data.ksnd_engine_nums < 4) {
ks_data.ksnd_engine_nums = 4;
}
- ks_data.ksnd_engine_mgr = cfs_alloc(sizeof(ks_engine_mgr_t) *
- ks_data.ksnd_engine_nums,CFS_ALLOC_ZERO);
+ ks_data.ksnd_engine_mgr = kmalloc(sizeof(ks_engine_mgr_t) *
+ ks_data.ksnd_engine_nums, __GFP_ZERO);
if (ks_data.ksnd_engine_mgr == NULL) {
rc = -ENOMEM;
goto errorout;
/* do cleanup in case we get failures */
if (rc < 0) {
if (ks_data.ksnd_tconn_slab) {
- cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+			kmem_cache_destroy(ks_data.ksnd_tconn_slab);
ks_data.ksnd_tconn_slab = NULL;
}
}
cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
/* it's safe to delete the tconn slab ... */
- cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+	kmem_cache_destroy(ks_data.ksnd_tconn_slab);
ks_data.ksnd_tconn_slab = NULL;
/* clean up all the tsud buffers in the free list */
cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
- cfs_mem_cache_free(
+ kmem_cache_free(
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
spin_unlock(&(ks_data.ksnd_tsdu_lock));
/* it's safe to delete the tsdu slab ... */
- cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
+	kmem_cache_destroy(ks_data.ksnd_tsdu_slab);
ks_data.ksnd_tsdu_slab = NULL;
/* good! it's smooth to do the cleaning up...*/
spin_lock(&ks_data.ksnd_addrs_lock);
- *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
+ *names = kmalloc(sizeof(char *) * ks_data.ksnd_naddrs, __GFP_ZERO);
if (*names == NULL) {
goto errorout;
}
void libcfs_ipif_free_enumeration(char **names, int n)
{
if (names) {
- cfs_free(names);
+ kfree(names);
}
}
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
cfs_trace_data[i] =
- cfs_alloc(sizeof(union cfs_trace_data_union) * \
- CFS_NR_CPUS, CFS_ALLOC_KERNEL);
+ kmalloc(sizeof(union cfs_trace_data_union) * \
+ CFS_NR_CPUS, GFP_KERNEL);
if (cfs_trace_data[i] == NULL)
goto out;
}
for (i = 0; i < cfs_num_possible_cpus(); i++)
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
cfs_trace_console_buffers[i][j] =
- cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
- CFS_ALLOC_KERNEL);
+ kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ GFP_KERNEL);
if (cfs_trace_console_buffers[i][j] == NULL)
goto out;
for (i = 0; i < cfs_num_possible_cpus(); i++) {
for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
if (cfs_trace_console_buffers[i][j] != NULL) {
- cfs_free(cfs_trace_console_buffers[i][j]);
+ kfree(cfs_trace_console_buffers[i][j]);
cfs_trace_console_buffers[i][j] = NULL;
}
}
}
for (i = 0; cfs_trace_data[i] != NULL; i++) {
- cfs_free(cfs_trace_data[i]);
+ kfree(cfs_trace_data[i]);
cfs_trace_data[i] = NULL;
}
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
+ int total_mb = (num_physpages >> (20 - PAGE_CACHE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
{
LPVOID page;
- page = VirtualAlloc(NULL, CFS_PAGE_SIZE << factor,
+ page = VirtualAlloc(NULL, PAGE_CACHE_SIZE << factor,
MEM_COMMIT, PAGE_READWRITE);
return page;
}
while (idp->id_free_cnt < IDR_FREE_MAX) {
struct idr_layer *new;
- new = cfs_alloc(sizeof(struct idr_layer), CFS_ALLOC_ZERO);
+ new = kmalloc(sizeof(struct idr_layer), __GFP_ZERO);
if(new == NULL)
return (0);
free_layer(idp, new);
}
while (idp->id_free_cnt >= IDR_FREE_MAX) {
p = alloc_layer(idp);
- cfs_free(p);
+ kfree(p);
}
return 0;
}
struct idr_context *cfs_idr_init()
{
struct idr_context * idp = NULL;
- idp = cfs_alloc(sizeof(struct idr_context), 0);
+ idp = kmalloc(sizeof(struct idr_context), 0);
if (idp) {
memset(idp, 0, sizeof(struct idr_context));
}
void cfs_idr_exit(struct idr_context *idp)
{
if (idp) {
- cfs_free(idp);
+ kfree(idp);
}
}
/*
* XXX Liang:
*
- * Temporary fix, because lnet_me_free()->cfs_free->FREE() can be blocked in xnu,
+ * Temporary fix, because lnet_me_free()->kfree->FREE() can be blocked in xnu,
* at then same time we've taken LNET_LOCK(), which is a spinlock.
* by using LNET_USE_LIB_FREELIST, we can avoid calling of FREE().
*
* A page-based fragment of a MD.
*/
typedef struct {
- /** Pointer to the page where the fragment resides */
- cfs_page_t *kiov_page;
- /** Length in bytes of the fragment */
- unsigned int kiov_len;
- /**
- * Starting offset of the fragment within the page. Note that the
- * end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= CFS_PAGE_SIZE.
- */
- unsigned int kiov_offset;
+ /** Pointer to the page where the fragment resides */
+ struct page *kiov_page;
+ /** Length in bytes of the fragment */
+ unsigned int kiov_len;
+ /**
+ * Starting offset of the fragment within the page. Note that the
+ * end of the fragment must not pass the end of the page; i.e.,
+ * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ */
+ unsigned int kiov_offset;
} lnet_kiov_t;
/** @} lnet_md */
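A hypothetical fragment honouring the invariant documented above (page and
nob are assumed to come from the caller, with nob <= PAGE_CACHE_SIZE):

	lnet_kiov_t kiov;

	kiov.kiov_page   = page;
	kiov.kiov_offset = 0;
	kiov.kiov_len    = nob;	/* kiov_offset + kiov_len <= PAGE_CACHE_SIZE */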
for (i = 0; i < npages; i++) {
if (p->ibp_pages[i] != NULL)
- cfs_free_page(p->ibp_pages[i]);
+ __free_page(p->ibp_pages[i]);
}
LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
for (i = 0; i < npages; i++) {
p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
- CFS_ALLOC_IO);
+ __GFP_IO);
if (p->ibp_pages[i] == NULL) {
CERROR("Can't allocate page %d of %d\n", i, npages);
kiblnd_free_pages(p);
LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs));
if (kptllnd_data.kptl_rx_cache != NULL)
- cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
+ kmem_cache_destroy(kptllnd_data.kptl_rx_cache);
if (kptllnd_data.kptl_peers != NULL)
LIBCFS_FREE(kptllnd_data.kptl_peers,
kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);
kptllnd_data.kptl_rx_cache =
- cfs_mem_cache_create("ptllnd_rx",
+ kmem_cache_create("ptllnd_rx",
sizeof(kptl_rx_t) +
*kptllnd_tunables.kptl_max_msg_size,
0, /* offset */
cfs_waitq_t kptl_watchdog_waitq; /* watchdog sleeps here */
kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */
- cfs_mem_cache_t* kptl_rx_cache; /* rx descripter cache */
+	struct kmem_cache	*kptl_rx_cache;	/* rx descriptor cache */
cfs_atomic_t kptl_ntx; /* # tx descs allocated */
spinlock_t kptl_tx_lock; /* serialise idle tx list*/
return NULL;
}
- rx = cfs_mem_cache_alloc(kptllnd_data.kptl_rx_cache, CFS_ALLOC_ATOMIC);
+ rx = kmem_cache_alloc(kptllnd_data.kptl_rx_cache, GFP_ATOMIC);
if (rx == NULL) {
CERROR("Failed to allocate rx\n");
return NULL;
kptllnd_peer_decref(peer);
}
- cfs_mem_cache_free(kptllnd_data.kptl_rx_cache, rx);
+ kmem_cache_free(kptllnd_data.kptl_rx_cache, rx);
}
void
int i;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
*/
rc = -sock_send(sock, &msg, MSG_DONTWAIT, &sndlen);
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc == 0)
rc = sndlen;
return rc;
/* NB we can't trust socket ops to either consume our iovs
* or leave them alone. */
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + \
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + \
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
rc = -sock_receive(C2B_SOCK(conn->ksnc_sock), &msg, MSG_DONTWAIT, &rcvlen);
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc == 0)
rc = rcvlen;
return (rc);
CFS_DECL_NET_DATA;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
CFS_NET_EX;
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc != 0) {
if (suio.uio_resid != nob &&\
CFS_DECL_NET_DATA;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
nob += scratchiov[i].iov_len = kiov[i].kiov_len;
}
LASSERT (nob <= conn->ksnc_rx_nob_wanted);
CFS_NET_EX;
for (i = 0; i < niov; i++)
- cfs_kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].kiov_page);
if (rc){
if (ruio.uio_resid != nob && \
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1))
+ (kiov[i].kiov_offset + kiov[i].kiov_len !=
+ PAGE_CACHE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
for (i = 0; i < n_ids; i++) {
tmpid.pid = info->pi_pid;
tmpid.nid = info->pi_ni[i].ns_nid;
-#ifdef __KERNEL__
- if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+ if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
goto out_1;
-#else
- ids[i] = tmpid;
-#endif
}
rc = info->pi_nnis;
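The #ifdef removal above relies on libcfs providing a userspace copy_to_user()
emulation (per the user-mem.h note in the change list). A plausible minimal
form, shown only to explain why the unconditional call is safe and not copied
from the tree:

	static inline int copy_to_user(void *to, const void *from, int n)
	{
		memcpy(to, from, n);
		return 0;	/* 0 bytes left uncopied == success */
	}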
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
+ lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;
siov->kiov_len - soffset);
this_nob = MIN(this_nob, nob);
- if (daddr == NULL)
- daddr = ((char *)cfs_kmap(diov->kiov_page)) +
- diov->kiov_offset + doffset;
- if (saddr == NULL)
- saddr = ((char *)cfs_kmap(siov->kiov_page)) +
- siov->kiov_offset + soffset;
-
- /* Vanishing risk of kmap deadlock when mapping 2 pages.
- * However in practice at least one of the kiovs will be mapped
- * kernel pages and the map/unmap will be NOOPs */
-
- memcpy (daddr, saddr, this_nob);
- nob -= this_nob;
-
- if (diov->kiov_len > doffset + this_nob) {
- daddr += this_nob;
- doffset += this_nob;
- } else {
- cfs_kunmap(diov->kiov_page);
- daddr = NULL;
- diov++;
- ndiov--;
- doffset = 0;
- }
+ if (daddr == NULL)
+ daddr = ((char *)kmap(diov->kiov_page)) +
+ diov->kiov_offset + doffset;
+ if (saddr == NULL)
+ saddr = ((char *)kmap(siov->kiov_page)) +
+ siov->kiov_offset + soffset;
+
+ /* Vanishing risk of kmap deadlock when mapping 2 pages.
+ * However in practice at least one of the kiovs will be mapped
+ * kernel pages and the map/unmap will be NOOPs */
+
+ memcpy (daddr, saddr, this_nob);
+ nob -= this_nob;
+
+ if (diov->kiov_len > doffset + this_nob) {
+ daddr += this_nob;
+ doffset += this_nob;
+ } else {
+ kunmap(diov->kiov_page);
+ daddr = NULL;
+ diov++;
+ ndiov--;
+ doffset = 0;
+ }
- if (siov->kiov_len > soffset + this_nob) {
- saddr += this_nob;
- soffset += this_nob;
- } else {
- cfs_kunmap(siov->kiov_page);
- saddr = NULL;
- siov++;
- nsiov--;
- soffset = 0;
- }
- } while (nob > 0);
+ if (siov->kiov_len > soffset + this_nob) {
+ saddr += this_nob;
+ soffset += this_nob;
+ } else {
+ kunmap(siov->kiov_page);
+ saddr = NULL;
+ siov++;
+ nsiov--;
+ soffset = 0;
+ }
+ } while (nob > 0);
- if (daddr != NULL)
- cfs_kunmap(diov->kiov_page);
- if (saddr != NULL)
- cfs_kunmap(siov->kiov_page);
+ if (daddr != NULL)
+ kunmap(diov->kiov_page);
+ if (saddr != NULL)
+ kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
this_nob = MIN(this_nob, nob);
if (addr == NULL)
- addr = ((char *)cfs_kmap(kiov->kiov_page)) +
+ addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
addr += this_nob;
kiovoffset += this_nob;
} else {
- cfs_kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
- } while (nob > 0);
+ } while (nob > 0);
- if (addr != NULL)
- cfs_kunmap(kiov->kiov_page);
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);
this_nob = MIN(this_nob, nob);
if (addr == NULL)
- addr = ((char *)cfs_kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
+ addr = ((char *)kmap(kiov->kiov_page)) +
+ kiov->kiov_offset + kiovoffset;
- memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
- nob -= this_nob;
+ memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ nob -= this_nob;
- if (kiov->kiov_len > kiovoffset + this_nob) {
- addr += this_nob;
- kiovoffset += this_nob;
- } else {
- cfs_kunmap(kiov->kiov_page);
- addr = NULL;
- kiov++;
- nkiov--;
- kiovoffset = 0;
- }
+ if (kiov->kiov_len > kiovoffset + this_nob) {
+ addr += this_nob;
+ kiovoffset += this_nob;
+ } else {
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
- if (iov->iov_len > iovoffset + this_nob) {
- iovoffset += this_nob;
- } else {
- iov++;
- niov--;
- iovoffset = 0;
- }
- } while (nob > 0);
+ if (iov->iov_len > iovoffset + this_nob) {
+ iovoffset += this_nob;
+ } else {
+ iov++;
+ niov--;
+ iovoffset = 0;
+ }
+ } while (nob > 0);
- if (addr != NULL)
- cfs_kunmap(kiov->kiov_page);
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);
dst->kiov_page = src->kiov_page;
dst->kiov_offset = src->kiov_offset + offset;
- if (len <= frag_len) {
- dst->kiov_len = len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
- return (niov);
- }
+ if (len <= frag_len) {
+ dst->kiov_len = len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ return niov;
+ }
- dst->kiov_len = frag_len;
- LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
+ dst->kiov_len = frag_len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
len -= frag_len;
dst++;
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}
libcfs_id2str(target));
return -ENOMEM;
}
- msg->msg_vmflush = !!cfs_memory_pressure_get();
+ msg->msg_vmflush = !!memory_pressure_get();
cpt = lnet_cpt_of_cookie(mdh.cookie);
lnet_res_lock(cpt);
int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
while (--npages >= 0)
- cfs_free_page(rb->rb_kiov[npages].kiov_page);
+ __free_page(rb->rb_kiov[npages].kiov_page);
LIBCFS_FREE(rb, sz);
}
for (i = 0; i < npages; i++) {
page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
- CFS_ALLOC_ZERO | CFS_ALLOC_STD);
+ __GFP_ZERO | GFP_IOFS);
if (page == NULL) {
while (--i >= 0)
- cfs_free_page(rb->rb_kiov[i].kiov_page);
+ __free_page(rb->rb_kiov[i].kiov_page);
LIBCFS_FREE(rb, sz);
return NULL;
}
- rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
+ rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
rb->rb_kiov[i].kiov_offset = 0;
rb->rb_kiov[i].kiov_page = page;
}
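The allocation-flag substitutions used throughout the patch, collected in one
place (this mapping is inferred from the hunks themselves):

	/* CFS_ALLOC_ATOMIC -> GFP_ATOMIC    may not sleep             */
	/* CFS_ALLOC_IO     -> __GFP_IO      may start block I/O       */
	/* CFS_ALLOC_STD    -> GFP_IOFS      may recurse into I/O + FS */
	/* CFS_ALLOC_ZERO   -> __GFP_ZERO    zero-fill the allocation  */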
lnet_rtrpools_alloc(int im_a_router)
{
lnet_rtrbufpool_t *rtrp;
- int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ int large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
int small_pages = 1;
int nrb_tiny;
int nrb_small;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else {
off += 1;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else {
off += 1;
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (cfs_copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos += 1;
npg = breq->blk_npg;
/* NB: this is not going to work for variable page size,
* but we have to keep it for compatibility */
- len = npg * CFS_PAGE_SIZE;
+ len = npg * PAGE_CACHE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
if (npg > LNET_MAX_IOV || npg <= 0)
}
void
-brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic)
+brw_fill_page (struct page *pg, int pattern, __u64 magic)
{
- char *addr = cfs_page_address(pg);
+ char *addr = page_address(pg);
int i;
LASSERT (addr != NULL);
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += CFS_PAGE_SIZE - BRW_MSIZE;
+ addr += PAGE_CACHE_SIZE - BRW_MSIZE;
memcpy(addr, &magic, BRW_MSIZE);
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++)
+ for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
return;
}
}
int
-brw_check_page (cfs_page_t *pg, int pattern, __u64 magic)
+brw_check_page (struct page *pg, int pattern, __u64 magic)
{
- char *addr = cfs_page_address(pg);
+ char *addr = page_address(pg);
__u64 data = 0; /* make compiler happy */
int i;
data = *((__u64 *) addr);
if (data != magic) goto bad_data;
- addr += CFS_PAGE_SIZE - BRW_MSIZE;
+ addr += PAGE_CACHE_SIZE - BRW_MSIZE;
data = *((__u64 *) addr);
if (data != magic) goto bad_data;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++) {
+ for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
data = *(((__u64 *) addr) + i);
if (data != magic) goto bad_data;
}
brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
- cfs_page_t *pg;
+ struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
#ifdef __KERNEL__
brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
{
int i;
- cfs_page_t *pg;
+ struct page *pg;
for (i = 0; i < bk->bk_niov; i++) {
#ifdef __KERNEL__
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
- len = npg * CFS_PAGE_SIZE;
+ len = npg * PAGE_CACHE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
- npg = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
reply->brw_status = EINVAL;
return 0;
}
- npg = reqst->brw_len >> CFS_PAGE_SHIFT;
+ npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
} else {
- npg = (reqst->brw_len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
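Both branches are the usual byte-to-page conversion: when brw_len is known to
be page-aligned the plain shift suffices, otherwise the round-up form is
needed:

	npg = len >> PAGE_CACHE_SHIFT;				/* aligned len */
	npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;	/* round up    */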
replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen)) {
+ if (copy_from_user(name, args->lstio_ses_namep,
+ args->lstio_ses_nmlen)) {
LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name, args->lstio_dbg_namep,
+ if (copy_from_user(name, args->lstio_dbg_namep,
args->lstio_dbg_nmlen)) {
LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name, args->lstio_grp_namep,
+ if (copy_from_user(name, args->lstio_grp_namep,
args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
if (rc == 0 &&
- cfs_copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
+ copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
return -EINVAL;
}
args->lstio_grp_ndentp == NULL) /* # of node entry */
return -EINVAL;
- if (cfs_copy_from_user(&ndent, args->lstio_grp_ndentp,
- sizeof(ndent)) ||
- cfs_copy_from_user(&index, args->lstio_grp_idxp,
- sizeof(index)))
- return -EFAULT;
+ if (copy_from_user(&ndent, args->lstio_grp_ndentp,
+ sizeof(ndent)) ||
+ copy_from_user(&index, args->lstio_grp_idxp,
+ sizeof(index)))
+ return -EFAULT;
- if (ndent <= 0 || index < 0)
- return -EINVAL;
- }
+ if (ndent <= 0 || index < 0)
+ return -EINVAL;
+ }
- LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
if (rc != 0)
return rc;
- if (args->lstio_grp_dentsp != NULL &&
- (cfs_copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
- cfs_copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
+ if (args->lstio_grp_dentsp != NULL &&
+ (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
+ copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
+ rc = -EFAULT;
- return 0;
+ return 0;
}
int
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
- LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- return -EFAULT;
- }
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
name[args->lstio_bat_nmlen] = 0;
args->lstio_bat_ndentp == NULL) /* # of node entry */
return -EINVAL;
- if (cfs_copy_from_user(&index, args->lstio_bat_idxp,
+ if (copy_from_user(&index, args->lstio_bat_idxp,
sizeof(index)) ||
- cfs_copy_from_user(&ndent, args->lstio_bat_ndentp,
+ copy_from_user(&ndent, args->lstio_bat_ndentp,
sizeof(ndent)))
return -EFAULT;
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name,
- args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
if (rc != 0)
return rc;
- if (args->lstio_bat_dentsp != NULL &&
- (cfs_copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
- cfs_copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
- rc = -EFAULT;
+ if (args->lstio_bat_dentsp != NULL &&
+ (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
+ copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
+ rc = -EFAULT;
- return rc;
+ return rc;
}
int
if (name == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(name, args->lstio_sta_namep,
+ if (copy_from_user(name, args->lstio_sta_namep,
args->lstio_sta_nmlen)) {
LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
return -EFAULT;
/* have parameter, check if parameter length is valid */
if (args->lstio_tes_param != NULL &&
(args->lstio_tes_param_len <= 0 ||
- args->lstio_tes_param_len > CFS_PAGE_SIZE - sizeof(lstcon_test_t)))
+ args->lstio_tes_param_len >
+ PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1);
goto out;
}
- rc = -EFAULT;
- if (cfs_copy_from_user(name,
- args->lstio_tes_bat_name,
- args->lstio_tes_bat_nmlen) ||
- cfs_copy_from_user(srcgrp,
- args->lstio_tes_sgrp_name,
- args->lstio_tes_sgrp_nmlen) ||
- cfs_copy_from_user(dstgrp,
- args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
- cfs_copy_from_user(param, args->lstio_tes_param,
- args->lstio_tes_param_len))
- goto out;
+ rc = -EFAULT;
+ if (copy_from_user(name, args->lstio_tes_bat_name,
+ args->lstio_tes_bat_nmlen) ||
+ copy_from_user(srcgrp, args->lstio_tes_sgrp_name,
+ args->lstio_tes_sgrp_nmlen) ||
+ copy_from_user(dstgrp, args->lstio_tes_dgrp_name,
+ args->lstio_tes_dgrp_nmlen) ||
+ copy_from_user(param, args->lstio_tes_param,
+ args->lstio_tes_param_len))
+ goto out;
rc = lstcon_test_add(name,
args->lstio_tes_type,
&ret, args->lstio_tes_resultp);
if (ret != 0)
- rc = (cfs_copy_to_user(args->lstio_tes_retp, &ret,
+ rc = (copy_to_user(args->lstio_tes_retp, &ret,
sizeof(ret))) ? -EFAULT : 0;
out:
if (name != NULL)
int
lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
{
- char *buf;
- int opc = data->ioc_u32[0];
- int rc;
+ char *buf;
+ int opc = data->ioc_u32[0];
+ int rc;
- if (cmd != IOC_LIBCFS_LNETST)
- return -EINVAL;
+ if (cmd != IOC_LIBCFS_LNETST)
+ return -EINVAL;
- if (data->ioc_plen1 > CFS_PAGE_SIZE)
- return -EINVAL;
+ if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ return -EINVAL;
- LIBCFS_ALLOC(buf, data->ioc_plen1);
- if (buf == NULL)
- return -ENOMEM;
+ LIBCFS_ALLOC(buf, data->ioc_plen1);
+ if (buf == NULL)
+ return -ENOMEM;
- /* copy in parameter */
- if (cfs_copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
- LIBCFS_FREE(buf, data->ioc_plen1);
- return -EFAULT;
- }
+ /* copy in parameter */
+ if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
+ LIBCFS_FREE(buf, data->ioc_plen1);
+ return -EFAULT;
+ }
mutex_lock(&console_session.ses_mutex);
rc = -EINVAL;
}
- if (cfs_copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
- sizeof(lstcon_trans_stat_t)))
- rc = -EFAULT;
+ if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
+ sizeof(lstcon_trans_stat_t)))
+ rc = -EFAULT;
out:
mutex_unlock(&console_session.ses_mutex);
- LIBCFS_FREE(buf, data->ioc_plen1);
+ LIBCFS_FREE(buf, data->ioc_plen1);
- return rc;
+ return rc;
}
EXPORT_SYMBOL(lstcon_ioctl_entry);
if (bulk->bk_iovs[i].kiov_page == NULL)
continue;
- cfs_free_page(bulk->bk_iovs[i].kiov_page);
+ __free_page(bulk->bk_iovs[i].kiov_page);
}
srpc_client_rpc_decref(crpc->crp_rpc);
cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
lstcon_rpc_t, crp_link) {
- if (cfs_copy_from_user(&tmp, next,
+ if (copy_from_user(&tmp, next,
sizeof(cfs_list_t)))
return -EFAULT;
(cfs_time_t)console_session.ses_id.ses_stamp);
cfs_duration_usec(dur, &tv);
- if (cfs_copy_to_user(&ent->rpe_peer,
- &nd->nd_id, sizeof(lnet_process_id_t)) ||
- cfs_copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
- cfs_copy_to_user(&ent->rpe_state,
- &nd->nd_state, sizeof(nd->nd_state)) ||
- cfs_copy_to_user(&ent->rpe_rpc_errno, &error,
- sizeof(error)))
- return -EFAULT;
+ if (copy_to_user(&ent->rpe_peer,
+ &nd->nd_id, sizeof(lnet_process_id_t)) ||
+ copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
+ copy_to_user(&ent->rpe_state,
+ &nd->nd_state, sizeof(nd->nd_state)) ||
+ copy_to_user(&ent->rpe_rpc_errno, &error,
+ sizeof(error)))
+ return -EFAULT;
- if (error != 0)
- continue;
+ if (error != 0)
+ continue;
- /* RPC is done */
- rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+ /* RPC is done */
+ rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
- if (cfs_copy_to_user(&ent->rpe_sid,
- &rep->sid, sizeof(lst_sid_t)) ||
- cfs_copy_to_user(&ent->rpe_fwk_errno,
- &rep->status, sizeof(rep->status)))
- return -EFAULT;
+ if (copy_to_user(&ent->rpe_sid,
+ &rep->sid, sizeof(lst_sid_t)) ||
+ copy_to_user(&ent->rpe_fwk_errno,
+ &rep->status, sizeof(rep->status)))
+ return -EFAULT;
- if (readent == NULL)
- continue;
+ if (readent == NULL)
+ continue;
- if ((error = readent(trans->tas_opc, msg, ent)) != 0)
- return error;
- }
+ error = readent(trans->tas_opc, msg, ent);
+ if (error != 0)
+ return error;
+ }
- return 0;
+ return 0;
}
void
LASSERT (i < nkiov);
- pid = (lnet_process_id_packed_t *)cfs_page_address(kiov[i].kiov_page);
+ pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
return &pid[idx % SFW_ID_PER_PAGE];
}
{
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
- brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + CFS_PAGE_SIZE - 1) / CFS_PAGE_SIZE;
- brq->blk_flags = param->blk_flags;
+ brq->blk_opc = param->blk_opc;
+ brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
+ PAGE_CACHE_SIZE;
+ brq->blk_flags = param->blk_flags;
- return 0;
+ return 0;
}
int
if (transop == LST_TRANS_TSBCLIADD) {
npg = sfw_id_pages(test->tes_span);
nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
- npg * CFS_PAGE_SIZE :
+ npg * PAGE_CACHE_SIZE :
sizeof(lnet_process_id_packed_t) * test->tes_span;
}
LASSERT(nob > 0);
len = (feats & LST_FEAT_BULK_LEN) == 0 ?
- CFS_PAGE_SIZE : min_t(int, nob, CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE);
nob -= len;
bulk->bk_iovs[i].kiov_offset = 0;
bulk->bk_iovs[i].kiov_len = len;
bulk->bk_iovs[i].kiov_page =
- cfs_alloc_page(CFS_ALLOC_STD);
+ alloc_page(GFP_IOFS);
if (bulk->bk_iovs[i].kiov_page == NULL) {
lstcon_rpc_put(*crpc);
case LST_TRANS_SESQRY:
rep = &msg->msg_body.dbg_reply;
- if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+ if (copy_to_user(&ent_up->rpe_priv[0],
&rep->dbg_timeout, sizeof(int)) ||
- cfs_copy_to_user(&ent_up->rpe_payload[0],
+ copy_to_user(&ent_up->rpe_payload[0],
&rep->dbg_name, LST_NAME_SIZE))
return -EFAULT;
}
for (i = 0 ; i < count; i++) {
- if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
}
for (i = 0; i < count; i++) {
- if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
goto error;
}
cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
lstcon_group_t, grp_link) {
if (index-- == 0) {
- return cfs_copy_to_user(name_up, grp->grp_name, len) ?
+ return copy_to_user(name_up, grp->grp_name, len) ?
-EFAULT : 0;
}
}
break;
nd = ndl->ndl_node;
- if (cfs_copy_to_user(&dents_up[count].nde_id,
+ if (copy_to_user(&dents_up[count].nde_id,
&nd->nd_id, sizeof(nd->nd_id)) ||
- cfs_copy_to_user(&dents_up[count].nde_state,
+ copy_to_user(&dents_up[count].nde_state,
&nd->nd_state, sizeof(nd->nd_state)))
return -EFAULT;
lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
- rc = cfs_copy_to_user(gents_p, gentp,
+ rc = copy_to_user(gents_p, gentp,
sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list,
lstcon_batch_t, bat_link) {
if (index-- == 0) {
- return cfs_copy_to_user(name_up,bat->bat_name, len) ?
+ return copy_to_user(name_up, bat->bat_name, len) ?
-EFAULT: 0;
}
}
cfs_list_for_each_entry_typed(ndl, srvlst, lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
- rc = cfs_copy_to_user(ent_up, entp,
+ rc = copy_to_user(ent_up, entp,
sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
transop == LST_TRANS_TSBSRVQRY);
/* positive errno, framework error code */
- if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+ if (copy_to_user(&ent_up->rpe_priv[0],
&rep->bar_active, sizeof(rep->bar_active)))
return -EFAULT;
srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
- if (cfs_copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
- cfs_copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
- cfs_copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
+ if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
+ copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
+ copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
return -EFAULT;
return 0;
}
for (i = 0 ; i < count; i++) {
- if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
}
for (i = 0; i < count; i++) {
- if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
rc = -EFAULT;
break;
}
return rc;
}
- if (cfs_copy_to_user(sid_up, &console_session.ses_id,
+ if (copy_to_user(sid_up, &console_session.ses_id,
sizeof(lst_sid_t)) == 0)
return rc;
lstcon_ndlink_t, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
- if (cfs_copy_to_user(sid_up, &console_session.ses_id,
+ if (copy_to_user(sid_up, &console_session.ses_id,
sizeof(lst_sid_t)) ||
- cfs_copy_to_user(key_up, &console_session.ses_key,
+ copy_to_user(key_up, &console_session.ses_key,
sizeof(*key_up)) ||
- cfs_copy_to_user(featp, &console_session.ses_features,
+ copy_to_user(featp, &console_session.ses_features,
sizeof(*featp)) ||
- cfs_copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
- cfs_copy_to_user(name_up, console_session.ses_name, len))
+ copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
+ copy_to_user(name_up, console_session.ses_name, len))
rc = -EFAULT;
LIBCFS_FREE(entp, sizeof(*entp));
int j;
#ifdef __KERNEL__
- dests = cfs_page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT (dests != NULL); /* my pages are within KVM always */
+ dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+ LASSERT (dests != NULL); /* my pages are within KVM always */
#else
- dests = cfs_page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]);
+ dests = page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]);
#endif
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
int len;
if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- len = npg * CFS_PAGE_SIZE;
+ len = npg * PAGE_CACHE_SIZE;
} else {
len = sizeof(lnet_process_id_packed_t) *
}
int
-srpc_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i, int nob)
+srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
- nob = min(nob, (int)CFS_PAGE_SIZE);
+ nob = min(nob, (int)PAGE_CACHE_SIZE);
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
bk->bk_pages[i] = pg;
bk->bk_iovs[i].iov_len = nob;
- bk->bk_iovs[i].iov_base = cfs_page_address(pg);
+ bk->bk_iovs[i].iov_base = page_address(pg);
#endif
return nob;
}
srpc_free_bulk (srpc_bulk_t *bk)
{
int i;
- cfs_page_t *pg;
+ struct page *pg;
LASSERT (bk != NULL);
#ifndef __KERNEL__
#endif
if (pg == NULL) break;
- cfs_free_page(pg);
+ __free_page(pg);
}
#ifndef __KERNEL__
- LIBCFS_FREE(bk->bk_pages, sizeof(cfs_page_t *) * bk->bk_niov);
+ LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov);
#endif
LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
return;
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
srpc_bulk_t *bk;
- cfs_page_t **pages;
+ struct page **pages;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
bk->bk_niov = bulk_npg;
#ifndef __KERNEL__
LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
- sizeof(cfs_page_t *) * bulk_npg);
+ sizeof(struct page *) * bulk_npg);
if (pages == NULL) {
LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
CERROR("Can't allocate page array for %d pages\n", bulk_npg);
return NULL;
}
- memset(pages, 0, sizeof(cfs_page_t *) * bulk_npg);
+ memset(pages, 0, sizeof(struct page *) * bulk_npg);
bk->bk_pages = pages;
#else
UNUSED(pages);
#endif
for (i = 0; i < bulk_npg; i++) {
- cfs_page_t *pg;
+ struct page *pg;
int nob;
- pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, CFS_ALLOC_STD);
+ pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS);
if (pg == NULL) {
CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
srpc_free_bulk(bk);
#ifdef __KERNEL__
lnet_kiov_t bk_iovs[0];
#else
- cfs_page_t **bk_pages;
+ struct page **bk_pages;
lnet_md_iovec_t bk_iovs[0];
#endif
} srpc_bulk_t; /* bulk descriptor */
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
+/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
* the end of pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (CFS_PAGE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
void sfw_unpack_message(srpc_msg_t *msg);
void sfw_free_pages(srpc_server_rpc_t *rpc);
-void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
+void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
int sink);
int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
else if (*end == 'm' || *end == 'M')
bulk->blk_size *= 1024 * 1024;
- if (bulk->blk_size > CFS_PAGE_SIZE * LNET_MAX_IOV) {
+ if (bulk->blk_size > PAGE_CACHE_SIZE * LNET_MAX_IOV) {
fprintf(stderr, "Size exceed limitation: %d bytes\n",
bulk->blk_size);
return -1;
struct fld_cache_entry *fldt;
ENTRY;
- OBD_ALLOC_GFP(fldt, sizeof *fldt, CFS_ALLOC_ATOMIC);
+ OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC);
if (!fldt) {
OBD_FREE_PTR(f_new);
EXIT;
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
* corresponding radix tree at the corresponding logical offset.
*
* cl_page is associated with VM page of the hosting environment (struct
- * page in Linux kernel, for example), cfs_page_t. It is assumed, that this
+ * page in the Linux kernel, for example). It is assumed that this
* association is implemented by one of cl_page layers (top layer in the
* current design) that
*
* - translates state (page flag bits) and locking between lustre and
* environment.
*
- * The association between cl_page and cfs_page_t is immutable and
+ * The association between cl_page and struct page is immutable and
* established when cl_page is created.
*
* cl_page can be "owned" by a particular cl_io (see below), guaranteeing
* eviction of the page from the memory). Note, that in general cl_io
* cannot be identified with a particular thread, and page ownership is not
* exactly equal to the current thread holding a lock on the page. Layer
- * implementing association between cl_page and cfs_page_t has to implement
+ * implementing association between cl_page and struct page has to implement
* ownership on top of available synchronization mechanisms.
*
 * While lustre client maintains the notion of page ownership by io,
* - by doing a lookup in the cl_object radix tree, protected by the
* spin-lock;
*
- * - by starting from VM-locked cfs_page_t and following some
+ * - by starting from VM-locked struct page and following some
* hosting environment method (e.g., following ->private pointer in
* the case of Linux kernel), see cl_vmpage_page();
*
*
* Linux Kernel implementation.
*
- * Binding between cl_page and cfs_page_t (which is a typedef for
+ * Binding between cl_page and the VM page (i.e., the kernel's
* struct page) is implemented in the vvp layer. cl_page is attached to the
* ->private pointer of the struct page, together with the setting of
* PG_private bit in page->flags, and acquiring additional reference on the
};
/**
- * Fields are protected by the lock on cfs_page_t, except for atomics and
+ * Fields are protected by the lock on struct page, except for atomics and
* immutables.
*
* \invariant Data type invariants are in cl_page_invariant(). Basically:
*/
struct cl_page_operations {
/**
- * cl_page<->cfs_page_t methods. Only one layer in the stack has to
+ * cl_page<->struct page methods. Only one layer in the stack has to
* implement these. Current code assumes that this functionality is
* provided by the topmost layer, see cl_page_disown0() as an example.
*/
/**
* \return the underlying VM page. Optional.
*/
- cfs_page_t *(*cpo_vmpage)(const struct lu_env *env,
+ struct page *(*cpo_vmpage)(const struct lu_env *env,
const struct cl_page_slice *slice);
/**
* Called when \a io acquires this page into the exclusive
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_page *pg);
-cfs_page_t *cl_page_vmpage (const struct lu_env *env,
+struct page *cl_page_vmpage (const struct lu_env *env,
struct cl_page *page);
-struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
+struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top (struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
*/
cfs_list_t cpg_pending_linkage;
/** VM page */
- cfs_page_t *cpg_page;
+ struct page *cpg_page;
};
static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
return container_of(slice, struct ccc_page, cpg_cl);
}
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage);
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
struct ccc_device {
struct cl_device cdv_cl;
const struct cl_object *obj, struct ost_lvb *lvb);
int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
-cfs_page_t *ccc_page_vmpage(const struct lu_env *env,
+struct page *ccc_page_vmpage(const struct lu_env *env,
const struct cl_page_slice *slice);
int ccc_page_is_under_lock(const struct lu_env *env,
const struct cl_page_slice *slice, struct cl_io *io);
struct ccc_io *cl2ccc_io (const struct lu_env *env,
const struct cl_io_slice *slice);
struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-cfs_page_t *cl2vm_page (const struct cl_page_slice *slice);
+struct page *cl2vm_page (const struct cl_page_slice *slice);
struct inode *ccc_object_inode(const struct cl_object *obj);
struct ccc_object *cl_inode2ccc (struct inode *inode);
int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
struct obd_capa *capa);
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage);
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
int ccc_object_invariant(const struct cl_object *obj);
int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
void cl_inode_fini(struct inode *inode);
/* memory */
/* memory size: used for some client tunables */
-#define cfs_num_physpages (256 * 1024) /* 1GB */
-#define CFS_NUM_CACHEPAGES cfs_num_physpages
+#define num_physpages (256 * 1024) /* 1GB */
+#define NUM_CACHEPAGES num_physpages
/* VFS stuff */
{ \
type *value; \
\
- CLASSERT(CFS_PAGE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
\
OBD_ALLOC_PTR(value); \
if (value == NULL) \
void lu_global_fini(void);
struct lu_kmem_descr {
- cfs_mem_cache_t **ckd_cache;
+ struct kmem_cache **ckd_cache;
const char *ckd_name;
const size_t ckd_size;
};
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than CFS_PAGE_SIZE because the client needs to
+ * It's different than PAGE_CACHE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server CFS_PAGE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-#define LU_PAGE_COUNT (1 << (CFS_PAGE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
/** @} lu_dir */
extern cfs_list_t capa_list[];
extern spinlock_t capa_lock;
extern int capa_count[];
-extern cfs_mem_cache_t *capa_cachep;
+extern struct kmem_cache *capa_cachep;
cfs_hlist_head_t *init_capa_hash(void);
void cleanup_capa_hash(cfs_hlist_head_t *hash);
#define ASSERT_MAX_SIZE_MB 60000ULL
#define ASSERT_PAGE_INDEX(index, OP) \
-do { if (index > ASSERT_MAX_SIZE_MB << (20 - CFS_PAGE_SHIFT)) { \
+do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \
CERROR("bad page index %lu > %llu\n", index, \
- ASSERT_MAX_SIZE_MB << (20 - CFS_PAGE_SHIFT)); \
+ ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \
libcfs_debug = ~0UL; \
OP; \
}} while(0)
/*
* This limit is arbitrary (131072 clients on x86), but it is convenient to use
- * 2^n * CFS_PAGE_SIZE * 8 for the number of bits that fit an order-n allocation.
+ * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation.
* If we need more than 131072 clients (order-2 allocation on x86) then this
* should become an array of single-page pointers that are allocated on demand.
*/
-#if (128 * 1024UL) > (CFS_PAGE_SIZE * 8)
+#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
#define LR_MAX_CLIENTS (128 * 1024UL)
#else
-#define LR_MAX_CLIENTS (CFS_PAGE_SIZE * 8)
+#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
#endif
/** COMPAT_146: this is an OST (temporary) */
#include <libcfs/libcfs.h>
-#define CFS_NGROUPS_PER_BLOCK ((int)(CFS_PAGE_SIZE / sizeof(gid_t)))
+#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_CACHE_SIZE / sizeof(gid_t)))
#define CFS_GROUP_AT(gi, i) \
((gi)->blocks[(i) / CFS_NGROUPS_PER_BLOCK][(i) % CFS_NGROUPS_PER_BLOCK])
int offset = 0;
ENTRY;
- err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+ err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if (err)
RETURN(err);
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
- err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+ err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
if (err) {
OBD_FREE_LARGE(*buf, hdr.ioc_len);
RETURN(err);
static inline int obd_ioctl_popdata(void *arg, void *data, int len)
{
- int err = cfs_copy_to_user(arg, data, len);
- if (err)
- err = -EFAULT;
- return err;
+ int err = copy_to_user(arg, data, len);
+ if (err)
+ err = -EFAULT;
+ return err;
}
#endif
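copy_to_user() returns the number of bytes it failed to copy, not an errno,
which is why the result is normalized to -EFAULT above; the whole helper is
equivalent to:

	return copy_to_user(arg, data, len) ? -EFAULT : 0;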
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
*/
/* depress threads factor for VM with small memory size */
#define OSS_THR_FACTOR min_t(int, 8, \
- CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT))
+ NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
#define OSS_NTHRS_BASE 64
#define OSS_NTHRS_MAX 512
* id number, so this _should_ be more than enough for the maximum number of
* CPTs on any system. If it does happen that this statement is incorrect,
* nrs_orr_genobjname() will inevitably yield a non-unique name and cause
- * cfs_mem_cache_create() to complain (on Linux), so the erroneous situation
+ * kmem_cache_create() to complain (on Linux), so the erroneous situation
* will hopefully not go unnoticed.
*/
#define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3)
struct ptlrpc_nrs_resource od_res;
cfs_binheap_t *od_binheap;
cfs_hash_t *od_obj_hash;
- cfs_mem_cache_t *od_cache;
+ struct kmem_cache *od_cache;
/**
* Used when a new scheduling round commences, in order to synchronize
* all object or OST batches with the new round number.
__ptlrpc_free_bulk(bulk, 0);
}
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset, int len, int);
+ struct page *page, int pageoffset, int len, int);
static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset,
+ struct page *page, int pageoffset,
int len)
{
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
}
static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset,
+ struct page *page, int pageoffset,
int len)
{
__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
}
static inline int lov_lum_swab_if_needed(struct lov_user_md_v3 *lumv3,
- int *lmm_magic,
- struct lov_user_md *lum)
+ int *lmm_magic,
+ struct lov_user_md *lum)
{
- if (lum && cfs_copy_from_user(lumv3, lum,sizeof(struct lov_user_md_v1)))
- return -EFAULT;
-
- *lmm_magic = lumv3->lmm_magic;
-
- if (*lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
- lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lumv3);
- *lmm_magic = LOV_USER_MAGIC_V1;
- } else if (*lmm_magic == LOV_USER_MAGIC_V3) {
- if (lum && cfs_copy_from_user(lumv3, lum, sizeof(*lumv3)))
- return -EFAULT;
- } else if (*lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
- if (lum && cfs_copy_from_user(lumv3, lum, sizeof(*lumv3)))
- return -EFAULT;
- lustre_swab_lov_user_md_v3(lumv3);
- *lmm_magic = LOV_USER_MAGIC_V3;
- } else if (*lmm_magic != LOV_USER_MAGIC_V1) {
- CDEBUG(D_IOCTL,
- "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
- *lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
- return -EINVAL;
- }
- return 0;
+ if (lum && copy_from_user(lumv3, lum, sizeof(struct lov_user_md_v1)))
+ return -EFAULT;
+
+ *lmm_magic = lumv3->lmm_magic;
+
+ if (*lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lumv3);
+ *lmm_magic = LOV_USER_MAGIC_V1;
+ } else if (*lmm_magic == LOV_USER_MAGIC_V3) {
+ if (lum && copy_from_user(lumv3, lum, sizeof(*lumv3)))
+ return -EFAULT;
+ } else if (*lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
+ if (lum && copy_from_user(lumv3, lum, sizeof(*lumv3)))
+ return -EFAULT;
+ lustre_swab_lov_user_md_v3(lumv3);
+ *lmm_magic = LOV_USER_MAGIC_V3;
+ } else if (*lmm_magic != LOV_USER_MAGIC_V1) {
+ CDEBUG(D_IOCTL,
+ "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ *lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
+ return -EINVAL;
+ }
+ return 0;
}
void lov_stripe_lock(struct lov_stripe_md *md);
};
struct brw_page {
- obd_off off;
- cfs_page_t *pg;
- int count;
- obd_flag flag;
+ obd_off off;
+ struct page *pg;
+ int count;
+ obd_flag flag;
};
/* Individual type definitions */
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(CFS_PAGE_SIZE, OST block size) */
+ * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
int cl_chunkbits;
int cl_chunk;
int cl_extent_tax; /* extent overhead, by bytes */
__u32 lnb_page_offset;
__u32 len;
__u32 flags;
- cfs_page_t *page;
+ struct page *page;
struct dentry *dentry;
int lnb_grant_used;
int rc;
static inline int cli_brw_size(struct obd_device *obd)
{
LASSERT(obd != NULL);
- return obd->u.cli.cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
+ return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
}
#endif /* __OBD_H */
extern void obd_cleanup_caches(void);
/* support routines */
-extern cfs_mem_cache_t *obdo_cachep;
+extern struct kmem_cache *obdo_cachep;
#define OBDO_ALLOC(ptr) \
do { \
- OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, CFS_ALLOC_IO); \
+ OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, __GFP_IO); \
} while(0)
#define OBDO_FREE(ptr) \
#define __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, flags) \
do { \
(ptr) = (cptab) == NULL ? \
- cfs_alloc(size, flags) : \
- cfs_cpt_malloc(cptab, cpt, size, flags); \
+ kmalloc(size, flags | __GFP_ZERO) : \
+ cfs_cpt_malloc(cptab, cpt, size, flags | __GFP_ZERO); \
if (unlikely((ptr) == NULL)) { \
CERROR("kmalloc of '" #ptr "' (%d bytes) failed at %s:%d\n", \
(int)(size), __FILE__, __LINE__); \
} else { \
- memset(ptr, 0, size); \
CDEBUG(D_MALLOC, "kmalloced '" #ptr "': %d at %p\n", \
(int)(size), ptr); \
} \
#else /* this version is for the kernel and liblustre */
#define OBD_FREE_RTN0(ptr) \
({ \
- cfs_free(ptr); \
+ kfree(ptr); \
(ptr) = NULL; \
0; \
})
#define __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, flags) \
do { \
(ptr) = (cptab) == NULL ? \
- cfs_alloc(size, flags) : \
- cfs_cpt_malloc(cptab, cpt, size, flags); \
+ kmalloc(size, flags | __GFP_ZERO) : \
+ cfs_cpt_malloc(cptab, cpt, size, flags | __GFP_ZERO); \
if (likely((ptr) != NULL && \
(!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \
!obd_alloc_fail(ptr, #ptr, "km", size, \
__FILE__, __LINE__) || \
OBD_FREE_RTN0(ptr)))){ \
- memset(ptr, 0, size); \
OBD_ALLOC_POST(ptr, size, "kmalloced"); \
} \
} while (0)
#define OBD_ALLOC_GFP(ptr, size, gfp_mask) \
__OBD_MALLOC_VERBOSE(ptr, NULL, 0, size, gfp_mask)
-#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, CFS_ALLOC_IO)
-#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, CFS_ALLOC_STD)
+#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, __GFP_IO)
+#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_IOFS)
#define OBD_ALLOC_PTR(ptr) OBD_ALLOC(ptr, sizeof *(ptr))
#define OBD_ALLOC_PTR_WAIT(ptr) OBD_ALLOC_WAIT(ptr, sizeof *(ptr))
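The explicit memset() is dropped from __OBD_MALLOC_VERBOSE because __GFP_ZERO
zeroes the memory at allocation time; for a plain kmalloc() this is exactly
the kzalloc() idiom (shown for comparison only; the macro keeps the flags-or
form so the same mask also reaches cfs_cpt_malloc()):

	ptr = kmalloc(size, flags | __GFP_ZERO);	/* what the macro does  */
	ptr = kzalloc(size, flags);			/* equivalent shorthand */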
__OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, gfp_mask)
#define OBD_CPT_ALLOC(ptr, cptab, cpt, size) \
- OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, CFS_ALLOC_IO)
+ OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
#define OBD_CPT_ALLOC_PTR(ptr, cptab, cpt) \
OBD_CPT_ALLOC(ptr, cptab, cpt, sizeof *(ptr))
# define __OBD_VMALLOC_VEROBSE(ptr, cptab, cpt, size) \
do { \
(ptr) = cptab == NULL ? \
- cfs_alloc_large(size) : \
+ vmalloc(size) : \
cfs_cpt_vmalloc(cptab, cpt, size); \
if (unlikely((ptr) == NULL)) { \
CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n", \
* since vmalloc in Linux doesn't perform well on multi-cores system, calling
 * vmalloc in critical path would hurt performance badly. See LU-66.
*/
-#define OBD_ALLOC_BIG (4 * CFS_PAGE_SIZE)
+#define OBD_ALLOC_BIG (4 * PAGE_CACHE_SIZE)
#define OBD_ALLOC_LARGE(ptr, size) \
do { \
#endif
#ifdef POISON_BULK
-#define POISON_PAGE(page, val) do { memset(kmap(page), val, CFS_PAGE_SIZE); \
+#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \
kunmap(page); } while (0)
#else
#define POISON_PAGE(page, val) do { } while (0)
#define OBD_FREE(ptr, size) \
do { \
OBD_FREE_PRE(ptr, size, "kfreed"); \
- cfs_free(ptr); \
+ kfree(ptr); \
POISON_PTR(ptr); \
} while(0)
#define OBD_VFREE(ptr, size) \
do { \
OBD_FREE_PRE(ptr, size, "vfreed"); \
- cfs_free_large(ptr); \
+ vfree(ptr); \
POISON_PTR(ptr); \
} while (0)
* love to assert on that, but slab.c keeps kmem_cache_s all to itself. */
#define OBD_SLAB_FREE_RTN0(ptr, slab) \
({ \
- cfs_mem_cache_free((slab), (ptr)); \
+ kmem_cache_free((slab), (ptr)); \
(ptr) = NULL; \
0; \
})
#define __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, type) \
do { \
- LASSERT(ergo((type) != CFS_ALLOC_ATOMIC, !cfs_in_interrupt())); \
+ LASSERT(ergo((type) != GFP_ATOMIC, !cfs_in_interrupt())); \
(ptr) = (cptab) == NULL ? \
- cfs_mem_cache_alloc(slab, type) : \
- cfs_mem_cache_cpt_alloc(slab, cptab, cpt, type); \
+ kmem_cache_alloc(slab, type | __GFP_ZERO) : \
+ cfs_mem_cache_cpt_alloc(slab, cptab, cpt, type | __GFP_ZERO); \
if (likely((ptr) != NULL && \
(!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \
!obd_alloc_fail(ptr, #ptr, "slab-", size, \
__FILE__, __LINE__) || \
OBD_SLAB_FREE_RTN0(ptr, slab)))) { \
- memset(ptr, 0, size); \
OBD_ALLOC_POST(ptr, size, "slab-alloced"); \
} \
} while(0)
#define OBD_SLAB_FREE(ptr, slab, size) \
do { \
OBD_FREE_PRE(ptr, size, "slab-freed"); \
- cfs_mem_cache_free(slab, ptr); \
+ kmem_cache_free(slab, ptr); \
POISON_PTR(ptr); \
} while(0)
#define OBD_SLAB_ALLOC(ptr, slab, size) \
- OBD_SLAB_ALLOC_GFP(ptr, slab, size, CFS_ALLOC_IO)
+ OBD_SLAB_ALLOC_GFP(ptr, slab, size, __GFP_IO)
#define OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, size) \
- OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, CFS_ALLOC_IO)
+ OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, __GFP_IO)
#define OBD_SLAB_ALLOC_PTR(ptr, slab) \
OBD_SLAB_ALLOC(ptr, slab, sizeof *(ptr))
#define __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask) \
do { \
(ptr) = (cptab) == NULL ? \
- cfs_alloc_page(gfp_mask) : \
+ alloc_page(gfp_mask) : \
cfs_page_cpt_alloc(cptab, cpt, gfp_mask); \
if (unlikely((ptr) == NULL)) { \
CERROR("alloc_pages of '" #ptr "' %d page(s) / "LPU64" bytes "\
"failed\n", (int)1, \
- (__u64)(1 << CFS_PAGE_SHIFT)); \
+ (__u64)(1 << PAGE_CACHE_SHIFT)); \
CERROR(LPU64" total bytes and "LPU64" total pages " \
"("LPU64" bytes) allocated by Lustre, " \
"%d total bytes by LNET\n", \
obd_memory_sum(), \
- obd_pages_sum() << CFS_PAGE_SHIFT, \
+ obd_pages_sum() << PAGE_CACHE_SHIFT, \
obd_pages_sum(), \
cfs_atomic_read(&libcfs_kmemory)); \
} else { \
CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \
LPU64" bytes at %p.\n", \
(int)1, \
- (__u64)(1 << CFS_PAGE_SHIFT), ptr); \
+ (__u64)(1 << PAGE_CACHE_SHIFT), ptr); \
} \
} while (0)
obd_pages_sub(0); \
CDEBUG(D_MALLOC, "free_pages '" #ptr "': %d page(s) / "LPU64" bytes " \
"at %p.\n", \
- (int)1, (__u64)(1 << CFS_PAGE_SHIFT), \
+ (int)1, (__u64)(1 << PAGE_CACHE_SHIFT), \
ptr); \
- cfs_free_page(ptr); \
+ __free_page(ptr); \
(ptr) = (void *)0xdeadbeef; \
} while (0)
* ccc_ prefix stands for "Common Client Code".
*/
-static cfs_mem_cache_t *ccc_lock_kmem;
-static cfs_mem_cache_t *ccc_object_kmem;
-static cfs_mem_cache_t *ccc_thread_kmem;
-static cfs_mem_cache_t *ccc_session_kmem;
-static cfs_mem_cache_t *ccc_req_kmem;
+static struct kmem_cache *ccc_lock_kmem;
+static struct kmem_cache *ccc_object_kmem;
+static struct kmem_cache *ccc_thread_kmem;
+static struct kmem_cache *ccc_session_kmem;
+static struct kmem_cache *ccc_req_kmem;
static struct lu_kmem_descr ccc_caches[] = {
{
{
struct ccc_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, __GFP_IO);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
{
struct ccc_session *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, __GFP_IO);
if (session == NULL)
session = ERR_PTR(-ENOMEM);
return session;
struct ccc_req *vrq;
int result;
- OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, __GFP_IO);
if (vrq != NULL) {
cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
result = 0;
struct ccc_object *vob;
struct lu_object *obj;
- OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, __GFP_IO);
if (vob != NULL) {
struct cl_object_header *hdr;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, __GFP_IO);
if (clk != NULL) {
cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
result = 0;
*
*/
-cfs_page_t *ccc_page_vmpage(const struct lu_env *env,
+struct page *ccc_page_vmpage(const struct lu_env *env,
const struct cl_page_slice *slice)
{
return cl2vm_page(slice);
* kernel will check such case correctly.
* linux-2.6.18-128.1.1 miss to do that.
* --bug 17336 */
- loff_t size = cl_isize_read(inode);
- unsigned long cur_index = start >> CFS_PAGE_SHIFT;
-
- if ((size == 0 && cur_index != 0) ||
- (((size - 1) >> CFS_PAGE_SHIFT) < cur_index))
- *exceed = 1;
+ loff_t size = cl_isize_read(inode);
+ unsigned long cur_index = start >>
+ PAGE_CACHE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ (((size - 1) >> PAGE_CACHE_SHIFT) <
+ cur_index))
+ *exceed = 1;
}
return result;
} else {
return container_of0(slice, struct ccc_req, crq_cl);
}
-cfs_page_t *cl2vm_page(const struct cl_page_slice *slice)
+struct page *cl2vm_page(const struct cl_page_slice *slice)
{
return cl2ccc_page(slice)->cpg_page;
}
* additional reference to the resulting page. This is an unsafe version of
* cl_vmpage_page() that can only be used under vmpage lock.
*/
-struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage)
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
{
KLASSERT(PageLocked(vmpage));
return (struct cl_page *)vmpage->private;
* the client requested. Also we need to make sure it's also server
* page size aligned otherwise a server page can be covered by two
* write locks. */
- mask = CFS_PAGE_SIZE;
+ mask = PAGE_CACHE_SIZE;
req_align = (req_end + 1) | req_start;
if (req_align != 0 && (req_align & (mask - 1)) == 0) {
while ((req_align & mask) == 0)
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
-cfs_mem_cache_t *ldlm_interval_slab;
+struct kmem_cache *ldlm_interval_slab;
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
struct ldlm_interval *node;
ENTRY;
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
if (node == NULL)
RETURN(NULL);
};
/* interval tree, for LDLM_EXTENT. */
-extern cfs_mem_cache_t *ldlm_interval_slab; /* slab cache for ldlm_interval */
+extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
extern struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
extern struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
cli->cl_dirty = 0;
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max. */
- cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > cfs_num_physpages / 8)
- cli->cl_dirty_max = cfs_num_physpages << (CFS_PAGE_SHIFT - 3);
+ cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
+ if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > num_physpages / 8)
+ cli->cl_dirty_max = num_physpages << (PAGE_CACHE_SHIFT - 3);
CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
* 1MB until we know what the performance looks like.
* In the future this should likely be increased. LU-1431 */
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
- LNET_MTU >> CFS_PAGE_SHIFT);
-
- if (!strcmp(name, LUSTRE_MDC_NAME)) {
- cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 128 /* MB */) {
- cli->cl_max_rpcs_in_flight = 2;
- } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 256 /* MB */) {
- cli->cl_max_rpcs_in_flight = 3;
- } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 512 /* MB */) {
- cli->cl_max_rpcs_in_flight = 4;
- } else {
+ LNET_MTU >> PAGE_CACHE_SHIFT);
+
+ if (!strcmp(name, LUSTRE_MDC_NAME)) {
+ cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
+ } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ cli->cl_max_rpcs_in_flight = 2;
+ } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ cli->cl_max_rpcs_in_flight = 3;
+ } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ cli->cl_max_rpcs_in_flight = 4;
+ } else {
if (osc_on_mdt(obddev->obd_name))
cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT;
else
}
EXPORT_SYMBOL(ldlm_it2str);
-extern cfs_mem_cache_t *ldlm_lock_slab;
+extern struct kmem_cache *ldlm_lock_slab;
#ifdef HAVE_SERVER_SUPPORT
static ldlm_processing_policy ldlm_processing_policy_table[] = {
if (resource == NULL)
LBUG();
- OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO);
if (lock == NULL)
RETURN(NULL);
* have to allocate the interval node early otherwise we can't regrant
* this lock in the future. - jay */
if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
lock_res_and_lock(lock);
if (local && lock->l_req_mode == lock->l_granted_mode) {
/* I can't check the type of lock here because the bitlock of lock
* is not held here, so do the allocation blindly. -jay */
- OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
- if (node == NULL)
- /* Actually, this causes LUSTRE_EDEADLK to be returned */
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+ if (node == NULL) /* Actually, this causes EDEADLOCK to be returned */
RETURN(NULL);
LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
"CPU partitions ldlm threads should run on");
-extern cfs_mem_cache_t *ldlm_resource_slab;
-extern cfs_mem_cache_t *ldlm_lock_slab;
+extern struct kmem_cache *ldlm_resource_slab;
+extern struct kmem_cache *ldlm_lock_slab;
static struct mutex ldlm_ref_mutex;
static int ldlm_refcount;
lock = cfs_list_entry(expired->next, struct ldlm_lock,
l_pending_chain);
- if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
- (void *)lock >= LP_POISON) {
+ if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE &&
+ (void *)lock >= LP_POISON) {
spin_unlock_bh(&waiting_locks_spinlock);
- CERROR("free lock on elt list %p\n", lock);
- LBUG();
- }
- cfs_list_del_init(&lock->l_pending_chain);
- if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE &&
+ CERROR("free lock on elt list %p\n", lock);
+ LBUG();
+ }
+ cfs_list_del_init(&lock->l_pending_chain);
+ if ((void *)lock->l_export <
+ LP_POISON + PAGE_CACHE_SIZE &&
(void *)lock->l_export >= LP_POISON) {
CERROR("lock with free export on elt list %p\n",
lock->l_export);
init_completion(&blwi->blwi_comp);
CFS_INIT_LIST_HEAD(&blwi->blwi_head);
- if (cfs_memory_pressure_get())
+ if (memory_pressure_get())
blwi->blwi_mem_pressure = 1;
blwi->blwi_ns = ns;
ldlm_bl_thread_start(blp);
if (blwi->blwi_mem_pressure)
- cfs_memory_pressure_set();
+ memory_pressure_set();
if (blwi->blwi_count) {
int count;
blwi->blwi_lock);
}
if (blwi->blwi_mem_pressure)
- cfs_memory_pressure_clr();
+ memory_pressure_clr();
if (blwi->blwi_flags & LCF_ASYNC)
OBD_FREE(blwi, sizeof(*blwi));
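The renamed memory_pressure_get/set/clr() helpers keep their old semantics; in the Linux libcfs headers they are thin wrappers around the PF_MEMALLOC task flag, roughly as below (a sketch, not the verbatim header):

    static inline int memory_pressure_get(void)
    {
            return current->flags & PF_MEMALLOC;
    }

    static inline void memory_pressure_set(void)
    {
            current->flags |= PF_MEMALLOC;
    }

    static inline void memory_pressure_clr(void)
    {
            current->flags &= ~PF_MEMALLOC;
    }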
mutex_init(&ldlm_ref_mutex);
mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
- ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
- sizeof(struct ldlm_resource), 0,
- CFS_SLAB_HWCACHE_ALIGN);
- if (ldlm_resource_slab == NULL)
- return -ENOMEM;
+ ldlm_resource_slab = kmem_cache_create("ldlm_resources",
+ sizeof(struct ldlm_resource), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (ldlm_resource_slab == NULL)
+ return -ENOMEM;
- ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks",
+ ldlm_lock_slab = kmem_cache_create("ldlm_locks",
sizeof(struct ldlm_lock), 0,
- CFS_SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU);
+ SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
if (ldlm_lock_slab == NULL) {
- cfs_mem_cache_destroy(ldlm_resource_slab);
+ kmem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
}
- ldlm_interval_slab = cfs_mem_cache_create("interval_node",
+ ldlm_interval_slab = kmem_cache_create("interval_node",
sizeof(struct ldlm_interval),
- 0, CFS_SLAB_HWCACHE_ALIGN);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (ldlm_interval_slab == NULL) {
- cfs_mem_cache_destroy(ldlm_resource_slab);
- cfs_mem_cache_destroy(ldlm_lock_slab);
+ kmem_cache_destroy(ldlm_resource_slab);
+ kmem_cache_destroy(ldlm_lock_slab);
return -ENOMEM;
}
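The slab-cache conversion in this hunk is mechanical: the kernel's kmem_cache_create() takes a fifth constructor argument, passed as NULL everywhere in this patch. A before/after sketch of the pattern (names illustrative):

    /* before: libcfs wrapper, four arguments, no constructor */
    slab = cfs_mem_cache_create("name", size, 0, CFS_SLAB_HWCACHE_ALIGN);

    /* after: kernel prototype, constructor pointer added (NULL here) */
    slab = kmem_cache_create("name", size, 0, SLAB_HWCACHE_ALIGN, NULL);
    if (slab == NULL)
            return -ENOMEM;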
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_exit(void)
{
- int rc;
- if (ldlm_refcount)
- CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
- rc = cfs_mem_cache_destroy(ldlm_resource_slab);
- LASSERTF(rc == 0, "couldn't free ldlm resource slab\n");
+ if (ldlm_refcount)
+ CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
+ kmem_cache_destroy(ldlm_resource_slab);
#ifdef __KERNEL__
- /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call
- * synchronize_rcu() to wait a grace period elapsed, so that
- * ldlm_lock_free() get a chance to be called. */
- synchronize_rcu();
+ /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
+ * call synchronize_rcu() to wait for a grace period to elapse, so
+ * that ldlm_lock_free() gets a chance to be called. */
+ synchronize_rcu();
#endif
- rc = cfs_mem_cache_destroy(ldlm_lock_slab);
- LASSERTF(rc == 0, "couldn't free ldlm lock slab\n");
- rc = cfs_mem_cache_destroy(ldlm_interval_slab);
- LASSERTF(rc == 0, "couldn't free interval node slab\n");
+ kmem_cache_destroy(ldlm_lock_slab);
+ kmem_cache_destroy(ldlm_interval_slab);
}
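ldlm_exit() also loses its rc variable because, unlike cfs_mem_cache_destroy(), the kernel's kmem_cache_destroy() returns void; the same rc/LASSERTF removal repeats in every teardown path below:

    /* before */
    rc = cfs_mem_cache_destroy(slab);
    LASSERTF(rc == 0, "couldn't free slab\n");

    /* after: nothing to check */
    kmem_cache_destroy(slab);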
/*
* 50 ldlm locks for 1MB of RAM.
*/
-#define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
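A worked instance of the converted limit, assuming 4 KiB pages:

    /* NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT) converts cache pages
     * to MiB of RAM; on a 4 GiB host, 1048576 pages >> 8 == 4096 MiB,
     * times 50 == 204800 ldlm locks allowed host-wide. */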
/*
* Maximal possible grant step plan in %.
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct cfs_shrinker *ldlm_pools_srv_shrinker;
-static struct cfs_shrinker *ldlm_pools_cli_shrinker;
+static struct shrinker *ldlm_pools_srv_shrinker;
+static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
/*
int ldlm_pools_init(void)
{
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
- rc = ldlm_pools_thread_start();
- if (rc == 0) {
- ldlm_pools_srv_shrinker =
- cfs_set_shrinker(CFS_DEFAULT_SEEKS,
- ldlm_pools_srv_shrink);
- ldlm_pools_cli_shrinker =
- cfs_set_shrinker(CFS_DEFAULT_SEEKS,
- ldlm_pools_cli_shrink);
- }
- RETURN(rc);
+ rc = ldlm_pools_thread_start();
+ if (rc == 0) {
+ ldlm_pools_srv_shrinker =
+ set_shrinker(DEFAULT_SEEKS,
+ ldlm_pools_srv_shrink);
+ ldlm_pools_cli_shrinker =
+ set_shrinker(DEFAULT_SEEKS,
+ ldlm_pools_cli_shrink);
+ }
+ RETURN(rc);
}
EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- if (ldlm_pools_srv_shrinker != NULL) {
- cfs_remove_shrinker(ldlm_pools_srv_shrinker);
- ldlm_pools_srv_shrinker = NULL;
- }
- if (ldlm_pools_cli_shrinker != NULL) {
- cfs_remove_shrinker(ldlm_pools_cli_shrinker);
- ldlm_pools_cli_shrinker = NULL;
- }
- ldlm_pools_thread_stop();
+ if (ldlm_pools_srv_shrinker != NULL) {
+ remove_shrinker(ldlm_pools_srv_shrinker);
+ ldlm_pools_srv_shrinker = NULL;
+ }
+ if (ldlm_pools_cli_shrinker != NULL) {
+ remove_shrinker(ldlm_pools_cli_shrinker);
+ ldlm_pools_cli_shrinker = NULL;
+ }
+ ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);
#endif /* __KERNEL__ */
{
int avail;
- avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
+ avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
if (likely(avail >= 0))
avail /= (int)sizeof(struct lustre_handle);
else
#include <obd_class.h>
#include "ldlm_internal.h"
-cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
+struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
int ldlm_srv_namespace_nr = 0;
int ldlm_cli_namespace_nr = 0;
int lru_resize;
dummy[MAX_STRING_SIZE] = '\0';
- if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
return -EFAULT;
if (strncmp(dummy, "clear", 5) == 0) {
struct ldlm_resource *res;
int idx;
- OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, __GFP_IO);
if (res == NULL)
return NULL;
ldata->ld_buf =
lu_buf_check_and_alloc(&lfsck_env_info(env)->lti_linkea_buf,
- CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE);
if (ldata->ld_buf->lb_buf == NULL)
return -ENOMEM;
return rc;
}
-static cfs_page_t *llu_dir_read_page(struct inode *ino, __u64 hash,
+static struct page *llu_dir_read_page(struct inode *ino, __u64 hash,
int exact, struct ll_dir_chain *chain)
{
- cfs_page_t *page;
+ struct page *page;
int rc;
ENTRY;
struct intnl_stat *st = llu_i2stat(dir);
loff_t pos = *basep;
struct ll_dir_chain chain;
- cfs_page_t *page;
+ struct page *page;
int filled = 0;
int rc;
int done;
static void slp_type_fini (struct lu_device_type *t);
static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
static int slp_attr_get (const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
*/
static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct ccc_page *cpg = cl_object_page_slice(obj, page);
static void slp_page_fini_common(struct ccc_page *cp)
{
- cfs_page_t *vmpage = cp->cpg_page;
+ struct page *vmpage = cp->cpg_page;
LASSERT(vmpage != NULL);
llu_free_user_page(vmpage);
unsigned long index, offset, bytes;
offset = (pos & ~CFS_PAGE_MASK);
- index = pos >> CFS_PAGE_SHIFT;
- bytes = CFS_PAGE_SIZE - offset;
+ index = pos >> PAGE_CACHE_SHIFT;
+ bytes = PAGE_CACHE_SIZE - offset;
if (bytes > count)
bytes = count;
static int max_io_pages(ssize_t len, int iovlen)
{
- return (((len + CFS_PAGE_SIZE -1) / CFS_PAGE_SIZE) + 2 + iovlen - 1);
+ return ((len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE) +
+ 2 + iovlen - 1;
}
void put_io_group(struct llu_io_group *group)
/* This should not be "optimized" to use ~0ULL because page->index is a long and
* 32-bit systems are therefore limited to 16TB in a mapping */
-#define MAX_LFS_FILESIZE ((__u64)(~0UL) << CFS_PAGE_SHIFT)
+#define MAX_LFS_FILESIZE ((__u64)(~0UL) << PAGE_CACHE_SHIFT)
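The 16TB limit mentioned in the comment follows directly, again assuming 4 KiB pages:

    /* On 32-bit, ~0UL allows 2^32 - 1 page indexes; shifted left by
     * PAGE_CACHE_SHIFT (12) that is ~2^44 bytes == 16 TB per mapping. */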
struct ll_file_data {
struct obd_client_handle fd_mds_och;
__u32 fd_flags;
#define LLU_IO_GROUP_SIZE(x) \
(sizeof(struct llu_io_group) + \
(sizeof(struct ll_async_page) + \
- sizeof(cfs_page_t) + \
+ sizeof(struct page) + \
llap_cookie_size) * (x))
struct llu_io_session {
LASSERT(sizeof(lum) == sizeof(*lump));
LASSERT(sizeof(lum.lmm_objects[0]) ==
sizeof(lump->lmm_objects[0]));
- if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
+ if (copy_from_user(&lum, lump, sizeof(lum)))
return(-EFAULT);
switch (lum.lmm_magic) {
LASSERT(sizeof(lum) == sizeof(*lump));
LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0]));
- if (cfs_copy_from_user(&lum, lump, sizeof(lum)))
+ if (copy_from_user(&lum, lump, sizeof(lum)))
RETURN(-EFAULT);
rc = llu_lov_setstripe_ea_info(ino, flags, &lum, sizeof(lum));
/* create sample data */
for (i = 0, buf = buf_alloc; i < _npages; i++) {
- for (j = 0; j < CFS_PAGE_SIZE/sizeof(int); j++, buf++) {
+ for (j = 0; j < PAGE_CACHE_SIZE/sizeof(int); j++, buf++) {
*buf = rand();
}
}
/* compute checksum */
for (i = 0, buf = buf_alloc; i < _npages; i++) {
- for (j = 0; j < CFS_PAGE_SIZE/sizeof(int); j++, buf++) {
+ for (j = 0; j < PAGE_CACHE_SIZE/sizeof(int); j++, buf++) {
check_sum[i] += *buf;
}
}
}
gettimeofday(&tw1, NULL);
for (i = 0, buf = buf_alloc; i < _npages;
- i += xfer, buf += xfer * CFS_PAGE_SIZE / sizeof(int)) {
- rc = write(fd, buf, CFS_PAGE_SIZE * xfer);
- if (rc != CFS_PAGE_SIZE * xfer) {
+ i += xfer, buf += xfer * PAGE_CACHE_SIZE / sizeof(int)) {
+ rc = write(fd, buf, PAGE_CACHE_SIZE * xfer);
+ if (rc != PAGE_CACHE_SIZE * xfer) {
printf("write error (i %d, rc %d): %s\n", i, rc,
strerror(errno));
return(1);
}
gettimeofday(&tr1, NULL);
for (i = 0, buf = buf_alloc; i < _npages;
- i += xfer, buf += xfer * CFS_PAGE_SIZE / sizeof(int)) {
- rc = read(fd, buf, CFS_PAGE_SIZE * xfer);
- if (rc != CFS_PAGE_SIZE * xfer) {
+ i += xfer, buf += xfer * PAGE_CACHE_SIZE / sizeof(int)) {
+ rc = read(fd, buf, PAGE_CACHE_SIZE * xfer);
+ if (rc != PAGE_CACHE_SIZE * xfer) {
printf("read error (i %d, rc %d): %s\n", i, rc,
strerror(errno));
return(1);
/* compute checksum */
for (i = 0, buf = buf_alloc; i < _npages; i++) {
int sum = 0;
- for (j = 0; j < CFS_PAGE_SIZE/sizeof(int); j++, buf++) {
+ for (j = 0; j < PAGE_CACHE_SIZE/sizeof(int); j++, buf++) {
sum += *buf;
}
if (sum != check_sum[i]) {
tw = (tw2.tv_sec - tw1.tv_sec) * 1000000 + (tw2.tv_usec - tw1.tv_usec);
tr = (tr2.tv_sec - tr1.tv_sec) * 1000000 + (tr2.tv_usec - tr1.tv_usec);
printf(" (R:%.3fM/s, W:%.3fM/s)\n",
- (_npages * CFS_PAGE_SIZE) / (tw / 1000000.0) / (1024 * 1024),
- (_npages * CFS_PAGE_SIZE) / (tr / 1000000.0) / (1024 * 1024));
+ (_npages * PAGE_CACHE_SIZE) / (tw / 1000000.0) / (1024 * 1024),
+ (_npages * PAGE_CACHE_SIZE) / (tr / 1000000.0) / (1024 * 1024));
if (data_error)
return 1;
__liblustre_setup_();
- buf_size = _npages * CFS_PAGE_SIZE;
+ buf_size = _npages * PAGE_CACHE_SIZE;
if (opt_verbose)
printf("allocating %d bytes buffer\n", buf_size);
buf_alloc = calloc(1, buf_size);
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
- * in CFS_PAGE_SIZE (if CFS_PAGE_SIZE greater than LU_PAGE_SIZE), and the
+ * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE is greater than LU_PAGE_SIZE), and the
* lu_dirpage for this integrated page will be adjusted. See
* lmv_adjust_dirpages().
*
struct pagevec lru_pvec;
#endif
struct lu_dirpage *dp;
- int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> CFS_PAGE_SHIFT;
+ int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
int nrdpgs = 0; /* number of pages read actually */
int npages;
int i;
if (body->valid & OBD_MD_FLSIZE)
cl_isize_write(inode, body->size);
- nrdpgs = (request->rq_bulk->bd_nob_transferred+CFS_PAGE_SIZE-1)
- >> CFS_PAGE_SHIFT;
+ nrdpgs = (request->rq_bulk->bd_nob_transferred +
+ PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
SetPageUptodate(page0);
}
unlock_page(page0);
SetPageUptodate(page);
- dp = cfs_kmap(page);
- hash = le64_to_cpu(dp->ldp_hash_start);
- cfs_kunmap(page);
+ dp = kmap(page);
+ hash = le64_to_cpu(dp->ldp_hash_start);
+ kunmap(page);
offset = hash_x_index(hash, hash64);
*/
wait_on_page(page);
if (PageUptodate(page)) {
- dp = cfs_kmap(page);
+ dp = kmap(page);
if (BITS_PER_LONG == 32 && hash64) {
*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
*end = le64_to_cpu(dp->ldp_hash_end) >> 32;
OBD_ALLOC(ptr, len);
if (ptr == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(ptr, data, len)) {
+ if (copy_from_user(ptr, data, len)) {
OBD_FREE(ptr, len);
return -EFAULT;
}
LASSERT(sizeof(lumv3.lmm_objects[0]) ==
sizeof(lumv3p->lmm_objects[0]));
/* first try with v1 which is smaller than v3 */
- if (cfs_copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
+ if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
RETURN(-EFAULT);
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
- if (cfs_copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
+ if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
RETURN(-EFAULT);
}
lmdp = (struct lov_user_mds_data *)arg;
lump = &lmdp->lmd_lmm;
}
- if (cfs_copy_to_user(lump, lmm, lmmsize)) {
- if (cfs_copy_to_user(lump, lmm, sizeof(*lump)))
+ if (copy_to_user(lump, lmm, lmmsize)) {
+ if (copy_to_user(lump, lmm, sizeof(*lump)))
GOTO(out_req, rc = -EFAULT);
rc = -EOVERFLOW;
}
st.st_gid = body->gid;
st.st_rdev = body->rdev;
st.st_size = body->size;
- st.st_blksize = CFS_PAGE_SIZE;
+ st.st_blksize = PAGE_CACHE_SIZE;
st.st_blocks = body->blocks;
st.st_atime = body->atime;
st.st_mtime = body->mtime;
st.st_ino = inode->i_ino;
lmdp = (struct lov_user_mds_data *)arg;
- if (cfs_copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
+ if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
GOTO(out_req, rc = -EFAULT);
}
if (lmm == NULL)
RETURN(-ENOMEM);
- if (cfs_copy_from_user(lmm, lum, lmmsize))
+ if (copy_from_user(lmm, lum, lmmsize))
GOTO(free_lmm, rc = -EFAULT);
switch (lmm->lmm_magic) {
if (rc)
GOTO(free_lsm, rc);
- if (cfs_copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
+ if (copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
GOTO(free_lsm, rc = -EFAULT);
EXIT;
NULL);
if (rc) {
CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
- if (cfs_copy_to_user((void *)arg, check,
+ if (copy_to_user((void *)arg, check,
sizeof(*check)))
- CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n");
+ CDEBUG(D_QUOTA, "copy_to_user failed\n");
GOTO(out_poll, rc);
}
NULL);
if (rc) {
CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
- if (cfs_copy_to_user((void *)arg, check,
+ if (copy_to_user((void *)arg, check,
sizeof(*check)))
- CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n");
+ CDEBUG(D_QUOTA, "copy_to_user failed\n");
GOTO(out_poll, rc);
}
out_poll:
if (!qctl_20)
GOTO(out_quotactl_18, rc = -ENOMEM);
- if (cfs_copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18)))
+ if (copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18)))
GOTO(out_quotactl_20, rc = -ENOMEM);
QCTL_COPY(qctl_20, qctl_18);
QCTL_COPY(qctl_18, qctl_20);
qctl_18->obd_uuid = qctl_20->obd_uuid;
- if (cfs_copy_to_user((void *)arg, qctl_18,
+ if (copy_to_user((void *)arg, qctl_18,
sizeof(*qctl_18)))
rc = -EFAULT;
}
if (!qctl)
RETURN(-ENOMEM);
- if (cfs_copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
+ if (copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
GOTO(out_quotactl, rc = -EFAULT);
rc = quotactl_ioctl(sbi, qctl);
- if (rc == 0 && cfs_copy_to_user((void *)arg,qctl,sizeof(*qctl)))
+ if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
rc = -EFAULT;
out_quotactl:
int count, vallen;
struct obd_export *exp;
- if (cfs_copy_from_user(&count, (int *)arg, sizeof(int)))
+ if (copy_from_user(&count, (int *)arg, sizeof(int)))
RETURN(-EFAULT);
/* get ost count when count is zero, get mdt count otherwise */
RETURN(rc);
}
- if (cfs_copy_to_user((int *)arg, &count, sizeof(int)))
+ if (copy_to_user((int *)arg, &count, sizeof(int)))
RETURN(-EFAULT);
RETURN(0);
}
case LL_IOC_PATH2FID:
- if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode),
+ if (copy_to_user((void *)arg, ll_inode2fid(inode),
sizeof(struct lu_fid)))
RETURN(-EFAULT);
RETURN(0);
RETURN(-ENOMEM);
/* We don't know the true size yet; copy the fixed-size part */
- if (cfs_copy_from_user(hur, (void *)arg, sizeof(*hur))) {
+ if (copy_from_user(hur, (void *)arg, sizeof(*hur))) {
OBD_FREE_PTR(hur);
RETURN(-EFAULT);
}
RETURN(-ENOMEM);
/* Copy the whole struct */
- if (cfs_copy_from_user(hur, (void *)arg, totalsize)) {
+ if (copy_from_user(hur, (void *)arg, totalsize)) {
OBD_FREE_LARGE(hur, totalsize);
RETURN(-EFAULT);
}
struct hsm_progress_kernel hpk;
struct hsm_progress hp;
- if (cfs_copy_from_user(&hp, (void *)arg, sizeof(hp)))
+ if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
RETURN(-EFAULT);
hpk.hpk_fid = hp.hp_fid;
OBD_ALLOC_PTR(copy);
if (copy == NULL)
RETURN(-ENOMEM);
- if (cfs_copy_from_user(copy, (char *)arg, sizeof(*copy))) {
+ if (copy_from_user(copy, (char *)arg, sizeof(*copy))) {
OBD_FREE_PTR(copy);
RETURN(-EFAULT);
}
rc = ll_ioc_copy_start(inode->i_sb, copy);
- if (cfs_copy_to_user((char *)arg, copy, sizeof(*copy)))
+ if (copy_to_user((char *)arg, copy, sizeof(*copy)))
rc = -EFAULT;
OBD_FREE_PTR(copy);
OBD_ALLOC_PTR(copy);
if (copy == NULL)
RETURN(-ENOMEM);
- if (cfs_copy_from_user(copy, (char *)arg, sizeof(*copy))) {
+ if (copy_from_user(copy, (char *)arg, sizeof(*copy))) {
OBD_FREE_PTR(copy);
RETURN(-EFAULT);
}
rc = ll_ioc_copy_end(inode->i_sb, copy);
- if (cfs_copy_to_user((char *)arg, copy, sizeof(*copy)))
+ if (copy_to_user((char *)arg, copy, sizeof(*copy)))
rc = -EFAULT;
OBD_FREE_PTR(copy);
{
struct ll_file_data *fd;
- OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, __GFP_IO);
if (fd == NULL)
return NULL;
struct file *file2;
struct lustre_swap_layouts lsl;
- if (cfs_copy_from_user(&lsl, (char *)arg,
+ if (copy_from_user(&lsl, (char *)arg,
sizeof(struct lustre_swap_layouts)))
RETURN(-EFAULT);
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
op_data, NULL);
- if (cfs_copy_to_user((char *)arg, hca, sizeof(*hca)))
+ if (copy_to_user((char *)arg, hca, sizeof(*hca)))
rc = -EFAULT;
ll_finish_md_op_data(op_data);
/* default to about 40meg of readahead on a given system. That much tied
* up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - CFS_PAGE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - CFS_PAGE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
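With the usual 4 KiB page size, the two defaults work out as follows:

    /* SBI_DEFAULT_READAHEAD_MAX:       40UL << (20 - 12) == 10240 pages
     *                                  == 40 MiB of readahead
     * SBI_DEFAULT_READAHEAD_WHOLE_MAX:  2UL << (20 - 12) ==   512 pages
     *                                  ==  2 MiB whole-file threshold */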
enum ra_stat {
RA_STAT_HIT = 0,
unsigned long ras_consecutive_stride_requests;
};
-extern cfs_mem_cache_t *ll_file_data_slab;
+extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
#define LLAP_MAGIC 98764321
-extern cfs_mem_cache_t *ll_async_page_slab;
+extern struct kmem_cache *ll_async_page_slab;
extern size_t ll_async_page_slab_size;
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
#else
int ll_show_options(struct seq_file *seq, struct vfsmount *vfs);
#endif
-void ll_dirty_page_discard_warn(cfs_page_t *page, int ioret);
+void ll_dirty_page_discard_warn(struct page *page, int ioret);
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
struct super_block *, struct lookup_intent *);
void lustre_dump_dentry(struct dentry *, int recur);
/**
* locked page returned from vvp_io
*/
- cfs_page_t *ft_vmpage;
+ struct page *ft_vmpage;
#ifndef HAVE_VM_OP_FAULT
struct vm_nopage_api {
/**
if (mapping == NULL)
return;
- ll_teardown_mmaps(mapping, offset, offset + CFS_PAGE_SIZE);
+ ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
truncate_complete_page(mapping, vmpage);
}
int ll_removexattr(struct dentry *dentry, const char *name);
/* llite/remote_perm.c */
-extern cfs_mem_cache_t *ll_remote_perm_cachep;
-extern cfs_mem_cache_t *ll_rmtperm_hash_cachep;
+extern struct kmem_cache *ll_remote_perm_cachep;
+extern struct kmem_cache *ll_rmtperm_hash_cachep;
cfs_hlist_head_t *alloc_rmtperm_hash(void);
void free_rmtperm_hash(cfs_hlist_head_t *hash);
#include <obd_cksum.h>
#include "llite_internal.h"
-cfs_mem_cache_t *ll_file_data_slab;
+struct kmem_cache *ll_file_data_slab;
CFS_LIST_HEAD(ll_super_blocks);
DEFINE_SPINLOCK(ll_sb_lock);
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
- if (pages >> (20 - CFS_PAGE_SHIFT) < 512) {
+ if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
lru_page_max = pages / 2;
} else {
lru_page_max = (pages / 4) * 3;
valid != CLIENT_CONNECT_MDT_REQD) {
char *buf;
- OBD_ALLOC_WAIT(buf, CFS_PAGE_SIZE);
- obd_connect_flags2str(buf, CFS_PAGE_SIZE,
+ OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE);
+ obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
"feature(s) needed for correct operation "
"of this client (%s). Please upgrade "
"server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
- OBD_FREE(buf, CFS_PAGE_SIZE);
+ OBD_FREE(buf, PAGE_CACHE_SIZE);
GOTO(out_md_fid, err = -EPROTO);
}
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
sbi->ll_md_brw_size = data->ocd_brw_size;
else
- sbi->ll_md_brw_size = CFS_PAGE_SIZE;
+ sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
LCONSOLE_INFO("Layout lock feature supported.\n");
if (!obd)
RETURN(-ENOENT);
- if (cfs_copy_to_user((void *)arg, obd->obd_name,
- strlen(obd->obd_name) + 1))
- RETURN(-EFAULT);
+ if (copy_to_user((void *)arg, obd->obd_name,
+ strlen(obd->obd_name) + 1))
+ RETURN(-EFAULT);
- RETURN(0);
+ RETURN(0);
}
/**
return path;
}
-void ll_dirty_page_discard_warn(cfs_page_t *page, int ioret)
+void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
char *buf, *path = NULL;
struct dentry *dentry = NULL;
size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << CFS_PAGE_SHIFT);
+ (vma->vm_pgoff << PAGE_CACHE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
/* return the user space pointer that maps to a file offset via a vma */
static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
{
- return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+ return vma->vm_start +
+ (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
}
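file_to_user() inverts the usual vma offset arithmetic; a small worked case, assuming 4 KiB pages and hypothetical values:

    /* A vma with vm_pgoff == 2 maps file bytes starting at
     * 2 << PAGE_CACHE_SHIFT == 8192, so file byte 12288 resolves to
     * vm_start + (12288 - 8192) == vm_start + 4096. */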
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
last - first + 1, 0);
}
offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
bio_for_each_segment(bvec, bio, i) {
BUG_ON(bvec->bv_offset != 0);
- BUG_ON(bvec->bv_len != CFS_PAGE_SIZE);
+ BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
pages[page_count] = bvec->bv_page;
offsets[page_count] = offset;
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->lo_blocksize = CFS_PAGE_SIZE;
+ lo->lo_blocksize = PAGE_CACHE_SIZE;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
lo->lo_queue->unplug_fn = loop_unplug;
#endif
- /* queue parameters */
- CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
- blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)CFS_PAGE_SIZE);
- blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9));
- blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
+ /* queue parameters */
+ CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+ blk_queue_logical_block_size(lo->lo_queue,
+ (unsigned short)PAGE_CACHE_SIZE);
+ blk_queue_max_hw_sectors(lo->lo_queue,
+ LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+ blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
bd_set_size(bdev, size << 9);
}
static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
- struct super_block *sb = data;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- int mult, rc, pages_number;
+ struct super_block *sb = data;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int mult, rc, pages_number;
- mult = 1 << (20 - CFS_PAGE_SHIFT);
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
- if (rc)
- return rc;
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
- if (pages_number < 0 || pages_number > cfs_num_physpages / 2) {
- CERROR("can't set file readahead more than %lu MB\n",
- cfs_num_physpages >> (20 - CFS_PAGE_SHIFT + 1)); /*1/2 of RAM*/
- return -ERANGE;
- }
+ if (pages_number < 0 || pages_number > num_physpages / 2) {
+ /* 1/2 of RAM */
+ CERROR("can't set file readahead more than %lu MB\n",
+ num_physpages >> (20 - PAGE_CACHE_SHIFT + 1));
+ return -ERANGE;
+ }
spin_lock(&sbi->ll_lock);
sbi->ll_ra_info.ra_max_pages = pages_number;
pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - CFS_PAGE_SHIFT);
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
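The mult conversion used by all of these lproc tunables is simply pages-per-MiB; a worked instance under the 4 KiB page assumption:

    /* mult = 1 << (20 - PAGE_CACHE_SHIFT) == 1 << 8 == 256 pages/MiB,
     * so a user writing "8" to the proc file is converted by
     * lprocfs_write_frac_helper() into 8 * 256 == 2048 pages. */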
struct ll_sb_info *sbi = ll_s2sbi(sb);
int mult, rc, pages_number;
- mult = 1 << (20 - CFS_PAGE_SHIFT);
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
if (rc)
return rc;
pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - CFS_PAGE_SHIFT);
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
- struct super_block *sb = data;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- int mult, rc, pages_number;
-
- mult = 1 << (20 - CFS_PAGE_SHIFT);
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
- if (rc)
- return rc;
+ struct super_block *sb = data;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int mult, rc, pages_number;
- /* Cap this at the current max readahead window size, the readahead
- * algorithm does this anyway so it's pointless to set it larger. */
- if (pages_number < 0 ||
- pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
- CERROR("can't set max_read_ahead_whole_mb more than "
- "max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - CFS_PAGE_SHIFT));
- return -ERANGE;
- }
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ /* Cap this at the current max readahead window size, the readahead
+ * algorithm does this anyway so it's pointless to set it larger. */
+ if (pages_number < 0 ||
+ pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
+ CERROR("can't set max_read_ahead_whole_mb more than "
+ "max_read_ahead_per_file_mb: %lu\n",
+ sbi->ll_ra_info.ra_max_pages_per_file >>
+ (20 - PAGE_CACHE_SHIFT));
+ return -ERANGE;
+ }
spin_lock(&sbi->ll_lock);
sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
struct super_block *sb = data;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
- int shift = 20 - CFS_PAGE_SHIFT;
+ int shift = 20 - PAGE_CACHE_SHIFT;
int max_cached_mb;
int unused_mb;
int nrpages = 0;
ENTRY;
- mult = 1 << (20 - CFS_PAGE_SHIFT);
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
buffer = lprocfs_find_named_value(buffer, "max_cached_mb:", &count);
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
if (rc)
RETURN(rc);
- if (pages_number < 0 || pages_number > cfs_num_physpages) {
+ if (pages_number < 0 || pages_number > num_physpages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
- cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
+ num_physpages >> (20 - PAGE_CACHE_SHIFT));
RETURN(-ERANGE);
}
#include <lustre_param.h>
#include "llite_internal.h"
-cfs_mem_cache_t *ll_remote_perm_cachep = NULL;
-cfs_mem_cache_t *ll_rmtperm_hash_cachep = NULL;
+struct kmem_cache *ll_remote_perm_cachep;
+struct kmem_cache *ll_rmtperm_hash_cachep;
static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
{
struct ll_remote_perm *lrp;
- OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, CFS_ALLOC_KERNEL);
+ OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
if (lrp)
CFS_INIT_HLIST_NODE(&lrp->lrp_list);
return lrp;
OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
REMOTE_PERM_HASHSIZE * sizeof(*hash),
- CFS_ALLOC_STD);
+ GFP_IOFS);
if (!hash)
return NULL;
*/
io->ci_lockreq = CILR_NEVER;
- pos = (vmpage->index << CFS_PAGE_SHIFT);
+ pos = (vmpage->index << PAGE_CACHE_SHIFT);
- /* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, CFS_PAGE_SIZE);
+ /* Create a temp IO to serve write. */
+ result = cl_io_rw_init(env, io, CIT_WRITE,
+ pos, PAGE_CACHE_SIZE);
if (result == 0) {
cio->cui_fd = LUSTRE_FPRIVATE(file);
cio->cui_iov = NULL;
* sense to tune the i_blkbits value for the file based on the OSTs it is
* striped over, rather than having a constant value for all files here. */
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - CFS_PAGE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
 * by default, this should be adjusted corresponding with max_read_ahead_mb
 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
 * up quickly which will affect read performance significantly. See LU-2816 */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
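With 1 MiB RPCs and 4 KiB pages, the fixed step therefore is:

    /* RAS_INCREASE_STEP == ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT
     *                   == (1 << 20) >> 12 == 256 pages per increase */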
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
end = rpc_boundary;
/* Truncate RA window to end of file */
- end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT));
+ end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
- if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT))
+ if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
ll_ra_stats_inc(mapping, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
if (ras->ras_requests == 2 && !ras->ras_request_index) {
__u64 kms_pages;
- kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >>
- CFS_PAGE_SHIFT;
+ kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
* breaking kernel which assumes ->writepage should mark
* PageWriteback or clean the page. */
result = cl_sync_file_range(inode, offset,
- offset + CFS_PAGE_SIZE - 1,
+ offset + PAGE_CACHE_SIZE - 1,
CL_FSYNC_LOCAL, 1);
if (result > 0) {
/* actually we may have written more than one page.
ENTRY;
if (wbc->range_cyclic) {
- start = mapping->writeback_index << CFS_PAGE_SHIFT;
+ start = mapping->writeback_index << PAGE_CACHE_SHIFT;
end = OBD_OBJECT_EOF;
} else {
start = wbc->range_start;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
end = i_size_read(inode);
- mapping->writeback_index = (end >> CFS_PAGE_SHIFT) + 1;
+ mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
}
RETURN(result);
}
return -EFBIG;
}
- *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- *max_pages -= user_addr >> CFS_PAGE_SHIFT;
+ *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
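The max_pages computation rounds the user buffer out to page boundaries at both ends; a worked case with 4 KiB pages and hypothetical values:

    /* user_addr == 4660, size == 8192:
     *   ((4660 + 8192 + 4095) >> 12) - (4660 >> 12) == 4 - 1 == 3,
     * i.e. an unaligned two-page buffer can straddle three pages. */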
OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
if (*pages) {
/* check the page type: if the page is a host page, then do
* write directly */
if (clp->cp_type == CPT_CACHEABLE) {
- cfs_page_t *vmpage = cl_page_vmpage(env, clp);
- cfs_page_t *src_page;
- cfs_page_t *dst_page;
+ struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *src_page;
+ struct page *dst_page;
void *src;
void *dst;
* representing PAGE_SIZE worth of user data, into a single buffer, and
* then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
-#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \
+#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
~(DT_MAX_BRW_SIZE - 1))
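The 22MB figure in the comment can be reproduced if one assumes MAX_MALLOC == 128 KiB and sizeof(struct brw_page) == 24 on 64-bit (both assumptions, not taken from this hunk):

    /* (131072 / 24) * 4096 == 5461 * 4096 ~= 22 MB, then rounded
     * down to a DT_MAX_BRW_SIZE boundary by the trailing mask. */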
static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t file_offset,
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
"offset=%lld=%llx, pages %lu (max %lu)\n",
inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> CFS_PAGE_SHIFT,
- MAX_DIO_SIZE >> CFS_PAGE_SHIFT);
+ file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
/* Check that all user buffers are aligned as well */
for (seg = 0; seg < nr_segs; seg++) {
&pages, &max_pages);
if (likely(page_count > 0)) {
if (unlikely(page_count < max_pages))
- bytes = page_count << CFS_PAGE_SHIFT;
+ bytes = page_count << PAGE_CACHE_SHIFT;
result = ll_direct_IO_26_seg(env, io, rw, inode,
file->f_mapping,
bytes, file_offset,
* We should always be able to kmalloc for a
* page worth of page pointers = 4MB on i386. */
if (result == -ENOMEM &&
- size > (CFS_PAGE_SIZE / sizeof(*pages)) *
- CFS_PAGE_SIZE) {
+ size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
+ PAGE_CACHE_SIZE) {
size = ((((size / 2) - 1) |
~CFS_PAGE_MASK) + 1) &
CFS_PAGE_MASK;
#include <lprocfs_status.h>
#include "llite_internal.h"
-static cfs_mem_cache_t *ll_inode_cachep;
+static struct kmem_cache *ll_inode_cachep;
static struct inode *ll_alloc_inode(struct super_block *sb)
{
- struct ll_inode_info *lli;
- ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
- OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, CFS_ALLOC_IO);
- if (lli == NULL)
- return NULL;
-
- inode_init_once(&lli->lli_vfs_inode);
- return &lli->lli_vfs_inode;
+ struct ll_inode_info *lli;
+ ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
+ OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, __GFP_IO);
+ if (lli == NULL)
+ return NULL;
+
+ inode_init_once(&lli->lli_vfs_inode);
+ return &lli->lli_vfs_inode;
}
static void ll_destroy_inode(struct inode *inode)
int ll_init_inodecache(void)
{
- ll_inode_cachep = cfs_mem_cache_create("lustre_inode_cache",
- sizeof(struct ll_inode_info),
- 0, CFS_SLAB_HWCACHE_ALIGN);
- if (ll_inode_cachep == NULL)
- return -ENOMEM;
- return 0;
+ ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
+ sizeof(struct ll_inode_info),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (ll_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
}
void ll_destroy_inodecache(void)
{
- int rc;
-
- rc = cfs_mem_cache_destroy(ll_inode_cachep);
- LASSERTF(rc == 0, "ll_inode_cache: not all structures were freed\n");
+ kmem_cache_destroy(ll_inode_cachep);
}
/* exported operations */
rc = ll_init_inodecache();
if (rc)
return -ENOMEM;
- ll_file_data_slab = cfs_mem_cache_create("ll_file_data",
- sizeof(struct ll_file_data), 0,
- CFS_SLAB_HWCACHE_ALIGN);
- if (ll_file_data_slab == NULL) {
- ll_destroy_inodecache();
- return -ENOMEM;
- }
-
- ll_remote_perm_cachep = cfs_mem_cache_create("ll_remote_perm_cache",
- sizeof(struct ll_remote_perm),
- 0, 0);
- if (ll_remote_perm_cachep == NULL) {
- cfs_mem_cache_destroy(ll_file_data_slab);
- ll_file_data_slab = NULL;
- ll_destroy_inodecache();
- return -ENOMEM;
- }
-
- ll_rmtperm_hash_cachep = cfs_mem_cache_create("ll_rmtperm_hash_cache",
- REMOTE_PERM_HASHSIZE *
- sizeof(cfs_list_t),
- 0, 0);
- if (ll_rmtperm_hash_cachep == NULL) {
- cfs_mem_cache_destroy(ll_remote_perm_cachep);
- ll_remote_perm_cachep = NULL;
- cfs_mem_cache_destroy(ll_file_data_slab);
- ll_file_data_slab = NULL;
- ll_destroy_inodecache();
- return -ENOMEM;
- }
+ ll_file_data_slab = kmem_cache_create("ll_file_data",
+ sizeof(struct ll_file_data), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (ll_file_data_slab == NULL) {
+ ll_destroy_inodecache();
+ return -ENOMEM;
+ }
+
+ ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
+ sizeof(struct ll_remote_perm),
+ 0, 0, NULL);
+ if (ll_remote_perm_cachep == NULL) {
+ kmem_cache_destroy(ll_file_data_slab);
+ ll_file_data_slab = NULL;
+ ll_destroy_inodecache();
+ return -ENOMEM;
+ }
+
+ ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
+ REMOTE_PERM_HASHSIZE *
+ sizeof(cfs_list_t),
+ 0, 0, NULL);
+ if (ll_rmtperm_hash_cachep == NULL) {
+ kmem_cache_destroy(ll_remote_perm_cachep);
+ ll_remote_perm_cachep = NULL;
+ kmem_cache_destroy(ll_file_data_slab);
+ ll_file_data_slab = NULL;
+ ll_destroy_inodecache();
+ return -ENOMEM;
+ }
proc_lustre_fs_root = proc_lustre_root ?
lprocfs_register("llite", proc_lustre_root, NULL, NULL) : NULL;
static void __exit exit_lustre_lite(void)
{
- int rc;
-
vvp_global_fini();
del_timer(&ll_capa_timer);
ll_capa_thread_stop();
ll_destroy_inodecache();
- rc = cfs_mem_cache_destroy(ll_rmtperm_hash_cachep);
- LASSERTF(rc == 0, "couldn't destroy ll_rmtperm_hash_cachep\n");
- ll_rmtperm_hash_cachep = NULL;
+ kmem_cache_destroy(ll_rmtperm_hash_cachep);
+ ll_rmtperm_hash_cachep = NULL;
- rc = cfs_mem_cache_destroy(ll_remote_perm_cachep);
- LASSERTF(rc == 0, "couldn't destroy ll_remote_perm_cachep\n");
- ll_remote_perm_cachep = NULL;
+ kmem_cache_destroy(ll_remote_perm_cachep);
+ ll_remote_perm_cachep = NULL;
- rc = cfs_mem_cache_destroy(ll_file_data_slab);
- LASSERTF(rc == 0, "couldn't destroy ll_file_data slab\n");
- if (proc_lustre_fs_root)
- lprocfs_remove(&proc_lustre_fs_root);
+ kmem_cache_destroy(ll_file_data_slab);
+ if (proc_lustre_fs_root)
+ lprocfs_remove(&proc_lustre_fs_root);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
* "llite_" (var. "ll_") prefix.
*/
-cfs_mem_cache_t *vvp_thread_kmem;
-static cfs_mem_cache_t *vvp_session_kmem;
+struct kmem_cache *vvp_thread_kmem;
+static struct kmem_cache *vvp_session_kmem;
static struct lu_kmem_descr vvp_caches[] = {
{
.ckd_cache = &vvp_thread_kmem,
};
static void *vvp_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct vvp_thread_info *info;
+ struct vvp_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, CFS_ALLOC_IO);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
+ OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
static void vvp_key_fini(const struct lu_context *ctx,
}
static void *vvp_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct vvp_session *session;
+ struct vvp_session *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, CFS_ALLOC_IO);
- if (session == NULL)
- session = ERR_PTR(-ENOMEM);
- return session;
+ OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, __GFP_IO);
+ if (session == NULL)
+ session = ERR_PTR(-ENOMEM);
+ return session;
}
static void vvp_session_key_fini(const struct lu_context *ctx,
static void vvp_pgcache_page_show(const struct lu_env *env,
struct seq_file *seq, struct cl_page *page)
{
- struct ccc_page *cpg;
- cfs_page_t *vmpage;
- int has_flags;
+ struct ccc_page *cpg;
+ struct page *vmpage;
+ int has_flags;
cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
vmpage = cpg->cpg_page;
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
int vvp_page_init (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
struct ccc_object *cl_inode2ccc(struct inode *inode);
-extern cfs_mem_cache_t *vvp_thread_kmem;
+extern struct kmem_cache *vvp_thread_kmem;
#endif /* VVP_INTERNAL_H */
if (!vio->cui_ra_window_set) {
vio->cui_ra_window_set = 1;
bead->lrr_start = cl_index(obj, pos);
- /*
- * XXX: explicit CFS_PAGE_SIZE
- */
- bead->lrr_count = cl_index(obj, tot + CFS_PAGE_SIZE - 1);
+ bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
ll_ra_read_in(file, bead);
}
#ifndef HAVE_VM_OP_FAULT
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- cfs_page_t *vmpage;
+ struct page *vmpage;
vmpage = filemap_nopage(cfio->ft_vma, cfio->nopage.ft_address,
cfio->nopage.ft_type);
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
int result = 0;
- cfs_page_t *vmpage = NULL;
+ struct page *vmpage = NULL;
struct cl_page *page;
loff_t size;
pgoff_t last; /* last page in a file data region */
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
struct ll_readahead_state *ras = &fd->fd_ras;
- cfs_page_t *vmpage = cp->cpg_page;
+ struct page *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
int rc;
struct cl_object *obj = slice->cpl_obj;
struct ccc_page *cp = cl2ccc_page(slice);
struct cl_page *pg = slice->cpl_page;
- cfs_page_t *vmpage = cp->cpg_page;
+ struct page *vmpage = cp->cpg_page;
int result;
* We're completely overwriting an existing page, so _don't_
* set it up to date until commit_write
*/
- if (from == 0 && to == CFS_PAGE_SIZE) {
+ if (from == 0 && to == PAGE_CACHE_SIZE) {
CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
POISON_PAGE(page, 0x11);
} else
struct inode *inode = ccc_object_inode(obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
- cfs_page_t *vmpage = cp->cpg_page;
+ struct page *vmpage = cp->cpg_page;
int result;
int tallyop;
set_page_dirty(vmpage);
vvp_write_pending(cl2ccc(obj), cp);
} else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
+ pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
bool need_clip = true;
/*
* being.
*/
if (last_index > pg->cp_index) {
- to = CFS_PAGE_SIZE;
+ to = PAGE_CACHE_SIZE;
need_clip = false;
} else if (last_index == pg->cp_index) {
int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
static void vvp_page_fini_common(struct ccc_page *cp)
{
- cfs_page_t *vmpage = cp->cpg_page;
+ struct page *vmpage = cp->cpg_page;
- LASSERT(vmpage != NULL);
- page_cache_release(vmpage);
+ LASSERT(vmpage != NULL);
+ page_cache_release(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
+ struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- cfs_page_t *vmpage = cp->cpg_page;
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct page *vmpage = cp->cpg_page;
- /*
- * vmpage->private was already cleared when page was moved into
- * VPG_FREEING state.
- */
- LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(cp);
+ /*
+ * vmpage->private was already cleared when page was moved into
+ * VPG_FREEING state.
+ */
+ LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
+ vvp_page_fini_common(cp);
}
static int vvp_page_own(const struct lu_env *env,
const struct cl_page_slice *slice, struct cl_io *io,
int nonblock)
{
- struct ccc_page *vpg = cl2ccc_page(slice);
- cfs_page_t *vmpage = vpg->cpg_page;
+ struct ccc_page *vpg = cl2ccc_page(slice);
+ struct page *vmpage = vpg->cpg_page;
LASSERT(vmpage != NULL);
if (nonblock) {
}
static void vvp_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
- wait_on_page_writeback(vmpage);
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ wait_on_page_writeback(vmpage);
}
static void vvp_page_unassume(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
}
static void vvp_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
+ const struct cl_page_slice *slice, struct cl_io *io)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
- unlock_page(cl2vm_page(slice));
+ unlock_page(cl2vm_page(slice));
}
static void vvp_page_discard(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
struct address_space *mapping;
- struct ccc_page *cpg = cl2ccc_page(slice);
+ struct ccc_page *cpg = cl2ccc_page(slice);
LASSERT(vmpage != NULL);
LASSERT(PageLocked(vmpage));
}
static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
__u64 offset;
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
- offset = vmpage->index << CFS_PAGE_SHIFT;
+ offset = vmpage->index << PAGE_CACHE_SHIFT;
- /*
- * XXX is it safe to call this with the page lock held?
- */
- ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE);
- return 0;
+ /*
+ * XXX is it safe to call this with the page lock held?
+ */
+ ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+ return 0;
}
static void vvp_page_delete(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
- struct inode *inode = vmpage->mapping->host;
- struct cl_object *obj = slice->cpl_obj;
+ struct page *vmpage = cl2vm_page(slice);
+ struct inode *inode = vmpage->mapping->host;
+ struct cl_object *obj = slice->cpl_obj;
LASSERT(PageLocked(vmpage));
LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
}
static void vvp_page_export(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int uptodate)
+ const struct cl_page_slice *slice,
+ int uptodate)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
- LASSERT(PageLocked(vmpage));
- if (uptodate)
- SetPageUptodate(vmpage);
- else
- ClearPageUptodate(vmpage);
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ if (uptodate)
+ SetPageUptodate(vmpage);
+ else
+ ClearPageUptodate(vmpage);
}
static int vvp_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
LASSERT(PageLocked(vmpage));
LASSERT(!PageDirty(vmpage));
* This takes inode as a separate argument, because inode on which error is to
* be set can be different from \a vmpage inode in case of direct-io.
*/
-static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
+static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
struct ccc_object *obj = cl_inode2ccc(inode);
int ioret)
{
struct ccc_page *cp = cl2ccc_page(slice);
- cfs_page_t *vmpage = cp->cpg_page;
+ struct page *vmpage = cp->cpg_page;
struct cl_page *page = cl_page_top(slice->cpl_page);
struct inode *inode = ccc_object_inode(page->cp_obj);
ENTRY;
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- cfs_page_t *vmpage = cp->cpg_page;
- ENTRY;
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *pg = slice->cpl_page;
+ struct page *vmpage = cp->cpg_page;
+ ENTRY;
LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
LASSERT(PageWriteback(vmpage));
static int vvp_page_make_ready(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- cfs_page_t *vmpage = cl2vm_page(slice);
+ struct page *vmpage = cl2vm_page(slice);
struct cl_page *pg = slice->cpl_page;
int result = 0;
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
{
- struct ccc_page *vp = cl2ccc_page(slice);
- cfs_page_t *vmpage = vp->cpg_page;
+ struct ccc_page *vp = cl2ccc_page(slice);
+ struct page *vmpage = vp->cpg_page;
(*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
"vm@%p ",
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct ccc_page *cpg = cl_object_page_slice(obj, page);
RETURN(-EINVAL);
/* copy UUID */
- if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
min((int) data->ioc_plen2,
(int) sizeof(struct obd_uuid))))
RETURN(-EFAULT);
0);
if (rc)
RETURN(rc);
- if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min((int) data->ioc_plen1,
(int) sizeof(stat_buf))))
RETURN(-EFAULT);
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
- * However, on hosts where the native VM page size (CFS_PAGE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if CFS_PAGE_SIZE > LU_PAGE_SIZE
+#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
for (i = 0; i < ncfspgs; i++) {
- struct lu_dirpage *dp = cfs_kmap(pages[i]);
+ struct lu_dirpage *dp = kmap(pages[i]);
struct lu_dirpage *first = dp;
struct lu_dirent *end_dirent = NULL;
struct lu_dirent *ent;
first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
- cfs_kunmap(pages[i]);
+ kunmap(pages[i]);
}
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* CFS_PAGE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
- int ncfspgs; /* pages read in CFS_PAGE_SIZE */
+ int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
ENTRY;
if (rc != 0)
RETURN(rc);
- ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1)
- >> CFS_PAGE_SHIFT;
+ ncfspgs = ((*request)->rq_bulk->bd_nob_transferred +
+ PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
placement_policy_t policy;
struct lmv_obd *lmv;
- if (cfs_copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE))
+ if (copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE))
return -EFAULT;
LASSERT(dev != NULL);
extern struct dt_object_operations lod_obj_ops;
/* Slab for OSD object allocation */
-cfs_mem_cache_t *lod_object_kmem;
+struct kmem_cache *lod_object_kmem;
static struct lu_kmem_descr lod_caches[] = {
{
int rc = 0;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lod_obj, lod_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lod_obj, lod_object_kmem, __GFP_IO);
if (lod_obj == NULL)
RETURN(ERR_PTR(-ENOMEM));
#include "lod_internal.h"
-extern cfs_mem_cache_t *lod_object_kmem;
+extern struct kmem_cache *lod_object_kmem;
static const struct dt_body_operations lod_body_lnk_ops;
static int lod_index_lookup(const struct lu_env *env, struct dt_object *dt,
extern struct lu_context_key lov_key;
extern struct lu_context_key lov_session_key;
-extern cfs_mem_cache_t *lov_lock_kmem;
-extern cfs_mem_cache_t *lov_object_kmem;
-extern cfs_mem_cache_t *lov_thread_kmem;
-extern cfs_mem_cache_t *lov_session_kmem;
-extern cfs_mem_cache_t *lov_req_kmem;
+extern struct kmem_cache *lov_lock_kmem;
+extern struct kmem_cache *lov_object_kmem;
+extern struct kmem_cache *lov_thread_kmem;
+extern struct kmem_cache *lov_session_kmem;
+extern struct kmem_cache *lov_req_kmem;
-extern cfs_mem_cache_t *lovsub_lock_kmem;
-extern cfs_mem_cache_t *lovsub_object_kmem;
-extern cfs_mem_cache_t *lovsub_req_kmem;
+extern struct kmem_cache *lovsub_lock_kmem;
+extern struct kmem_cache *lovsub_object_kmem;
+extern struct kmem_cache *lovsub_req_kmem;
-extern cfs_mem_cache_t *lov_lock_link_kmem;
+extern struct kmem_cache *lov_lock_link_kmem;
int lov_object_init (const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf);
int lov_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
int lov_page_init_empty (const struct lu_env *env,
struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
int lov_page_init_raid0 (const struct lu_env *env,
struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
struct lu_object *lov_object_alloc (const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
#include "lov_cl_internal.h"
-cfs_mem_cache_t *lov_lock_kmem;
-cfs_mem_cache_t *lov_object_kmem;
-cfs_mem_cache_t *lov_thread_kmem;
-cfs_mem_cache_t *lov_session_kmem;
-cfs_mem_cache_t *lov_req_kmem;
+struct kmem_cache *lov_lock_kmem;
+struct kmem_cache *lov_object_kmem;
+struct kmem_cache *lov_thread_kmem;
+struct kmem_cache *lov_session_kmem;
+struct kmem_cache *lov_req_kmem;
-cfs_mem_cache_t *lovsub_lock_kmem;
-cfs_mem_cache_t *lovsub_object_kmem;
-cfs_mem_cache_t *lovsub_req_kmem;
+struct kmem_cache *lovsub_lock_kmem;
+struct kmem_cache *lovsub_object_kmem;
+struct kmem_cache *lovsub_req_kmem;
-cfs_mem_cache_t *lov_lock_link_kmem;
+struct kmem_cache *lov_lock_link_kmem;
/** Lock class of lov_device::ld_mutex. */
struct lock_class_key cl_lov_device_mutex_class;
{
struct lov_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, __GFP_IO);
if (info != NULL)
CFS_INIT_LIST_HEAD(&info->lti_closure.clc_list);
else
{
struct lov_session *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, __GFP_IO);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
int result;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, __GFP_IO);
if (lr != NULL) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
result = 0;
return NULL;
for (i = 0; i < stripe_count; i++) {
- OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, __GFP_IO);
if (loi == NULL)
goto err;
lsm->lsm_oinfo[i] = loi;
spinlock_t set_lock;
};
-extern cfs_mem_cache_t *lov_oinfo_slab;
+extern struct kmem_cache *lov_oinfo_slab;
void lov_finish_set(struct lov_request_set *set);
LASSERT(idx < lck->lls_nr);
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
if (link != NULL) {
struct lov_sublock_env *subenv;
struct lov_lock_sub *lls;
int result;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
result = lov_lock_sub_init(env, lck, io);
int result = -ENOMEM;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
lck->lls_orig = lock->cll_descr;
RETURN(-EINVAL);
/* copy UUID */
- if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid))))
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
+ min((int)data->ioc_plen2,
+ (int)sizeof(struct obd_uuid))))
RETURN(-EFAULT);
flags = uarg ? *(__u32*)uarg : 0;
flags);
if (rc)
RETURN(rc);
- if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min((int) data->ioc_plen1,
(int) sizeof(stat_buf))))
RETURN(-EFAULT);
*genp = lov->lov_tgts[i]->ltd_gen;
}
- if (cfs_copy_to_user((void *)uarg, buf, len))
+ if (copy_to_user((void *)uarg, buf, len))
rc = -EFAULT;
obd_ioctl_freedata(buf, len);
break;
.o_quotacheck = lov_quotacheck,
};
-cfs_mem_cache_t *lov_oinfo_slab;
+struct kmem_cache *lov_oinfo_slab;
extern struct lu_kmem_descr lov_caches[];
int __init lov_init(void)
{
struct lprocfs_static_vars lvars = { 0 };
- int rc, rc2;
+ int rc;
ENTRY;
/* print an address of _any_ initialized kernel symbol from this
if (rc)
return rc;
- lov_oinfo_slab = cfs_mem_cache_create("lov_oinfo",
- sizeof(struct lov_oinfo),
- 0, CFS_SLAB_HWCACHE_ALIGN);
+ lov_oinfo_slab = kmem_cache_create("lov_oinfo",
+ sizeof(struct lov_oinfo), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
if (lov_oinfo_slab == NULL) {
lu_kmem_fini(lov_caches);
return -ENOMEM;
LUSTRE_LOV_NAME, &lov_device_type);
if (rc) {
- rc2 = cfs_mem_cache_destroy(lov_oinfo_slab);
- LASSERT(rc2 == 0);
+ kmem_cache_destroy(lov_oinfo_slab);
lu_kmem_fini(lov_caches);
}
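For reference, the kernel slab API that these hunks converge on differs from the old cfs_ wrappers in two ways: kmem_cache_create() takes an extra constructor argument (NULL throughout this patch), and kmem_cache_destroy() returns void, which is why the rc2/LASSERT checks disappear. A minimal sketch of the pattern, using a hypothetical foo_item cache:

	#include <linux/slab.h>

	struct foo_item {
		int fi_id;
	};

	static struct kmem_cache *foo_cachep;

	static int foo_caches_init(void)
	{
		/* fifth argument is the optional constructor; NULL here */
		foo_cachep = kmem_cache_create("foo_cache",
					       sizeof(struct foo_item),
					       0, SLAB_HWCACHE_ALIGN, NULL);
		if (foo_cachep == NULL)
			return -ENOMEM;
		return 0;
	}

	static void foo_caches_fini(void)
	{
		/* returns void, so no rc/LASSERT dance is possible */
		kmem_cache_destroy(foo_cachep);
		foo_cachep = NULL;
	}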
#ifdef __KERNEL__
static void /*__exit*/ lov_exit(void)
{
- int rc;
-
- class_unregister_type(LUSTRE_LOV_NAME);
- rc = cfs_mem_cache_destroy(lov_oinfo_slab);
- LASSERT(rc == 0);
-
+ class_unregister_type(LUSTRE_LOV_NAME);
+ kmem_cache_destroy(lov_oinfo_slab);
lu_kmem_fini(lov_caches);
}
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
}
int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
llo_page_init, env, obj, page, vmpage);
struct lu_object *obj;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, __GFP_IO);
if (lov != NULL) {
obj = lov2lu(lov);
lu_object_init(obj, NULL, dev);
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
- if (cfs_copy_from_user(&lum, lump, lum_size))
+ if (copy_from_user(&lum, lump, lum_size))
GOTO(out_set, rc = -EFAULT);
else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3))
(lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
/* Return right size of stripe to user */
lum.lmm_stripe_count = lsm->lsm_stripe_count;
- rc = cfs_copy_to_user(lump, &lum, lum_size);
+ rc = copy_to_user(lump, &lum, lum_size);
GOTO(out_set, rc = -EOVERFLOW);
}
rc = lov_packmd(exp, &lmmk, lsm);
lum.lmm_layout_gen = lmmk->lmm_layout_gen;
((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
- if (cfs_copy_to_user(lump, lmmk, lmm_size))
+ if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
obd_free_diskmd(exp, &lmmk);
}
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
};
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
ENTRY;
cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
- addr = cfs_kmap(vmpage);
+ addr = kmap(vmpage);
memset(addr, 0, cl_page_size(obj));
- cfs_kunmap(vmpage);
+ kunmap(vmpage);
cl_page_export(env, page, 1);
RETURN(0);
}
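The cfs_kmap()/cfs_kunmap() wrappers map one-to-one onto the kernel's kmap()/kunmap(); the rule is to keep the mapping window as short as possible. A sketch of the zero-a-page idiom used above, assuming the caller already holds a reference on the page:

	#include <linux/highmem.h>
	#include <linux/pagemap.h>
	#include <linux/string.h>

	/* Zero one page through a temporary kernel mapping.  kmap()
	 * may sleep, so this cannot run in atomic context (kmap_atomic()
	 * would be the choice there). */
	static void zero_vmpage(struct page *vmpage)
	{
		void *addr = kmap(vmpage);

		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(vmpage);
	}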
struct lovsub_req *lsr;
int result;
- OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, __GFP_IO);
if (lsr != NULL) {
cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
result = 0;
int result;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
if (lsk != NULL) {
CFS_INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
struct lu_object *obj;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, __GFP_IO);
if (los != NULL) {
struct cl_object_header *hdr;
};
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *unused)
+ struct cl_page *page, struct page *unused)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
ENTRY;
#define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
#define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
-static cfs_mem_cache_t *fcb_cache;
+static struct kmem_cache *fcb_cache;
struct fsfilt_cb_data {
struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
int pages, unsigned long *blocks,
int create)
{
- int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
int rc = 0, i = 0;
struct page *fp = NULL;
int clen = 0;
int pages, unsigned long *blocks,
int create)
{
- int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
unsigned long *b;
int rc = 0, i;
static int __init fsfilt_ext3_init(void)
{
- int rc;
-
- fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb",
- sizeof(struct fsfilt_cb_data), 0, 0);
- if (!fcb_cache) {
- CERROR("error allocating fsfilt journal callback cache\n");
- GOTO(out, rc = -ENOMEM);
- }
+ int rc;
+
+ fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
+ sizeof(struct fsfilt_cb_data),
+ 0, 0, NULL);
+ if (!fcb_cache) {
+ CERROR("error allocating fsfilt journal callback cache\n");
+ GOTO(out, rc = -ENOMEM);
+ }
- rc = fsfilt_register_ops(&fsfilt_ext3_ops);
+ rc = fsfilt_register_ops(&fsfilt_ext3_ops);
- if (rc) {
- int err = cfs_mem_cache_destroy(fcb_cache);
- LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
- }
+ if (rc)
+ kmem_cache_destroy(fcb_cache);
out:
- return rc;
+ return rc;
}
static void __exit fsfilt_ext3_exit(void)
{
- int rc;
-
- fsfilt_unregister_ops(&fsfilt_ext3_ops);
- rc = cfs_mem_cache_destroy(fcb_cache);
- LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
+ fsfilt_unregister_ops(&fsfilt_ext3_ops);
+ kmem_cache_destroy(fcb_cache);
}
module_init(fsfilt_ext3_init);
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < op_data->op_npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
mdc_readdir_pack(req, op_data->op_offset,
- CFS_PAGE_SIZE * op_data->op_npages,
+ PAGE_CACHE_SIZE * op_data->op_npages,
&op_data->op_fid1, op_data->op_capa1);
ptlrpc_request_set_replen(req);
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
req->rq_bulk->bd_nob_transferred,
- CFS_PAGE_SIZE * op_data->op_npages);
+ PAGE_CACHE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
RETURN(-EPROTO);
}
GOTO(out, rc = -ENODEV);
/* copy UUID */
- if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid))))
- GOTO(out, rc = -EFAULT);
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
+ min((int)data->ioc_plen2,
+ (int)sizeof(struct obd_uuid))))
+ GOTO(out, rc = -EFAULT);
- rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- 0);
- if (rc != 0)
- GOTO(out, rc);
+ rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
+ if (rc != 0)
+ GOTO(out, rc);
- if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min((int) data->ioc_plen1,
(int) sizeof(stat_buf))))
GOTO(out, rc = -EFAULT);
break;
}
case LL_IOC_GET_CONNECT_FLAGS: {
- if (cfs_copy_to_user(uarg,
+ if (copy_to_user(uarg,
exp_connect_flags_ptr(exp),
sizeof(__u64)))
GOTO(out, rc = -EFAULT);
static const char mdd_obf_dir_name[] = "fid";
/* Slab for MDD object allocation */
-cfs_mem_cache_t *mdd_object_kmem;
+struct kmem_cache *mdd_object_kmem;
static struct lu_kmem_descr mdd_caches[] = {
{
/* First try a small buf */
LASSERT(env != NULL);
ldata->ld_buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_link_buf,
- CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE);
if (ldata->ld_buf->lb_buf == NULL)
return -ENOMEM;
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
}
static int lprocfs_wr_changelog_mask(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
- struct mdd_device *mdd = data;
- char *kernbuf;
- int rc;
- ENTRY;
-
- if (count >= CFS_PAGE_SIZE)
- RETURN(-EINVAL);
- OBD_ALLOC(kernbuf, CFS_PAGE_SIZE);
- if (kernbuf == NULL)
- RETURN(-ENOMEM);
- if (cfs_copy_from_user(kernbuf, buffer, count))
- GOTO(out, rc = -EFAULT);
- kernbuf[count] = 0;
-
- rc = cfs_str2mask(kernbuf, changelog_type2str, &mdd->mdd_cl.mc_mask,
- CHANGELOG_MINMASK, CHANGELOG_ALLMASK);
- if (rc == 0)
- rc = count;
+ struct mdd_device *mdd = data;
+ char *kernbuf;
+ int rc;
+ ENTRY;
+
+ if (count >= PAGE_CACHE_SIZE)
+ RETURN(-EINVAL);
+ OBD_ALLOC(kernbuf, PAGE_CACHE_SIZE);
+ if (kernbuf == NULL)
+ RETURN(-ENOMEM);
+ if (copy_from_user(kernbuf, buffer, count))
+ GOTO(out, rc = -EFAULT);
+ kernbuf[count] = 0;
+
+ rc = cfs_str2mask(kernbuf, changelog_type2str, &mdd->mdd_cl.mc_mask,
+ CHANGELOG_MINMASK, CHANGELOG_ALLMASK);
+ if (rc == 0)
+ rc = count;
out:
- OBD_FREE(kernbuf, CFS_PAGE_SIZE);
- return rc;
+ OBD_FREE(kernbuf, PAGE_CACHE_SIZE);
+ return rc;
}
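All of the converted proc write handlers follow the same shape: reject writes of PAGE_CACHE_SIZE or more (leaving room for a trailing NUL), pull the user buffer in with copy_from_user(), terminate, parse. A condensed sketch with plain kmalloc()/kfree() standing in for the OBD_ALLOC()/OBD_FREE() wrappers (handler name hypothetical):

	#include <linux/slab.h>
	#include <linux/uaccess.h>
	#include <linux/pagemap.h>

	static int example_proc_write(const char __user *buffer,
				      unsigned long count)
	{
		char *kernbuf;
		int rc;

		if (count >= PAGE_CACHE_SIZE)	/* room for trailing NUL */
			return -EINVAL;

		kernbuf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (kernbuf == NULL)
			return -ENOMEM;

		if (copy_from_user(kernbuf, buffer, count)) {
			/* copy_from_user() returns bytes NOT copied */
			rc = -EFAULT;
			goto out;
		}
		kernbuf[count] = '\0';

		rc = count;	/* a real handler parses kernbuf here */
	out:
		kfree(kernbuf);
		return rc;
	}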
struct cucb_data {
#include "mdd_internal.h"
static const struct lu_object_operations mdd_lu_obj_ops;
-extern cfs_mem_cache_t *mdd_object_kmem;
+extern struct kmem_cache *mdd_object_kmem;
static int mdd_xattr_get(const struct lu_env *env,
struct md_object *obj, struct lu_buf *buf,
{
struct mdd_object *mdd_obj;
- OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, __GFP_IO);
if (mdd_obj != NULL) {
struct lu_object *o;
LASSERT(rdpg->rp_pages != NULL);
pg = rdpg->rp_pages[0];
- dp = (struct lu_dirpage*)cfs_kmap(pg);
+ dp = (struct lu_dirpage *)kmap(pg);
memset(dp, 0, sizeof(struct lu_dirpage));
dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
- cfs_kunmap(pg);
+ kunmap(pg);
GOTO(out_unlock, rc = LU_PAGE_SIZE);
}
if (rc >= 0) {
struct lu_dirpage *dp;
- dp = cfs_kmap(rdpg->rp_pages[0]);
+ dp = kmap(rdpg->rp_pages[0]);
dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
if (rc == 0) {
/*
dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
rc = min_t(unsigned int, LU_PAGE_SIZE, rdpg->rp_count);
}
- cfs_kunmap(rdpg->rp_pages[0]);
+ kunmap(rdpg->rp_pages[0]);
}
GOTO(out_unlock, rc);
static const struct lu_object_operations mdt_obj_ops;
/* Slab for MDT object allocation */
-static cfs_mem_cache_t *mdt_object_kmem;
+static struct kmem_cache *mdt_object_kmem;
static struct lu_kmem_descr mdt_caches[] = {
{
PFID(mdt_object_fid(o)), rc);
rc = -EFAULT;
} else {
- int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc);
+ int print_limit = min_t(int, PAGE_CACHE_SIZE - 128, rc);
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
rc -= 2;
for (i = 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0;
i++, tmpcount -= tmpsize) {
- tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE);
+ tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE);
ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize);
}
rdpg->rp_attrs |= LUDA_64BITHASH;
rdpg->rp_count = min_t(unsigned int, reqbody->nlink,
exp_max_brw_size(info->mti_exp));
- rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >>
- CFS_PAGE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
if (rdpg->rp_pages == NULL)
RETURN(-ENOMEM);
for (i = 0; i < rdpg->rp_npages; ++i) {
- rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+ rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
if (rdpg->rp_pages[i] == NULL)
GOTO(free_rdpg, rc = -ENOMEM);
}
for (i = 0; i < rdpg->rp_npages; i++)
if (rdpg->rp_pages[i] != NULL)
- cfs_free_page(rdpg->rp_pages[i]);
+ __free_page(rdpg->rp_pages[i]);
OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]);
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
GOTO(out, rc = -EFAULT);
rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT,
exp_max_brw_size(info->mti_exp));
- rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT;
+ rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
/* allocate pages to store the containers */
OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
if (rdpg->rp_pages == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < rdpg->rp_npages; i++) {
- rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+ rdpg->rp_pages[i] = alloc_page(GFP_IOFS);
if (rdpg->rp_pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
if (rdpg->rp_pages) {
for (i = 0; i < rdpg->rp_npages; i++)
if (rdpg->rp_pages[i])
- cfs_free_page(rdpg->rp_pages[i]);
+ __free_page(rdpg->rp_pages[i]);
OBD_FREE(rdpg->rp_pages,
rdpg->rp_npages * sizeof(rdpg->rp_pages[0]));
}
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, __GFP_IO);
if (mo != NULL) {
struct lu_object *o;
struct lu_object_header *h;
OBD_ALLOC(kernbuf, count + 1);
if (kernbuf == NULL)
GOTO(failed, rc = -ENOMEM);
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
GOTO(failed, rc = -EFAULT);
/* Remove any extraneous bits from the upcall (e.g. linefeeds) */
if (param == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(param, buffer, size)) {
+ if (copy_from_user(param, buffer, size)) {
CERROR("%s: bad identity data\n", mdt_obd_name(mdt));
GOTO(out, rc = -EFAULT);
}
* bytes into kbuf, to ensure that the string is NUL-terminated.
* UUID_MAX should include a trailing NUL already.
*/
- if (cfs_copy_from_user(kbuf, buffer,
- min_t(unsigned long, BUFLEN - 1, count))) {
+ if (copy_from_user(kbuf, buffer,
+ min_t(unsigned long, BUFLEN - 1, count))) {
count = -EFAULT;
goto out;
}
errmsg = "string too long";
GOTO(failed, rc = -EINVAL);
}
- if (cfs_copy_from_user(kernbuf, buffer, count)) {
+ if (copy_from_user(kernbuf, buffer, count)) {
errmsg = "bad address";
GOTO(failed, rc = -EFAULT);
}
errmsg = "no memory";
GOTO(failed, rc = -ENOMEM);
}
- if (cfs_copy_from_user(kernbuf, buffer, count)) {
+ if (copy_from_user(kernbuf, buffer, count)) {
errmsg = "bad address";
GOTO(failed, rc = -EFAULT);
}
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
* bytes into kbuf, to ensure that the string is NUL-terminated.
* UUID_MAX should include a trailing NUL already.
*/
- if (cfs_copy_from_user(kbuf, buffer,
- min_t(unsigned long, UUID_MAX - 1, count))) {
+ if (copy_from_user(kbuf, buffer,
+ min_t(unsigned long, UUID_MAX - 1, count))) {
count = -EFAULT;
goto out;
}
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - CFS_PAGE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
LASSERT(cfg->cfg_instance != NULL);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
- OBD_ALLOC(inst, CFS_PAGE_SIZE);
- if (inst == NULL)
- RETURN(-ENOMEM);
+ OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ if (inst == NULL)
+ RETURN(-ENOMEM);
if (!IS_SERVER(lsi)) {
- pos = snprintf(inst, CFS_PAGE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= CFS_PAGE_SIZE) {
- OBD_FREE(inst, CFS_PAGE_SIZE);
+ pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_CACHE_SIZE) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
return -E2BIG;
}
} else {
LASSERT(IS_MDT(lsi));
rc = server_name2svname(lsi->lsi_svname, inst, NULL,
- CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE);
if (rc) {
- OBD_FREE(inst, CFS_PAGE_SIZE);
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
RETURN(-EINVAL);
}
pos = strlen(inst);
++pos;
buf = inst + pos;
- bufsz = CFS_PAGE_SIZE - pos;
+ bufsz = PAGE_CACHE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > CFS_PAGE_SIZE) {
+ if (entry->mne_length > PAGE_CACHE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
/* continue, even one with error */
}
- OBD_FREE(inst, CFS_PAGE_SIZE);
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
RETURN(rc);
}
struct mgs_config_body *body;
struct mgs_config_res *res;
struct ptlrpc_bulk_desc *desc;
- cfs_page_t **pages;
+ struct page **pages;
int nrpages;
bool eof = true;
bool mne_swab = false;
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++) {
- pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+ pages[i] = alloc_page(GFP_IOFS);
if (pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
GOTO(out, rc = -E2BIG);
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = CFS_PAGE_SHIFT;
+ body->mcb_bits = PAGE_CACHE_SHIFT;
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
GOTO(out, rc = -ENOMEM);
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (ealen < 0)
GOTO(out, rc = ealen);
- if (ealen > nrpages << CFS_PAGE_SHIFT)
+ if (ealen > nrpages << PAGE_CACHE_SHIFT)
GOTO(out, rc = -EINVAL);
if (ealen == 0) { /* no logs transferred */
int rc2;
void *ptr;
- ptr = cfs_kmap(pages[i]);
- rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, CFS_PAGE_SIZE),
+ ptr = kmap(pages[i]);
+ rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
+ min_t(int, ealen, PAGE_CACHE_SIZE),
mne_swab);
- cfs_kunmap(pages[i]);
- if (rc2 < 0) {
- CWARN("Process recover log %s error %d\n",
- cld->cld_logname, rc2);
- break;
+ kunmap(pages[i]);
+ if (rc2 < 0) {
+ CWARN("Process recover log %s error %d\n",
+ cld->cld_logname, rc2);
+ break;
}
- ealen -= CFS_PAGE_SIZE;
+ ealen -= PAGE_CACHE_SIZE;
}
out:
if (rc == 0 && !eof)
goto again;
- if (pages) {
- for (i = 0; i < nrpages; i++) {
- if (pages[i] == NULL)
- break;
- cfs_free_page(pages[i]);
- }
- OBD_FREE(pages, sizeof(*pages) * nrpages);
- }
- return rc;
+ if (pages) {
+ for (i = 0; i < nrpages; i++) {
+ if (pages[i] == NULL)
+ break;
+ __free_page(pages[i]);
+ }
+ OBD_FREE(pages, sizeof(*pages) * nrpages);
+ }
+ return rc;
}
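cfs_alloc_page(CFS_ALLOC_STD) becomes alloc_page(GFP_IOFS) (GFP_IOFS being __GFP_IO | __GFP_FS on kernels of this vintage), and cfs_free_page() becomes __free_page(). The allocate-array/unwind-on-failure idiom used by the mgc, mdt and mgs code looks roughly like this sketch (helper name hypothetical):

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/slab.h>

	static struct page **alloc_page_array(int nrpages)
	{
		struct page **pages;
		int i;

		pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL);
		if (pages == NULL)
			return NULL;

		for (i = 0; i < nrpages; i++) {
			pages[i] = alloc_page(GFP_IOFS);
			if (pages[i] == NULL)
				goto err;
		}
		return pages;
	err:
		/* free only the pages that were actually allocated */
		while (--i >= 0)
			__free_page(pages[i]);
		kfree(pages);
		return NULL;
	}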
#ifdef HAVE_LDISKFS_OSD
GOTO(out_pool, rc = -EINVAL);
}
- if (data->ioc_plen1 > CFS_PAGE_SIZE)
+ if (data->ioc_plen1 > PAGE_CACHE_SIZE)
GOTO(out_pool, rc = -E2BIG);
OBD_ALLOC(lcfg, data->ioc_plen1);
if (lcfg == NULL)
GOTO(out_pool, rc = -ENOMEM);
- if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
+ if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
GOTO(out_lcfg, rc = -EFAULT);
if (lcfg->lcfg_bufcount < 2)
OBD_ALLOC(lcfg, data->ioc_plen1);
if (lcfg == NULL)
GOTO(out, rc = -ENOMEM);
- if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
+ if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1))
GOTO(out_free, rc = -EFAULT);
if (lcfg->lcfg_bufcount < 1)
* shouldn't cross unit boundaries.
*/
static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl,
- struct mgs_config_res *res, cfs_page_t **pages,
+ struct mgs_config_res *res, struct page **pages,
int nrpages, int units_total, int unit_size)
{
struct mgs_nidtbl_target *tgt;
/* make sure unit_size is power 2 */
LASSERT((unit_size & (unit_size - 1)) == 0);
- LASSERT(nrpages << CFS_PAGE_SHIFT >= units_total * unit_size);
+ LASSERT(nrpages << PAGE_CACHE_SHIFT >= units_total * unit_size);
mutex_lock(&tbl->mn_lock);
LASSERT(nidtbl_is_sane(tbl));
}
LASSERT((rc & (unit_size - 1)) == 0);
- if (units_in_page == 0) {
- /* allocate a new page */
- pages[index] = cfs_alloc_page(CFS_ALLOC_STD);
- if (pages[index] == NULL) {
- rc = -ENOMEM;
- break;
- }
+ if (units_in_page == 0) {
+ /* allocate a new page */
+ pages[index] = alloc_page(GFP_IOFS);
+ if (pages[index] == NULL) {
+ rc = -ENOMEM;
+ break;
+ }
- /* destroy previous map */
- if (index > 0)
- cfs_kunmap(pages[index - 1]);
+ /* destroy previous map */
+ if (index > 0)
+ kunmap(pages[index - 1]);
- /* reassign buffer */
- buf = cfs_kmap(pages[index]);
- ++index;
+ /* reassign buffer */
+ buf = kmap(pages[index]);
+ ++index;
- units_in_page = CFS_PAGE_SIZE / unit_size;
- LASSERT(units_in_page > 0);
- }
+ units_in_page = PAGE_CACHE_SIZE / unit_size;
+ LASSERT(units_in_page > 0);
+ }
/* allocate an unit */
LASSERT(((long)buf & (unit_size - 1)) == 0);
bytes_in_unit, index, nrpages, units_total);
}
if (index > 0)
- cfs_kunmap(pages[index - 1]);
+ kunmap(pages[index - 1]);
out:
LASSERT(version <= tbl->mn_version);
res->mcr_size = tbl->mn_version;
int bytes;
int page_count;
int nrpages;
- cfs_page_t **pages = NULL;
+ struct page **pages = NULL;
ENTRY;
body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
RETURN(rc);
bufsize = body->mcb_units << body->mcb_bits;
- nrpages = (bufsize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ nrpages = (bufsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (nrpages > PTLRPC_MAX_BRW_PAGES)
RETURN(-EINVAL);
GOTO(out, rc = -EINVAL);
res->mcr_offset = body->mcb_offset;
- unit_size = min_t(int, 1 << body->mcb_bits, CFS_PAGE_SIZE);
+ unit_size = min_t(int, 1 << body->mcb_bits, PAGE_CACHE_SIZE);
bytes = mgs_nidtbl_read(req->rq_export, &fsdb->fsdb_nidtbl, res,
pages, nrpages, bufsize / unit_size, unit_size);
if (bytes < 0)
GOTO(out, rc = bytes);
/* start bulk transfer */
- page_count = (bytes + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+ page_count = (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
LASSERT(page_count <= nrpages);
desc = ptlrpc_prep_bulk_exp(req, page_count, 1,
BULK_PUT_SOURCE, MGS_BULK_PORTAL);
for (i = 0; i < page_count && bytes > 0; i++) {
ptlrpc_prep_bulk_page_pin(desc, pages[i], 0,
- min_t(int, bytes, CFS_PAGE_SIZE));
- bytes -= CFS_PAGE_SIZE;
+ min_t(int, bytes, PAGE_CACHE_SIZE));
+ bytes -= PAGE_CACHE_SIZE;
}
rc = target_bulk_io(req->rq_export, desc, &lwi);
for (i = 0; i < nrpages; i++) {
if (pages[i] == NULL)
break;
- cfs_free_page(pages[i]);
+ __free_page(pages[i]);
}
OBD_FREE(pages, sizeof(*pages) * nrpages);
return rc;
char *ptr;
int rc = 0;
- if (count > CFS_PAGE_SIZE)
+ if (count > PAGE_CACHE_SIZE)
return -EINVAL;
OBD_ALLOC(kbuf, count + 1);
#define NR_CAPAHASH 32
#define CAPA_HASH_SIZE 3000 /* for MDS & OSS */
-cfs_mem_cache_t *capa_cachep = NULL;
+struct kmem_cache *capa_cachep;
#ifdef __KERNEL__
/* lock for capa hash/capa_list/fo_capa_keys */
cfs_hlist_head_t *init_capa_hash(void)
{
- cfs_hlist_head_t *hash;
- int nr_hash, i;
+ cfs_hlist_head_t *hash;
+ int nr_hash, i;
- OBD_ALLOC(hash, CFS_PAGE_SIZE);
- if (!hash)
- return NULL;
+ OBD_ALLOC(hash, PAGE_CACHE_SIZE);
+ if (!hash)
+ return NULL;
- nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t);
- LASSERT(nr_hash > NR_CAPAHASH);
+ nr_hash = PAGE_CACHE_SIZE / sizeof(cfs_hlist_head_t);
+ LASSERT(nr_hash > NR_CAPAHASH);
- for (i = 0; i < NR_CAPAHASH; i++)
- CFS_INIT_HLIST_HEAD(hash + i);
- return hash;
+ for (i = 0; i < NR_CAPAHASH; i++)
+ CFS_INIT_HLIST_HEAD(hash + i);
+ return hash;
}
EXPORT_SYMBOL(init_capa_hash);
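For scale: with 4 KiB pages on a 64-bit kernel, PAGE_CACHE_SIZE / sizeof(cfs_hlist_head_t) is 4096 / 8 = 512 hash heads per page, comfortably above NR_CAPAHASH (32), which is exactly what the LASSERT guards.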
}
spin_unlock(&capa_lock);
- OBD_FREE(hash, CFS_PAGE_SIZE);
+ OBD_FREE(hash, PAGE_CACHE_SIZE);
}
EXPORT_SYMBOL(cleanup_capa_hash);
sg_set_page(&sl, virt_to_page(capa),
offsetof(struct lustre_capa, lc_hmac),
- (unsigned long)(capa) % CFS_PAGE_SIZE);
+ (unsigned long)(capa) % PAGE_CACHE_SIZE);
ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
ll_crypto_free_hash(tfm);
GOTO(out, rc);
}
- sg_set_page(&sd, virt_to_page(d), 16,
- (unsigned long)(d) % CFS_PAGE_SIZE);
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % PAGE_CACHE_SIZE);
- sg_set_page(&ss, virt_to_page(s), 16,
- (unsigned long)(s) % CFS_PAGE_SIZE);
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % PAGE_CACHE_SIZE);
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
GOTO(out, rc);
}
- sg_set_page(&sd, virt_to_page(d), 16,
- (unsigned long)(d) % CFS_PAGE_SIZE);
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % PAGE_CACHE_SIZE);
- sg_set_page(&ss, virt_to_page(s), 16,
- (unsigned long)(s) % CFS_PAGE_SIZE);
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % PAGE_CACHE_SIZE);
desc.tfm = tfm;
desc.info = NULL;
/** Lock class of cl_lock::cll_guard */
static struct lock_class_key cl_lock_guard_class;
-static cfs_mem_cache_t *cl_lock_kmem;
+static struct kmem_cache *cl_lock_kmem;
static struct lu_kmem_descr cl_lock_caches[] = {
{
struct lu_object_header *head;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
if (lock != NULL) {
cfs_atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
#include <cl_object.h>
#include "cl_internal.h"
-static cfs_mem_cache_t *cl_env_kmem;
+static struct kmem_cache *cl_env_kmem;
/** Lock class of cl_object_header::coh_page_guard */
static struct lock_class_key cl_page_guard_class;
struct lu_env *env;
struct cl_env *cle;
- OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
if (cle != NULL) {
int rc;
ENTRY;
OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
- CFS_ALLOC_IO);
+ __GFP_IO);
if (page != NULL) {
int result = 0;
cfs_atomic_set(&page->cp_ref, 1);
/**
* Returns a VM page associated with a given cl_page.
*/
-cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
+struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
{
const struct cl_page_slice *slice;
/**
* Returns a cl_page associated with a VM page, and given cl_object.
*/
-struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
struct cl_page *top;
struct cl_page *page;
*/
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
{
- /*
- * XXX for now.
- */
- return (loff_t)idx << CFS_PAGE_SHIFT;
+ return (loff_t)idx << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);
*/
pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
{
- /*
- * XXX for now.
- */
- return offset >> CFS_PAGE_SHIFT;
+ return offset >> PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
int cl_page_size(const struct cl_object *obj)
{
- return 1 << CFS_PAGE_SHIFT;
+ return 1 << PAGE_CACHE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
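With the placeholder comments gone, cl_offset()/cl_index() are plain shifts. For example, with PAGE_CACHE_SHIFT == 12 (4 KiB pages):

	cl_offset(obj, 3)    == 3 << 12      == 12288
	cl_index(obj, 12289) == 12289 >> 12  == 3      /* rounds down */

so cl_index(cl_offset(obj, idx)) == idx always holds, while the reverse only holds for page-aligned offsets.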
"("LPU64" bytes) allocated by Lustre, "
"%d total bytes by LNET\n",
obd_memory_sum(),
- obd_pages_sum() << CFS_PAGE_SHIFT,
+ obd_pages_sum() << PAGE_CACHE_SHIFT,
obd_pages_sum(),
cfs_atomic_read(&libcfs_kmemory));
return 1;
OBD_ALLOC(lcfg, data->ioc_plen1);
if (lcfg == NULL)
GOTO(out, err = -ENOMEM);
- err = cfs_copy_from_user(lcfg, data->ioc_pbuf1,
+ err = copy_from_user(lcfg, data->ioc_pbuf1,
data->ioc_plen1);
if (!err)
err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= CFS_PAGE_SIZE) {
+ if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
CWARN("mask failed: u64val "LPU64" >= "LPU64"\n", u64val,
- (__u64)CFS_PAGE_SIZE);
+ (__u64)PAGE_CACHE_SIZE);
ret = -EINVAL;
}
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL). */
- if (cfs_num_physpages <= 512 << (20 - CFS_PAGE_SHIFT))
- obd_max_dirty_pages = cfs_num_physpages / 4;
- else
- obd_max_dirty_pages = cfs_num_physpages / 2;
+ if (num_physpages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ obd_max_dirty_pages = num_physpages / 4;
+ else
+ obd_max_dirty_pages = num_physpages / 2;
err = obd_init_caches();
if (err)
"niobuf_local: file_offset="LPD64", len=%d, page=%p, rc=%d\n",
nb->lnb_file_offset, nb->len, nb->page, nb->rc);
CDEBUG(D_RPCTRACE, "nb->page: index = %ld\n",
- nb->page ? cfs_page_index(nb->page) : -1);
+ nb->page ? page_index(nb->page) : -1);
}
EXPORT_SYMBOL(dump_lniobuf);
int i;
LASSERT(pageidx < rdpg->rp_npages);
- lp = cfs_kmap(rdpg->rp_pages[pageidx]);
+ lp = kmap(rdpg->rp_pages[pageidx]);
/* fill lu pages */
for (i = 0; i < LU_PAGE_COUNT; i++, lp++, nob -= LU_PAGE_SIZE) {
/* end of index */
break;
}
- cfs_kunmap(rdpg->rp_pages[i]);
+ kunmap(rdpg->rp_pages[i]);
}
iops->put(env, it);
extern cfs_list_t obd_types;
spinlock_t obd_types_lock;
-cfs_mem_cache_t *obd_device_cachep;
-cfs_mem_cache_t *obdo_cachep;
+struct kmem_cache *obd_device_cachep;
+struct kmem_cache *obdo_cachep;
EXPORT_SYMBOL(obdo_cachep);
-cfs_mem_cache_t *import_cachep;
+struct kmem_cache *import_cachep;
cfs_list_t obd_zombie_imports;
cfs_list_t obd_zombie_exports;
*/
static struct obd_device *obd_device_alloc(void)
{
- struct obd_device *obd;
+ struct obd_device *obd;
- OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, CFS_ALLOC_IO);
- if (obd != NULL) {
- obd->obd_magic = OBD_DEVICE_MAGIC;
- }
- return obd;
+ OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, __GFP_IO);
+ if (obd != NULL) {
+ obd->obd_magic = OBD_DEVICE_MAGIC;
+ }
+ return obd;
}
static void obd_device_free(struct obd_device *obd)
void obd_cleanup_caches(void)
{
- int rc;
-
ENTRY;
if (obd_device_cachep) {
- rc = cfs_mem_cache_destroy(obd_device_cachep);
- LASSERTF(rc == 0, "Cannot destropy ll_obd_device_cache: rc %d\n", rc);
+ kmem_cache_destroy(obd_device_cachep);
obd_device_cachep = NULL;
}
if (obdo_cachep) {
- rc = cfs_mem_cache_destroy(obdo_cachep);
- LASSERTF(rc == 0, "Cannot destory ll_obdo_cache\n");
+ kmem_cache_destroy(obdo_cachep);
obdo_cachep = NULL;
}
if (import_cachep) {
- rc = cfs_mem_cache_destroy(import_cachep);
- LASSERTF(rc == 0, "Cannot destory ll_import_cache\n");
+ kmem_cache_destroy(import_cachep);
import_cachep = NULL;
}
if (capa_cachep) {
- rc = cfs_mem_cache_destroy(capa_cachep);
- LASSERTF(rc == 0, "Cannot destory capa_cache\n");
+ kmem_cache_destroy(capa_cachep);
capa_cachep = NULL;
}
EXIT;
int obd_init_caches(void)
{
- ENTRY;
+ ENTRY;
- LASSERT(obd_device_cachep == NULL);
- obd_device_cachep = cfs_mem_cache_create("ll_obd_dev_cache",
- sizeof(struct obd_device),
- 0, 0);
- if (!obd_device_cachep)
- GOTO(out, -ENOMEM);
-
- LASSERT(obdo_cachep == NULL);
- obdo_cachep = cfs_mem_cache_create("ll_obdo_cache", sizeof(struct obdo),
- 0, 0);
- if (!obdo_cachep)
- GOTO(out, -ENOMEM);
-
- LASSERT(import_cachep == NULL);
- import_cachep = cfs_mem_cache_create("ll_import_cache",
- sizeof(struct obd_import),
- 0, 0);
- if (!import_cachep)
- GOTO(out, -ENOMEM);
-
- LASSERT(capa_cachep == NULL);
- capa_cachep = cfs_mem_cache_create("capa_cache",
- sizeof(struct obd_capa), 0, 0);
- if (!capa_cachep)
- GOTO(out, -ENOMEM);
+ LASSERT(obd_device_cachep == NULL);
+ obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
+ sizeof(struct obd_device),
+ 0, 0, NULL);
+ if (!obd_device_cachep)
+ GOTO(out, -ENOMEM);
+
+ LASSERT(obdo_cachep == NULL);
+ obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo),
+ 0, 0, NULL);
+ if (!obdo_cachep)
+ GOTO(out, -ENOMEM);
+
+ LASSERT(import_cachep == NULL);
+ import_cachep = kmem_cache_create("ll_import_cache",
+ sizeof(struct obd_import),
+ 0, 0, NULL);
+ if (!import_cachep)
+ GOTO(out, -ENOMEM);
+
+ LASSERT(capa_cachep == NULL);
+ capa_cachep = kmem_cache_create("capa_cache", sizeof(struct obd_capa),
+ 0, 0, NULL);
+ if (!capa_cachep)
+ GOTO(out, -ENOMEM);
- RETURN(0);
- out:
- obd_cleanup_caches();
- RETURN(-ENOMEM);
+ RETURN(0);
+out:
+ obd_cleanup_caches();
+ RETURN(-ENOMEM);
}
int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf)
{
- ldata->ld_buf = lu_buf_check_and_alloc(buf, CFS_PAGE_SIZE);
+ ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_CACHE_SIZE);
if (ldata->ld_buf->lb_buf == NULL)
return -ENOMEM;
ldata->ld_leh = ldata->ld_buf->lb_buf;
int offset = 0;
ENTRY;
- err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+ err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if ( err )
RETURN(err);
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
- err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+ err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
if ( err ) {
OBD_FREE_LARGE(*buf, hdr.ioc_len);
RETURN(err);
int obd_ioctl_popdata(void *arg, void *data, int len)
{
- int err;
+ int err;
- err = cfs_copy_to_user(arg, data, len);
- if (err)
- err = -EFAULT;
- return err;
+ err = copy_to_user(arg, data, len);
+ if (err)
+ err = -EFAULT;
+ return err;
}
EXPORT_SYMBOL(obd_ioctl_popdata);
if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
dst->i_blkbits = ffs(src->o_blksize) - 1;
- if (dst->i_blkbits < CFS_PAGE_SHIFT)
- dst->i_blkbits = CFS_PAGE_SHIFT;
+ if (dst->i_blkbits < PAGE_CACHE_SHIFT)
+ dst->i_blkbits = PAGE_CACHE_SHIFT;
/* allocation of space */
if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += *lenp;
int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
{
- int rc = 0;
- DECLARE_LL_PROC_PPOS_DECL;
-
- if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
- *lenp = 0;
- return 0;
- }
- if (write) {
- rc = lprocfs_write_frac_helper(buffer, *lenp,
- (unsigned int*)table->data,
- 1 << (20 - CFS_PAGE_SHIFT));
- /* Don't allow them to let dirty pages exceed 90% of system
- * memory and set a hard minimum of 4MB. */
- if (obd_max_dirty_pages > ((cfs_num_physpages / 10) * 9)) {
- CERROR("Refusing to set max dirty pages to %u, which "
- "is more than 90%% of available RAM; setting "
- "to %lu\n", obd_max_dirty_pages,
- ((cfs_num_physpages / 10) * 9));
- obd_max_dirty_pages = ((cfs_num_physpages / 10) * 9);
- } else if (obd_max_dirty_pages < 4 << (20 - CFS_PAGE_SHIFT)) {
- obd_max_dirty_pages = 4 << (20 - CFS_PAGE_SHIFT);
- }
- } else {
- char buf[21];
- int len;
-
- len = lprocfs_read_frac_helper(buf, sizeof(buf),
- *(unsigned int*)table->data,
- 1 << (20 - CFS_PAGE_SHIFT));
- if (len > *lenp)
- len = *lenp;
- buf[len] = '\0';
- if (cfs_copy_to_user(buffer, buf, len))
- return -EFAULT;
- *lenp = len;
- }
- *ppos += *lenp;
- return rc;
+ int rc = 0;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write) {
+ rc = lprocfs_write_frac_helper(buffer, *lenp,
+ (unsigned int *)table->data,
+ 1 << (20 - PAGE_CACHE_SHIFT));
+ /* Don't allow them to let dirty pages exceed 90% of system
+ * memory and set a hard minimum of 4MB. */
+ if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) {
+ CERROR("Refusing to set max dirty pages to %u, which "
+ "is more than 90%% of available RAM; setting "
+ "to %lu\n", obd_max_dirty_pages,
+ ((num_physpages / 10) * 9));
+ obd_max_dirty_pages = ((num_physpages / 10) * 9);
+ } else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
+ obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
+ }
+ } else {
+ char buf[21];
+ int len;
+
+ len = lprocfs_read_frac_helper(buf, sizeof(buf),
+ *(unsigned int *)table->data,
+ 1 << (20 - PAGE_CACHE_SHIFT));
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ }
+ *ppos += *lenp;
+ return rc;
}
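The recurring multiplier 1 << (20 - PAGE_CACHE_SHIFT) is simply pages-per-megabyte (256 with 4 KiB pages). The clamp this handler applies, factored out as a standalone sketch (names hypothetical):

	#include <linux/mm.h>		/* num_physpages */
	#include <linux/pagemap.h>	/* PAGE_CACHE_SHIFT */

	/* Clamp a dirty-page cap: at least 4 MB, at most 90% of RAM. */
	static unsigned long clamp_max_dirty(unsigned long pages)
	{
		unsigned long floor = 4UL << (20 - PAGE_CACHE_SHIFT);
		unsigned long ceiling = num_physpages / 10 * 9;

		if (pages < floor)
			return floor;
		if (pages > ceiling)
			return ceiling;
		return pages;
	}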
#ifdef RANDOM_FAIL_ALLOC
if (len > *lenp)
len = *lenp;
buf[len] = '\0';
- if (cfs_copy_to_user(buffer, buf, len))
+ if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
}
char *page, *start = NULL;
int rc = 0, eof = 1, count;
- if (*ppos >= CFS_PAGE_SIZE)
+ if (*ppos >= PAGE_CACHE_SIZE)
return 0;
page = (char *)__get_free_page(GFP_KERNEL);
OBD_FAIL_TIMEOUT(OBD_FAIL_LPROC_REMOVE, 10);
if (dp->read_proc)
- rc = dp->read_proc(page, &start, *ppos, CFS_PAGE_SIZE,
+ rc = dp->read_proc(page, &start, *ppos, PAGE_CACHE_SIZE,
&eof, dp->data);
LPROCFS_EXIT();
if (rc <= 0)
}
count = (rc < size) ? rc : size;
- if (cfs_copy_to_user(buf, start, count)) {
+ if (copy_to_user(buf, start, count)) {
rc = -EFAULT;
goto out;
}
unsigned long tmp;
dummy[MAX_STRING_SIZE] = '\0';
- if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
return -EFAULT;
tmp = simple_strtoul(dummy, &end, 0);
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
what we need to read */
*start = page + off;
- /* We know we are allocated a page here.
- Also we know that this function will
- not need to write more than a page
- so we can truncate at CFS_PAGE_SIZE. */
- size = min(count + (int)off + 1, (int)CFS_PAGE_SIZE);
+ /*
+ * We know we are allocated a page here.
+ * Also we know that this function will
+ * not need to write more than a page
+ * so we can truncate at PAGE_CACHE_SIZE.
+ */
+ size = min(count + (int)off + 1, (int)PAGE_CACHE_SIZE);
/* Initialize the page */
memset(page, 0, size);
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = cfs_num_physpages;
+ cache_size = num_physpages;
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
- if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
- cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;
+ if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
+ cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
#endif
/* clear off unreasonable cache setting. */
lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
}
cache_size = cache_size / 100 * lu_cache_percent *
- (CFS_PAGE_SIZE / 1024);
+ (PAGE_CACHE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
-static struct cfs_shrinker *lu_site_shrinker = NULL;
+static struct shrinker *lu_site_shrinker;
typedef struct lu_site_stats{
unsigned lss_populated;
* inode, one for ea. Unfortunately setting this high value results in
* lu_object/inode cache consuming all the memory.
*/
- lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
+ lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
if (lu_site_shrinker == NULL)
return -ENOMEM;
void lu_global_fini(void)
{
if (lu_site_shrinker != NULL) {
- cfs_remove_shrinker(lu_site_shrinker);
+ remove_shrinker(lu_site_shrinker);
lu_site_shrinker = NULL;
}
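Note that set_shrinker()/remove_shrinker() here are libcfs porting wrappers, not upstream symbols; on kernels of roughly this patch's vintage (3.0-3.x) the upstream API is register_shrinker()/unregister_shrinker() with a .shrink callback. A sketch of that upstream form, for orientation only:

	#include <linux/mm.h>

	static int example_shrink(struct shrinker *s,
				  struct shrink_control *sc)
	{
		if (sc->nr_to_scan == 0)
			return 0;	/* report freeable objects: none */
		return -1;		/* -1: nothing could be freed now */
	}

	static struct shrinker example_shrinker = {
		.shrink	= example_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&example_shrinker) at init,
	 * unregister_shrinker(&example_shrinker) at teardown */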
struct lu_kmem_descr *iter = caches;
for (result = 0; iter->ckd_cache != NULL; ++iter) {
- *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
- iter->ckd_size,
- 0, 0);
+ *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
+ iter->ckd_size,
+ 0, 0, NULL);
if (*iter->ckd_cache == NULL) {
result = -ENOMEM;
/* free all previously allocated caches */
*/
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
- int rc;
-
for (; caches->ckd_cache != NULL; ++caches) {
if (*caches->ckd_cache != NULL) {
- rc = cfs_mem_cache_destroy(*caches->ckd_cache);
- LASSERTF(rc == 0, "couldn't destroy %s slab\n",
- caches->ckd_name);
+ kmem_cache_destroy(*caches->ckd_cache);
*caches->ckd_cache = NULL;
}
}
} \
} while (0)
-static cfs_mem_cache_t *lu_ref_link_kmem;
+static struct kmem_cache *lu_ref_link_kmem;
static struct lu_kmem_descr lu_ref_caches[] = {
{
void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
{
cfs_might_sleep();
- lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
+ lu_ref_add_context(ref, GFP_IOFS, scope, source);
}
EXPORT_SYMBOL(lu_ref_add);
void lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
const void *source)
{
- lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, scope, source);
+ lu_ref_add_context(ref, GFP_ATOMIC, scope, source);
}
EXPORT_SYMBOL(lu_ref_add_atomic);
#define ECHO_INIT_OID 0x10000000ULL
#define ECHO_HANDLE_MAGIC 0xabcd0123fedc9876ULL
-#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> CFS_PAGE_SHIFT)
-static cfs_page_t *echo_persistent_pages[ECHO_PERSISTENT_PAGES];
+#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_CACHE_SHIFT)
+static struct page *echo_persistent_pages[ECHO_PERSISTENT_PAGES];
enum {
LPROC_ECHO_READ_BYTES = 1,
}
static void
-echo_page_debug_setup(cfs_page_t *page, int rw, obd_id id,
- __u64 offset, int len)
+echo_page_debug_setup(struct page *page, int rw, obd_id id,
+ __u64 offset, int len)
{
- int page_offset = offset & ~CFS_PAGE_MASK;
- char *addr = ((char *)cfs_kmap(page)) + page_offset;
+ int page_offset = offset & ~CFS_PAGE_MASK;
+ char *addr = ((char *)kmap(page)) + page_offset;
if (len % OBD_ECHO_BLOCK_SIZE != 0)
CERROR("Unexpected block size %d\n", len);
len -= OBD_ECHO_BLOCK_SIZE;
}
- cfs_kunmap(page);
+ kunmap(page);
}
static int
-echo_page_debug_check(cfs_page_t *page, obd_id id,
- __u64 offset, int len)
+echo_page_debug_check(struct page *page, obd_id id,
+ __u64 offset, int len)
{
- int page_offset = offset & ~CFS_PAGE_MASK;
- char *addr = ((char *)cfs_kmap(page)) + page_offset;
- int rc = 0;
- int rc2;
+ int page_offset = offset & ~CFS_PAGE_MASK;
+ char *addr = ((char *)kmap(page)) + page_offset;
+ int rc = 0;
+ int rc2;
if (len % OBD_ECHO_BLOCK_SIZE != 0)
CERROR("Unexpected block size %d\n", len);
len -= OBD_ECHO_BLOCK_SIZE;
}
- cfs_kunmap(page);
+ kunmap(page);
- return (rc);
+ return rc;
}
/* This allows us to verify that desc_private is passed unmolested */
struct niobuf_local *lb, int cmd, int *left)
{
int gfp_mask = (ostid_id(&obj->ioo_oid) & 1) ?
- CFS_ALLOC_HIGHUSER : CFS_ALLOC_STD;
+ GFP_HIGHUSER : GFP_IOFS;
int ispersistent = ostid_id(&obj->ioo_oid) == ECHO_PERSISTENT_OBJID;
int debug_setup = (!ispersistent &&
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
obd_off offset = nb->offset;
int len = nb->len;
- while (len > 0) {
- int plen = CFS_PAGE_SIZE - (offset & (CFS_PAGE_SIZE-1));
- if (len < plen)
- plen = len;
+ while (len > 0) {
+ int plen = PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1));
+ if (len < plen)
+ plen = len;
/* check for local buf overflow */
if (*left == 0)
res->lnb_file_offset = offset;
res->len = plen;
LASSERT((res->lnb_file_offset & ~CFS_PAGE_MASK) + res->len <=
- CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE);
if (ispersistent &&
- ((res->lnb_file_offset >> CFS_PAGE_SHIFT) <
+ ((res->lnb_file_offset >> PAGE_CACHE_SHIFT) <
ECHO_PERSISTENT_PAGES)) {
res->page =
echo_persistent_pages[res->lnb_file_offset >>
- CFS_PAGE_SHIFT];
- /* Take extra ref so __free_pages() can be called OK */
- cfs_get_page (res->page);
- } else {
+ PAGE_CACHE_SHIFT];
+ /* Take extra ref so __free_pages() can be called OK */
+ get_page(res->page);
+ } else {
OBD_PAGE_ALLOC(res->page, gfp_mask);
if (res->page == NULL) {
CERROR("can't get page for id " DOSTID"\n",
}
static int echo_finalize_lb(struct obdo *oa, struct obd_ioobj *obj,
- struct niobuf_remote *rb, int *pgs,
- struct niobuf_local *lb, int verify)
+ struct niobuf_remote *rb, int *pgs,
+ struct niobuf_local *lb, int verify)
{
- struct niobuf_local *res = lb;
- obd_off start = rb->offset >> CFS_PAGE_SHIFT;
- obd_off end = (rb->offset + rb->len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- int count = (int)(end - start);
- int rc = 0;
- int i;
+ struct niobuf_local *res = lb;
+ obd_off start = rb->offset >> PAGE_CACHE_SHIFT;
+ obd_off end = (rb->offset + rb->len + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ int count = (int)(end - start);
+ int rc = 0;
+ int i;
- for (i = 0; i < count; i++, (*pgs) ++, res++) {
- cfs_page_t *page = res->page;
- void *addr;
+ for (i = 0; i < count; i++, (*pgs)++, res++) {
+ struct page *page = res->page;
+ void *addr;
if (page == NULL) {
CERROR("null page objid "LPU64":%p, buf %d/%d\n",
return -EFAULT;
}
- addr = cfs_kmap(page);
+ addr = kmap(page);
- CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@"LPU64"\n",
+ CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@"LPU64"\n",
res->page, addr, res->lnb_file_offset);
if (verify) {
rc = vrc;
}
- cfs_kunmap(page);
- /* NB see comment above regarding persistent pages */
- OBD_PAGE_FREE(page);
- }
+ kunmap(page);
+ /* NB see comment above regarding persistent pages */
+ OBD_PAGE_FREE(page);
+ }
- return rc;
+ return rc;
}
static int echo_preprw(const struct lu_env *env, int cmd,
*/
CERROR("cleaning up %u pages (%d obdos)\n", *pages, objcount);
for (i = 0; i < *pages; i++) {
- cfs_kunmap(res[i].page);
+ kunmap(res[i].page);
/* NB if this is a persistent page, __free_pages will just
* lose the extra ref gained above */
OBD_PAGE_FREE(res[i].page);
niocount - pgs - 1, objcount);
while (pgs < niocount) {
- cfs_page_t *page = res[pgs++].page;
+ struct page *page = res[pgs++].page;
if (page == NULL)
continue;
int echo_persistent_pages_init(void)
{
- cfs_page_t *pg;
- int i;
+ struct page *pg;
+ int i;
- for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) {
- int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
- CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER;
+ for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) {
+ int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
+ GFP_IOFS : GFP_HIGHUSER;
- OBD_PAGE_ALLOC(pg, gfp_mask);
- if (pg == NULL) {
- echo_persistent_pages_fini ();
- return (-ENOMEM);
- }
+ OBD_PAGE_ALLOC(pg, gfp_mask);
+ if (pg == NULL) {
+ echo_persistent_pages_fini();
+ return -ENOMEM;
+ }
- memset (cfs_kmap (pg), 0, CFS_PAGE_SIZE);
- cfs_kunmap (pg);
+ memset(kmap(pg), 0, PAGE_CACHE_SIZE);
+ kunmap(pg);
- echo_persistent_pages[i] = pg;
- }
+ echo_persistent_pages[i] = pg;
+ }
- return (0);
+ return 0;
}
struct echo_page {
struct cl_page_slice ep_cl;
struct mutex ep_lock;
- cfs_page_t *ep_vmpage;
+ struct page *ep_vmpage;
};
struct echo_lock {
obd_off end, int mode, __u64 *cookie);
static int cl_echo_cancel (struct echo_device *d, __u64 cookie);
static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
- cfs_page_t **pages, int npages, int async);
+ struct page **pages, int npages, int async);
static struct echo_thread_info *echo_env_info(const struct lu_env *env);
unsigned long dummy;
};
-static cfs_mem_cache_t *echo_lock_kmem;
-static cfs_mem_cache_t *echo_object_kmem;
-static cfs_mem_cache_t *echo_thread_kmem;
-static cfs_mem_cache_t *echo_session_kmem;
-//static cfs_mem_cache_t *echo_req_kmem;
+static struct kmem_cache *echo_lock_kmem;
+static struct kmem_cache *echo_object_kmem;
+static struct kmem_cache *echo_thread_kmem;
+static struct kmem_cache *echo_session_kmem;
+/* static struct kmem_cache *echo_req_kmem; */
static struct lu_kmem_descr echo_caches[] = {
{
*
* @{
*/
-static cfs_page_t *echo_page_vmpage(const struct lu_env *env,
+static struct page *echo_page_vmpage(const struct lu_env *env,
const struct cl_page_slice *slice)
{
return cl2echo_page(slice)->ep_vmpage;
}
static void echo_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
+ struct cl_page_slice *slice)
{
- struct echo_page *ep = cl2echo_page(slice);
- struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
- cfs_page_t *vmpage = ep->ep_vmpage;
- ENTRY;
+ struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
+ ENTRY;
- cfs_atomic_dec(&eco->eo_npages);
- page_cache_release(vmpage);
- EXIT;
+ cfs_atomic_dec(&eco->eo_npages);
+ page_cache_release(cl2echo_page(slice)->ep_vmpage);
+ EXIT;
}
static int echo_page_prep(const struct lu_env *env,
* @{
*/
static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
struct echo_lock *el;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO);
if (el != NULL) {
cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
el->el_object = cl2echo_obj(obj);
/* we're the top dev. */
LASSERT(hdr == NULL);
- OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, __GFP_IO);
if (eco != NULL) {
struct cl_object_header *hdr = &eco->eo_hdr;
{
struct echo_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, __GFP_IO);
if (info == NULL)
info = ERR_PTR(-ENOMEM);
return info;
{
struct echo_session_info *session;
- OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, __GFP_IO);
if (session == NULL)
session = ERR_PTR(-ENOMEM);
return session;
}
static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
- cfs_page_t **pages, int npages, int async)
+ struct page **pages, int npages, int async)
{
struct lu_env *env;
struct echo_thread_info *info;
rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * CFS_PAGE_SIZE - 1,
+ offset + npages * PAGE_CACHE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
if (nob > ulsm_nob)
return (-EINVAL);
- if (cfs_copy_to_user (ulsm, lsm, sizeof(ulsm)))
+ if (copy_to_user(ulsm, lsm, sizeof(ulsm)))
return (-EFAULT);
for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (cfs_copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+ if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
sizeof(lsm->lsm_oinfo[0])))
return (-EFAULT);
}
if (ulsm_nob < sizeof (*lsm))
return (-EINVAL);
- if (cfs_copy_from_user (lsm, ulsm, sizeof (*lsm)))
+ if (copy_from_user(lsm, ulsm, sizeof(*lsm)))
return (-EFAULT);
if (lsm->lsm_stripe_count > ec->ec_nstripes ||
for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (cfs_copy_from_user(lsm->lsm_oinfo[i],
+ if (copy_from_user(lsm->lsm_oinfo[i],
((struct lov_stripe_md *)ulsm)-> \
lsm_oinfo[i],
sizeof(lsm->lsm_oinfo[0])))
OBD_ALLOC(name, namelen + 1);
if (name == NULL)
GOTO(out_put, rc = -ENOMEM);
- if (cfs_copy_from_user(name, data->ioc_pbuf2, namelen))
+ if (copy_from_user(name, data->ioc_pbuf2, namelen))
GOTO(out_name, rc = -EFAULT);
}
lsm->lsm_stripe_count = ec->ec_nstripes;
if (lsm->lsm_stripe_size == 0)
- lsm->lsm_stripe_size = CFS_PAGE_SIZE;
+ lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
idx = cfs_rand();
static void
echo_client_page_debug_setup(struct lov_stripe_md *lsm,
- cfs_page_t *page, int rw, obd_id id,
+ struct page *page, int rw, obd_id id,
obd_off offset, obd_off count)
{
char *addr;
int delta;
/* no partial pages on the client */
- LASSERT(count == CFS_PAGE_SIZE);
+ LASSERT(count == PAGE_CACHE_SIZE);
- addr = cfs_kmap(page);
+ addr = kmap(page);
- for (delta = 0; delta < CFS_PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off = offset + delta;
stripe_id = id;
stripe_off, stripe_id);
}
- cfs_kunmap(page);
+ kunmap(page);
}
static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
- cfs_page_t *page, obd_id id,
+ struct page *page, obd_id id,
obd_off offset, obd_off count)
{
obd_off stripe_off;
int rc2;
/* no partial pages on the client */
- LASSERT(count == CFS_PAGE_SIZE);
+ LASSERT(count == PAGE_CACHE_SIZE);
- addr = cfs_kmap(page);
+ addr = kmap(page);
- for (rc = delta = 0; delta < CFS_PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
stripe_id = id;
echo_get_stripe_off_id (lsm, &stripe_off, &stripe_id);
}
}
- cfs_kunmap(page);
+ kunmap(page);
return rc;
}
obd_count npages;
struct brw_page *pga;
struct brw_page *pgp;
- cfs_page_t **pages;
+ struct page **pages;
obd_off off;
int i;
int rc;
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
(oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
- gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER;
+ gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER;
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
LASSERT(lsm != NULL);
RETURN(-EINVAL);
/* XXX think again with misaligned I/O */
- npages = count >> CFS_PAGE_SHIFT;
+ npages = count >> PAGE_CACHE_SHIFT;
if (rw == OBD_BRW_WRITE)
brw_flags = OBD_BRW_ASYNC;
for (i = 0, pgp = pga, off = offset;
i < npages;
- i++, pgp++, off += CFS_PAGE_SIZE) {
+ i++, pgp++, off += PAGE_CACHE_SIZE) {
LASSERT (pgp->pg == NULL); /* for cleanup */
goto out;
pages[i] = pgp->pg;
- pgp->count = CFS_PAGE_SIZE;
+ pgp->count = PAGE_CACHE_SIZE;
pgp->off = off;
pgp->flag = brw_flags;
(lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
RETURN(-EINVAL);
- npages = batch >> CFS_PAGE_SHIFT;
- tot_pages = count >> CFS_PAGE_SHIFT;
+ npages = batch >> PAGE_CACHE_SHIFT;
+ tot_pages = count >> PAGE_CACHE_SHIFT;
OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local));
OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote));
if (tot_pages < npages)
npages = tot_pages;
- for (i = 0; i < npages; i++, off += CFS_PAGE_SIZE) {
+ for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
rnb[i].offset = off;
- rnb[i].len = CFS_PAGE_SIZE;
+ rnb[i].len = PAGE_CACHE_SIZE;
rnb[i].flags = brw_flags;
}
LASSERT(lpages == npages);
for (i = 0; i < lpages; i++) {
- cfs_page_t *page = lnb[i].page;
+ struct page *page = lnb[i].page;
/* read past eof? */
if (page == NULL && lnb[i].rc == 0)
if (dir == NULL)
GOTO(out, rc = -ENOMEM);
- if (cfs_copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
+ if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
OBD_FREE(dir, data->ioc_plen1 + 1);
GOTO(out, rc = -EFAULT);
}
GOTO(out, rc);
}
- if (cfs_copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
+ if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
return -EFAULT;
max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
- if (cfs_copy_to_user(data->ioc_pbuf2, &max_count,
+ if (copy_to_user(data->ioc_pbuf2, &max_count,
data->ioc_plen2))
return -EFAULT;
GOTO(out, rc);
ENTRY;
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
- LASSERT(CFS_PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+ LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
lprocfs_echo_init_vars(&lvars);
# endif
#else /* ! __KERNEL__ */
/* Kludge here, define some functions and macros needed by liblustre -jay */
-static inline void page_cache_get(struct page *page)
-{
-}
-
-static inline void page_cache_release(struct page *page)
-{
-}
#define READ 0
#define WRITE 1
#include "ofd_internal.h"
/* Slab for OFD object allocation */
-static cfs_mem_cache_t *ofd_object_kmem;
+static struct kmem_cache *ofd_object_kmem;
static struct lu_kmem_descr ofd_caches[] = {
{
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(of, ofd_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(of, ofd_object_kmem, __GFP_IO);
if (of != NULL) {
struct lu_object *o;
struct lu_object_header *h;
#include "ofd_internal.h"
-static cfs_mem_cache_t *ll_fmd_cachep;
+static struct kmem_cache *ll_fmd_cachep;
/* drop fmd reference, free it if last ref. must be called with fed_lock held.*/
static inline void ofd_fmd_put_nolock(struct obd_export *exp,
int ofd_fmd_init(void)
{
- ll_fmd_cachep = cfs_mem_cache_create("ll_fmd_cache",
- sizeof(struct ofd_mod_data),
- 0, 0);
+ ll_fmd_cachep = kmem_cache_create("ll_fmd_cache",
+ sizeof(struct ofd_mod_data),
+ 0, 0, NULL);
if (!ll_fmd_cachep)
return -ENOMEM;
else
void ofd_fmd_exit(void)
{
if (ll_fmd_cachep) {
- int rc = cfs_mem_cache_destroy(ll_fmd_cachep);
-
- LASSERTF(rc == 0, "Cannot destroy ll_fmd_cachep: rc %d\n", rc);
+ kmem_cache_destroy(ll_fmd_cachep);
ll_fmd_cachep = NULL;
}
}
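For reference, a minimal sketch of the kernel slab API this hunk
converts to (the cache name and record struct are hypothetical, not
part of the patch): kmem_cache_create() takes a fifth constructor
argument, and kmem_cache_destroy() returns void, which is why the
rc/LASSERTF pattern above had to go.

#include <linux/slab.h>
#include <linux/errno.h>

struct example_rec { int val; };		/* hypothetical payload */
static struct kmem_cache *example_cachep;

static int example_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_rec),
					   0, 0, NULL);	/* no ctor */
	return example_cachep != NULL ? 0 : -ENOMEM;
}

static void example_exit(void)
{
	if (example_cachep != NULL) {
		kmem_cache_destroy(example_cachep);	/* void return */
		example_cachep = NULL;
	}
}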
struct ofd_device *ofd)
{
/* Clients which don't support OBD_CONNECT_GRANT_PARAM cannot handle
- * a block size > page size and consume CFS_PAGE_SIZE of grant when
+ * a block size > page size and consume PAGE_CACHE_SIZE of grant when
* dirtying a page regardless of the block size */
return !!(ofd_obd(ofd)->obd_self_export != exp &&
ofd->ofd_blockbits > COMPAT_BSIZE_SHIFT &&
/* When ofd_grant_compat_disable is set, we don't grant any space to
* clients not supporting OBD_CONNECT_GRANT_PARAM.
* Otherwise, space granted to such a client is inflated since it
- * consumes CFS_PAGE_SIZE of grant space per block */
+ * consumes PAGE_CACHE_SIZE of grant space per block */
return !!(ofd_grant_compat(exp, ofd) && ofd->ofd_grant_compat_disable);
}
GOTO(out, rc = -EROFS);
#ifdef USE_HEALTH_CHECK_WRITE
- OBD_ALLOC(info->fti_buf.lb_buf, CFS_PAGE_SIZE);
+ OBD_ALLOC(info->fti_buf.lb_buf, PAGE_CACHE_SIZE);
if (info->fti_buf.lb_buf == NULL)
GOTO(out, rc = -ENOMEM);
- info->fti_buf.lb_len = CFS_PAGE_SIZE;
+ info->fti_buf.lb_len = PAGE_CACHE_SIZE;
info->fti_off = 0;
th = dt_trans_create(&env, ofd->ofd_osd);
}
dt_trans_stop(&env, ofd->ofd_osd, th);
- OBD_FREE(info->fti_buf.lb_buf, CFS_PAGE_SIZE);
+ OBD_FREE(info->fti_buf.lb_buf, PAGE_CACHE_SIZE);
CDEBUG(D_INFO, "write 1 page synchronously for checking io rc %d\n",rc);
#endif
}
static int osc_wr_max_dirty_mb(struct file *file, const char *buffer,
- unsigned long count, void *data)
+ unsigned long count, void *data)
{
- struct obd_device *dev = data;
- struct client_obd *cli = &dev->u.cli;
- int pages_number, mult, rc;
+ struct obd_device *dev = data;
+ struct client_obd *cli = &dev->u.cli;
+ int pages_number, mult, rc;
- mult = 1 << (20 - CFS_PAGE_SHIFT);
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
- if (rc)
- return rc;
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
- if (pages_number <= 0 ||
- pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - CFS_PAGE_SHIFT) ||
- pages_number > cfs_num_physpages / 4) /* 1/4 of RAM */
- return -ERANGE;
+ if (pages_number <= 0 ||
+ pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+ pages_number > num_physpages / 4) /* 1/4 of RAM */
+ return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_dirty_max = (obd_count)(pages_number << CFS_PAGE_SHIFT);
- osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_dirty_max = (obd_count)(pages_number << PAGE_CACHE_SHIFT);
+ osc_wake_cache_waiters(cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
- return count;
+ return count;
}
static int osc_rd_cached_mb(char *page, char **start, off_t off, int count,
{
struct obd_device *dev = data;
struct client_obd *cli = &dev->u.cli;
- int shift = 20 - CFS_PAGE_SHIFT;
+ int shift = 20 - PAGE_CACHE_SHIFT;
int rc;
rc = snprintf(page, count,
struct client_obd *cli = &dev->u.cli;
int pages_number, mult, rc;
- mult = 1 << (20 - CFS_PAGE_SHIFT);
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
buffer = lprocfs_find_named_value(buffer, "used_mb:", &count);
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
if (rc)
if (count > sizeof(kernbuf) - 1)
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
if (count > 0 && kernbuf[count - 1] == '\n')
kernbuf[count - 1] = '\0';
/* if the max_pages is specified in bytes, convert to pages */
if (val >= ONE_MB_BRW_SIZE)
- val >>= CFS_PAGE_SHIFT;
+ val >>= PAGE_CACHE_SHIFT;
LPROCFS_CLIMP_CHECK(dev);
- chunk_mask = ~((1 << (cli->cl_chunkbits - CFS_PAGE_SHIFT)) - 1);
+ chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
- if (val == 0 || val > ocd->ocd_brw_size >> CFS_PAGE_SHIFT) {
+ if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
LPROCFS_CLIMP_EXIT(dev);
return -ERANGE;
}
{
struct osc_extent *ext;
- OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, CFS_ALLOC_STD);
+ OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS);
if (ext == NULL)
return NULL;
return -ERANGE;
LASSERT(cur->oe_osclock == victim->oe_osclock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - CFS_PAGE_SHIFT;
+ ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
LASSERT(lock != NULL);
LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- LASSERT(cli->cl_chunkbits >= CFS_PAGE_SHIFT);
- ppc_bits = cli->cl_chunkbits - CFS_PAGE_SHIFT;
+ LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
+ ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
chunk_mask = ~((1 << ppc_bits) - 1);
chunksize = 1 << cli->cl_chunkbits;
chunk = index >> ppc_bits;
if (!sent) {
lost_grant = ext->oe_grants;
- } else if (blocksize < CFS_PAGE_SIZE &&
- last_count != CFS_PAGE_SIZE) {
+ } else if (blocksize < PAGE_CACHE_SIZE &&
+ last_count != PAGE_CACHE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check. */
if (end)
count += blocksize - end;
- lost_grant = CFS_PAGE_SIZE - count;
+ lost_grant = PAGE_CACHE_SIZE - count;
}
if (ext->oe_grants > 0)
osc_free_grant(cli, nr_pages, lost_grant);
struct osc_async_page *oap;
struct osc_async_page *tmp;
int pages_in_chunk = 0;
- int ppc_bits = cli->cl_chunkbits - CFS_PAGE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits -
+ PAGE_CACHE_SHIFT;
__u64 trunc_chunk = trunc_index >> ppc_bits;
int grants = 0;
int nr_pages = 0;
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
- LASSERT(last->oap_page_off + last->oap_count <= CFS_PAGE_SIZE);
+ LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
}
* because it's known they are not the last page */
cfs_list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = CFS_PAGE_SIZE - oap->oap_page_off;
+ oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}
}
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - CFS_PAGE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
return 0;
else if (cl_offset(obj, page->cp_index + 1) > kms)
/* catch sub-page write at end of file */
- return kms % CFS_PAGE_SIZE;
+ return kms % PAGE_CACHE_SIZE;
else
- return CFS_PAGE_SIZE;
+ return PAGE_CACHE_SIZE;
}
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
cfs_atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += CFS_PAGE_SIZE;
+ cli->cl_dirty += PAGE_CACHE_SIZE;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- CFS_PAGE_SIZE, pga, pga->pg);
+ PAGE_CACHE_SIZE, pga, pga->pg);
osc_update_next_shrink(cli);
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
cfs_atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= CFS_PAGE_SIZE;
+ cli->cl_dirty -= PAGE_CACHE_SIZE;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
cfs_atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= CFS_PAGE_SIZE;
+ cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
}
EXIT;
}
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
- * 2. blocksize at OST is less than CFS_PAGE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
* written. In this case OST may use less chunks to serve this partial
* write. OSTs don't actually know the page size on the client side. so
* clients have to calculate lost grant by the blocksize on the OST.
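As an illustration (numbers assumed, not from the patch): with
4096-byte client pages and a 1024-byte OST blocksize, a 1536-byte tail
write is rounded up to two blocks, so count becomes 2048 and
lost_grant = PAGE_CACHE_SIZE - count = 2048 bytes are handed back.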
client_obd_list_lock(&cli->cl_loi_list_lock);
cfs_atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << CFS_PAGE_SHIFT;
+ cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
if (rc < 0)
return 0;
- if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
+ if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
- cli->cl_dirty_transit += CFS_PAGE_SIZE;
+ cli->cl_dirty_transit += PAGE_CACHE_SIZE;
cfs_atomic_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < CFS_PAGE_SIZE ||
+ cli->cl_dirty_max < PAGE_CACHE_SIZE ||
cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
GOTO(out, rc = -EDQUOT);
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
- if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
+ if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
(cfs_atomic_read(&obd_dirty_pages) + 1 >
obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
}
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- cfs_page_t *page, loff_t offset)
+ struct page *page, loff_t offset)
{
struct obd_export *exp = osc_export(osc);
struct osc_async_page *oap = &ops->ops_oap;
oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
spin_unlock(&oap->oap_lock);
- if (cfs_memory_pressure_get())
+ if (memory_pressure_get())
ext->oe_memalloc = 1;
ext->oe_urgent = 1;
struct cl_lock *ops_lock;
};
-extern cfs_mem_cache_t *osc_lock_kmem;
-extern cfs_mem_cache_t *osc_object_kmem;
-extern cfs_mem_cache_t *osc_thread_kmem;
-extern cfs_mem_cache_t *osc_session_kmem;
-extern cfs_mem_cache_t *osc_req_kmem;
-extern cfs_mem_cache_t *osc_extent_kmem;
+extern struct kmem_cache *osc_lock_kmem;
+extern struct kmem_cache *osc_object_kmem;
+extern struct kmem_cache *osc_thread_kmem;
+extern struct kmem_cache *osc_session_kmem;
+extern struct kmem_cache *osc_req_kmem;
+extern struct kmem_cache *osc_extent_kmem;
extern struct lu_device_type osc_device_type;
extern struct lu_context_key osc_key;
const struct lu_object_header *hdr,
struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage);
+ struct cl_page *page, struct page *vmpage);
void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
pgoff_t start, pgoff_t end);
int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
obd_flag async_flags);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
- cfs_page_t *page, loff_t offset);
+ struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
* @{
*/
-cfs_mem_cache_t *osc_lock_kmem;
-cfs_mem_cache_t *osc_object_kmem;
-cfs_mem_cache_t *osc_thread_kmem;
-cfs_mem_cache_t *osc_session_kmem;
-cfs_mem_cache_t *osc_req_kmem;
-cfs_mem_cache_t *osc_extent_kmem;
-cfs_mem_cache_t *osc_quota_kmem;
+struct kmem_cache *osc_lock_kmem;
+struct kmem_cache *osc_object_kmem;
+struct kmem_cache *osc_thread_kmem;
+struct kmem_cache *osc_session_kmem;
+struct kmem_cache *osc_req_kmem;
+struct kmem_cache *osc_extent_kmem;
+struct kmem_cache *osc_quota_kmem;
struct lu_kmem_descr osc_caches[] = {
{
*/
static void *osc_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct osc_thread_info *info;
+ struct osc_thread_info *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, CFS_ALLOC_IO);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
+ OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
static void osc_key_fini(const struct lu_context *ctx,
};
static void *osc_session_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
- struct osc_session *info;
+ struct osc_session *info;
- OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, CFS_ALLOC_IO);
- if (info == NULL)
- info = ERR_PTR(-ENOMEM);
- return info;
+ OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
}
static void osc_session_fini(const struct lu_context *ctx,
int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
-extern cfs_mem_cache_t *osc_quota_kmem;
+extern struct kmem_cache *osc_quota_kmem;
struct osc_quota_info {
/** linkage for quota hash table */
cfs_hlist_node_t oqi_hash;
#ifdef __linux__
{
- cfs_page_t *vmpage = cl_page_vmpage(env, page);
+ struct page *vmpage = cl_page_vmpage(env, page);
if (PageLocked(vmpage))
CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
ops, page->cp_index,
struct osc_req *or;
int result;
- OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, __GFP_IO);
if (or != NULL) {
cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
result = 0;
};
int osc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *unused)
{
- struct osc_lock *clk;
- int result;
+ struct osc_lock *clk;
+ int result;
- OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
- if (clk != NULL) {
+ OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, __GFP_IO);
+ if (clk != NULL) {
__u32 enqflags = lock->cll_descr.cld_enq_flags;
osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
lock, clk, clk->ols_flags);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
}
int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
struct osc_object *osc;
struct lu_object *obj;
- OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, __GFP_IO);
if (osc != NULL) {
obj = osc2lu(osc);
lu_object_init(obj, NULL, dev);
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, cfs_page_t *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
int result;
opg->ops_from = 0;
- opg->ops_to = CFS_PAGE_SIZE;
+ opg->ops_to = PAGE_CACHE_SIZE;
result = osc_prep_async_page(osc, opg, vmpage,
cl_offset(obj, page->cp_index));
static cfs_atomic_t osc_lru_waiters = CFS_ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and.. */
-static const int lru_shrink_min = 2 << (20 - CFS_PAGE_SHIFT); /* 2M */
+static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long to finish. */

-static const int lru_shrink_max = 32 << (20 - CFS_PAGE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
* cl_lru_shrinkers is to avoid recursive call in case
* we're already in the context of osc_lru_shrink(). */
if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0 &&
- !cfs_memory_pressure_get())
+ !memory_pressure_get())
osc_lru_shrink(cli, osc_cache_too_much(cli));
cfs_waitq_signal(&osc_lru_waitq);
}
oa->o_undirty = 0;
} else {
long max_in_flight = (cli->cl_max_pages_per_rpc <<
- CFS_PAGE_SHIFT)*
+ PAGE_CACHE_SHIFT) *
(cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
}
static int osc_shrink_grant(struct client_obd *cli)
{
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
- (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT);
+ (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
client_obd_list_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
- target_bytes = cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
client_obd_list_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance. */
- if (target_bytes < cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT)
- target_bytes = cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
+ if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
client_obd_list_unlock(&cli->cl_loi_list_lock);
/* Get the current RPC size directly, instead of going via:
* cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
* Keep comment here so that it can be found by searching. */
- int brw_size = client->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
+ int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
client->cl_avail_grant > brw_size)
}
/* determine the appropriate chunk size used by osc_extent. */
- cli->cl_chunkbits = max_t(int, CFS_PAGE_SHIFT, ocd->ocd_blocksize);
+ cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
while (nob_read > 0) {
LASSERT (page_count > 0);
- if (pga[i]->count > nob_read) {
- /* EOF inside this page */
- ptr = cfs_kmap(pga[i]->pg) +
- (pga[i]->off & ~CFS_PAGE_MASK);
- memset(ptr + nob_read, 0, pga[i]->count - nob_read);
- cfs_kunmap(pga[i]->pg);
- page_count--;
- i++;
- break;
- }
+ if (pga[i]->count > nob_read) {
+ /* EOF inside this page */
+ ptr = kmap(pga[i]->pg) +
+ (pga[i]->off & ~CFS_PAGE_MASK);
+ memset(ptr + nob_read, 0, pga[i]->count - nob_read);
+ kunmap(pga[i]->pg);
+ page_count--;
+ i++;
+ break;
+ }
nob_read -= pga[i]->count;
page_count--;
i++;
}
- /* zero remaining pages */
- while (page_count-- > 0) {
- ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
- memset(ptr, 0, pga[i]->count);
- cfs_kunmap(pga[i]->pg);
- i++;
- }
+ /* zero remaining pages */
+ while (page_count-- > 0) {
+ ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+ memset(ptr, 0, pga[i]->count);
+ kunmap(pga[i]->pg);
+ i++;
+ }
}
static int check_write_rcs(struct ptlrpc_request *req,
* simulate an OST->client data error */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
- unsigned char *ptr = cfs_kmap(pga[i]->pg);
+ unsigned char *ptr = kmap(pga[i]->pg);
int off = pga[i]->off & ~CFS_PAGE_MASK;
memcpy(ptr + off, "bad1", min(4, nob));
- cfs_kunmap(pga[i]->pg);
+ kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
pga[i]->off & ~CFS_PAGE_MASK,
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
- LASSERTF(page_count == 1 ||
- (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
- ergo(i > 0 && i < page_count - 1,
- poff == 0 && pg->count == CFS_PAGE_SIZE) &&
- ergo(i == page_count - 1, poff == 0)),
- "i: %d/%d pg: %p off: "LPU64", count: %u\n",
- i, page_count, pg, pg->off, pg->count);
+ LASSERTF(page_count == 1 ||
+ (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+ ergo(i > 0 && i < page_count - 1,
+ poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+ ergo(i == page_count - 1, poff == 0)),
+ "i: %d/%d pg: %p off: "LPU64", count: %u\n",
+ i, page_count, pg, pg->off, pg->count);
#ifdef __linux__
LASSERTF(i == 0 || pg->off > pg_prev->off,
"i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
if (pages == 0) /* that's all */
return count;
- if (offset + pg[i]->count < CFS_PAGE_SIZE)
+ if (offset + pg[i]->count < PAGE_CACHE_SIZE)
return count; /* doesn't end on page boundary */
i++;
oap->oap_count;
else
LASSERT(oap->oap_page_off + oap->oap_count ==
- CFS_PAGE_SIZE);
+ PAGE_CACHE_SIZE);
}
}
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, cfs_page_index(oap->oap_page), oap,
+ pga[i]->pg, page_index(oap->oap_page), oap,
pga[i]->flag);
i++;
cl_req_page_add(env, clerq, page);
tmp->oap_request = ptlrpc_request_addref(req);
client_obd_list_lock(&cli->cl_loi_list_lock);
- starting_offset >>= CFS_PAGE_SHIFT;
+ starting_offset >>= PAGE_CACHE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
- if (cfs_copy_from_user(&lum, lump, lum_size))
+ if (copy_from_user(&lum, lump, lum_size))
RETURN(-EFAULT);
if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
}
lumk->lmm_oi = lsm->lsm_oi;
- lumk->lmm_stripe_count = 1;
+ lumk->lmm_stripe_count = 1;
- if (cfs_copy_to_user(lump, lumk, lum_size))
- rc = -EFAULT;
+ if (copy_to_user(lump, lumk, lum_size))
+ rc = -EFAULT;
- if (lumk != &lum)
- OBD_FREE(lumk, lum_size);
+ if (lumk != &lum)
+ OBD_FREE(lumk, lum_size);
- RETURN(rc);
+ RETURN(rc);
}
memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
- err = cfs_copy_to_user((void *)uarg, buf, len);
+ err = copy_to_user((void *)uarg, buf, len);
if (err)
err = -EFAULT;
obd_ioctl_freedata(buf, len);
CFS_PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
- fm_key->fiemap.fm_start + CFS_PAGE_SIZE - 1)
+ fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
policy.l_extent.end = OBD_OBJECT_EOF;
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
- CFS_PAGE_SIZE - 1) & CFS_PAGE_MASK;
+ PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0);
th = ERR_PTR(-ENOMEM);
- OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO);
+ OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO);
if (oh != NULL) {
oh->ot_quota_trans = &oti->oti_quota_trans;
memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans));
RETURN(-ENOTSUPP);
}
- OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD);
+ OBD_PAGE_ALLOC(__page, GFP_IOFS);
if (__page == NULL)
GOTO(out, rc = -ENOMEM);
if (str)
lmd_flags = simple_strtoul(str + 1, NULL, 0);
opts = lustre_cfg_string(cfg, 3);
- page = (unsigned long)cfs_page_address(__page);
+ page = (unsigned long)page_address(__page);
options = (char *)page;
*options = '\0';
if (opts == NULL)
/* Glom up mount options */
if (*options != '\0')
strcat(options, ",");
- strlcat(options, "no_mbcache", CFS_PAGE_SIZE);
+ strlcat(options, "no_mbcache", PAGE_CACHE_SIZE);
type = get_fs_type("ldiskfs");
if (!type) {
* there would be one ext3 readdir for every mdd readdir page.
*/
-#define OSD_IT_EA_BUFSIZE (CFS_PAGE_SIZE + CFS_PAGE_SIZE/4)
+#define OSD_IT_EA_BUFSIZE (PAGE_CACHE_SIZE + PAGE_CACHE_SIZE/4)
/**
* This is iterator's in-memory data structure in interoperability
cfs_list_t oiq_list;
};
-#define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512)
+#define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512)
struct osd_iobuf {
cfs_waitq_t dr_wait;
iobuf->dr_rw = rw;
iobuf->dr_init_at = line;
- blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
+ blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) {
LASSERT(iobuf->dr_pg_buf.lb_len >=
pages * sizeof(iobuf->dr_pages[0]));
CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n",
(unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages);
pages = i;
- blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits);
+ blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits);
iobuf->dr_max_pages = 0;
CDEBUG(D_OTHER, "realloc %u for %u blocks\n",
(unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks);
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
struct osd_iobuf *iobuf)
{
- int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
struct page **pages = iobuf->dr_pages;
int npages = iobuf->dr_npages;
unsigned long *blocks = iobuf->dr_blocks;
*nrpages = 0;
while (len > 0) {
- int poff = offset & (CFS_PAGE_SIZE - 1);
- int plen = CFS_PAGE_SIZE - poff;
+ int poff = offset & (PAGE_CACHE_SIZE - 1);
+ int plen = PAGE_CACHE_SIZE - poff;
if (plen > len)
plen = len;
LASSERT(inode);
- page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
+ page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
GFP_NOFS | __GFP_HIGHMEM);
if (unlikely(page == NULL))
lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1);
RETURN(rc);
isize = i_size_read(inode);
- maxidx = ((isize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
+ maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1;
if (osd->od_writethrough_cache)
cache = 1;
*/
ClearPageUptodate(lnb[i].page);
- if (lnb[i].len == CFS_PAGE_SIZE)
+ if (lnb[i].len == PAGE_CACHE_SIZE)
continue;
if (maxidx >= lnb[i].page->index) {
off = (lnb[i].lnb_page_offset + lnb[i].len) &
~CFS_PAGE_MASK;
if (off)
- memset(p + off, 0, CFS_PAGE_SIZE - off);
+ memset(p + off, 0, PAGE_CACHE_SIZE - off);
kunmap(lnb[i].page);
}
}
extents++;
if (!osd_is_mapped(inode, lnb[i].lnb_file_offset))
- quota_space += CFS_PAGE_SIZE;
+ quota_space += PAGE_CACHE_SIZE;
/* ignore quota for the whole request if any page is from
* client cache or written by root.
if (unlikely(nr_pages == 0))
return;
- blocks_per_page = CFS_PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits;
+ blocks_per_page = PAGE_CACHE_SIZE >> osd_sb(osd)->s_blocksize_bits;
lprocfs_oh_tally_log2(&s->hist[BRW_R_PAGES+rw], nr_pages);
static inline dqbuf_t getdqbuf(void)
{
- dqbuf_t buf = cfs_alloc(LUSTRE_DQBLKSIZE, CFS_ALLOC_IO);
+ dqbuf_t buf = kmalloc(LUSTRE_DQBLKSIZE, __GFP_IO);
if (!buf)
CWARN("Not enough memory for quota buffers.\n");
return buf;
static inline void freedqbuf(dqbuf_t buf)
{
- cfs_free(buf);
+ kfree(buf);
}
/**
static char *root_tag = "osd_mount, rootdb";
/* Slab for OSD object allocation */
-cfs_mem_cache_t *osd_object_kmem;
+struct kmem_cache *osd_object_kmem;
static struct lu_kmem_descr osd_caches[] = {
{
#include <dt_object.h>
#include <md_object.h>
#include <lustre_quota.h>
+
+#define _SPL_KMEM_H
+#include <sys/kstat.h>
+#define kmem_zalloc(a, b) kzalloc(a, b)
+#define kmem_free(ptr, sz) ((void)(sz), kfree(ptr))
+#ifndef KM_SLEEP
+#define KM_SLEEP GFP_KERNEL
+#endif
+
#include <sys/arc.h>
#include <sys/nvpair.h>
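The #define _SPL_KMEM_H above works by pre-claiming spl's include
guard: a later #include <sys/kmem.h> then expands to nothing, so none
of spl's conflicting kmem definitions enter the osd-zfs build, and the
few symbols actually needed (kmem_zalloc, kmem_free, KM_SLEEP) are
supplied as thin wrappers over kzalloc/kfree/GFP_KERNEL. In outline
(spl header simplified, assumed shape):

/* sys/kmem.h (spl): */
#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H
/* ... spl's own allocator definitions ... */
#endif	/* body skipped once _SPL_KMEM_H is already defined */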
return 0;
}
-static struct page *kmem_to_page(void *addr)
+static inline struct page *kmem_to_page(void *addr)
{
- struct page *page;
-
- if (kmem_virt(addr))
- page = vmalloc_to_page(addr);
+ if (is_vmalloc_addr(addr))
+ return vmalloc_to_page(addr);
else
- page = virt_to_page(addr);
-
- return page;
+ return virt_to_page(addr);
}
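is_vmalloc_addr() is the stock kernel predicate (linux/mm.h) for
whether a pointer lies in the vmalloc range, so the former dependency
on spl's kmem_virt() disappears; vmalloc-backed memory must be
translated with vmalloc_to_page() while linearly mapped memory uses
virt_to_page(), exactly as the two branches above do.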
static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj,
dbf = (void *) ((unsigned long)dbp[i] | 1);
while (tocpy > 0) {
- thispage = CFS_PAGE_SIZE;
- thispage -= bufoff & (CFS_PAGE_SIZE - 1);
+ thispage = PAGE_CACHE_SIZE;
+ thispage -= bufoff & (PAGE_CACHE_SIZE - 1);
thispage = min(tocpy, thispage);
lnb->rc = 0;
/* go over pages arcbuf contains, put them as
* local niobufs for ptlrpc's bulks */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
/* can't use zerocopy, allocate temp. buffers */
while (sz_in_block > 0) {
- plen = min_t(int, sz_in_block, CFS_PAGE_SIZE);
+ plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE);
lnb[i].lnb_file_offset = off;
lnb[i].lnb_page_offset = 0;
#include <lprocfs_status.h>
#include <lustre/lustre_idl.h>
-#include "udmu.h"
#include "osd_internal.h"
#ifdef LPROCFS
extern struct dt_body_operations osd_body_ops;
static struct dt_object_operations osd_obj_otable_it_ops;
-extern cfs_mem_cache_t *osd_object_kmem;
+extern struct kmem_cache *osd_object_kmem;
static void
osd_object_sa_fini(struct osd_object *obj)
{
struct osd_object *mo;
- OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, __GFP_IO);
if (mo != NULL) {
struct lu_object *l;
#include <lustre_quota.h>
#include <obd.h>
-#include "udmu.h"
#include "osd_internal.h"
/**
#define _DMU_H
#include <sys/zap.h>
-#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/sa.h>
#include "osp_internal.h"
/* Slab for OSP object allocation */
-cfs_mem_cache_t *osp_object_kmem;
+struct kmem_cache *osp_object_kmem;
static struct lu_kmem_descr osp_caches[] = {
{
struct osp_object *o;
struct lu_object *l;
- OBD_SLAB_ALLOC_PTR_GFP(o, osp_object_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(o, osp_object_kmem, __GFP_IO);
if (o != NULL) {
l = &o->opo_obj.do_lu;
cfs_proc_dir_entry_t *opd_symlink;
};
-extern cfs_mem_cache_t *osp_object_kmem;
+extern struct kmem_cache *osp_object_kmem;
/* this is a top object */
struct osp_object {
if (size < 0)
GOTO(out, rc = size);
- LASSERT(size > 0 && size < CFS_PAGE_SIZE);
+ LASSERT(size > 0 && size < PAGE_CACHE_SIZE);
LASSERT(ea_buf != NULL);
rc = size;
if ((remote_nb[0].flags & OBD_BRW_MEMALLOC) &&
(exp->exp_connection->c_peer.nid == exp->exp_connection->c_self))
- cfs_memory_pressure_set();
+ memory_pressure_set();
if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
obd_uuid2str(&exp->exp_client_uuid),
obd_export_nid2str(exp), rc);
}
- cfs_memory_pressure_clr();
+ memory_pressure_clr();
RETURN(rc);
}
int rc;
ENTRY;
- ost_page_to_corrupt = cfs_alloc_page(CFS_ALLOC_STD);
+ ost_page_to_corrupt = alloc_page(GFP_IOFS);
lprocfs_ost_init_vars(&lvars);
rc = class_register_type(&ost_obd_ops, NULL, lvars.module_vars,
* tunables for per-thread page pool (bug 5137)
*/
#define OST_THREAD_POOL_SIZE PTLRPC_MAX_BRW_PAGES /* pool size in pages */
-#define OST_THREAD_POOL_GFP CFS_ALLOC_HIGHUSER /* GFP mask for pool pages */
+#define OST_THREAD_POOL_GFP GFP_HIGHUSER /* GFP mask for pool pages */
struct page;
struct niobuf_local;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
-/**
+/*
* Add a page \a page to the bulk descriptor \a desc.
* Data to transfer in the page starts at offset \a pageoffset and
* amount of data to transfer from the page is \a len
*/
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
- cfs_page_t *page, int pageoffset, int len, int pin)
+ struct page *page, int pageoffset, int len, int pin)
{
- LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(page != NULL);
- LASSERT(pageoffset >= 0);
- LASSERT(len > 0);
- LASSERT(pageoffset + len <= CFS_PAGE_SIZE);
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(page != NULL);
+ LASSERT(pageoffset >= 0);
+ LASSERT(len > 0);
+ LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
- desc->bd_nob += len;
+ desc->bd_nob += len;
if (pin)
- cfs_page_pin(page);
+ page_cache_get(page);
- ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+ ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
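The pin path boils down to a page reference get/put:
__ptlrpc_prep_bulk_page() takes page_cache_get() on each page it adds
when pin is set, and the matching unpin loop below drops the same
reference with page_cache_release() for every kiov before the
descriptor is freed, so bulk pages cannot be recycled while an RPC
still points at them.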
if (unpin) {
for (i = 0; i < desc->bd_iov_count ; i++)
- cfs_page_unpin(desc->bd_iov[i].kiov_page);
+ page_cache_release(desc->bd_iov[i].kiov_page);
}
OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
/* We moaned above already... */
return;
}
- OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
+ OBD_ALLOC_GFP(req, sizeof(*req), ALLOC_ATOMIC_TRY);
if (req == NULL) {
CERROR("Can't allocate incoming request descriptor: "
"Dropping %s RPC from %s\n",
/* 4. now the token */
LASSERT(size >= (sizeof(__u32) + token_size));
*p++ = cpu_to_le32(((__u32) token_size));
- if (cfs_copy_from_user(p, token, token_size)) {
+ if (copy_from_user(p, token, token_size)) {
CERROR("can't copy token\n");
return -EFAULT;
}
status = 0;
effective = 0;
- if (cfs_copy_to_user(outbuf, &status, 4))
- return -EFAULT;
- outbuf += 4;
- if (cfs_copy_to_user(outbuf, &ghdr->gh_major, 4))
- return -EFAULT;
- outbuf += 4;
- if (cfs_copy_to_user(outbuf, &ghdr->gh_minor, 4))
- return -EFAULT;
- outbuf += 4;
- if (cfs_copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
- return -EFAULT;
- outbuf += 4;
- effective += 4 * 4;
-
- /* handle */
- obj_len = ghdr->gh_handle.len;
- round_len = (obj_len + 3) & ~ 3;
- if (cfs_copy_to_user(outbuf, &obj_len, 4))
- return -EFAULT;
- outbuf += 4;
- if (cfs_copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
- return -EFAULT;
- outbuf += round_len;
- effective += 4 + round_len;
-
- /* out token */
- obj_len = msg->lm_buflens[2];
- round_len = (obj_len + 3) & ~ 3;
- if (cfs_copy_to_user(outbuf, &obj_len, 4))
- return -EFAULT;
- outbuf += 4;
- if (cfs_copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
- return -EFAULT;
- outbuf += round_len;
- effective += 4 + round_len;
-
- return effective;
+ if (copy_to_user(outbuf, &status, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, &ghdr->gh_major, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
+ return -EFAULT;
+ outbuf += 4;
+ effective += 4 * 4;
+
+ /* handle */
+ obj_len = ghdr->gh_handle.len;
+ round_len = (obj_len + 3) & ~3;
+ if (copy_to_user(outbuf, &obj_len, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
+ return -EFAULT;
+ outbuf += round_len;
+ effective += 4 + round_len;
+
+ /* out token */
+ obj_len = msg->lm_buflens[2];
+ round_len = (obj_len + 3) & ~3;
+ if (copy_to_user(outbuf, &obj_len, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
+ return -EFAULT;
+ outbuf += round_len;
+ effective += 4 + round_len;
+
+ return effective;
}
/* XXX move to where lgssd could see */
"version\n", count, (unsigned long) sizeof(param));
RETURN(-EINVAL);
}
- if (cfs_copy_from_user(&param, buffer, sizeof(param))) {
+ if (copy_from_user(&param, buffer, sizeof(param))) {
CERROR("failed copy data from lgssd\n");
RETURN(-EFAULT);
}
param.reply_length = lsize;
out_copy:
- if (cfs_copy_to_user(buffer, &param, sizeof(param)))
+ if (copy_to_user(buffer, &param, sizeof(param)))
rc = -EFAULT;
else
rc = 0;
}
if (desc->bd_iov[i].kiov_len % blocksize != 0) {
- memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
+ memcpy(page_address(desc->bd_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
- cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
+ page_address(desc->bd_enc_iov[i].kiov_page) +
desc->bd_iov[i].kiov_offset,
desc->bd_iov[i].kiov_len);
}
if (mlen > buflen)
mlen = buflen;
- left = cfs_copy_to_user(dst, data, mlen);
+ left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
RETURN(left);
if (!buf)
RETURN(-ENOMEM);
- if (cfs_copy_from_user(buf, src, mlen)) {
+ if (copy_from_user(buf, src, mlen)) {
CERROR("failed copy user space data\n");
GOTO(out_free, rc = -EFAULT);
}
if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> CFS_PAGE_SHIFT,
+ min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
cli->cl_max_pages_per_rpc);
else if (imp->imp_connect_op == MDS_CONNECT ||
imp->imp_connect_op == MGS_CONNECT)
/* This sanity check is more of an insanity check; we can still
* hose a kernel by allowing the request history to grow too
* far. */
- bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
- if (val > cfs_num_physpages/(2 * bufpages))
+ bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+ if (val > num_physpages/(2 * bufpages))
return -ERANGE;
spin_lock(&svc->srv_lock);
*/
cmd_copy = cmd;
- if (cfs_copy_from_user(cmd, buffer, count))
+ if (copy_from_user(cmd, buffer, count))
GOTO(out, rc = -EFAULT);
cmd[count] = '\0';
* bytes into kbuf, to ensure that the string is NUL-terminated.
* UUID_MAX should include a trailing NUL already.
*/
- if (cfs_copy_from_user(kbuf, buffer,
+ if (copy_from_user(kbuf, buffer,
min_t(unsigned long, BUFLEN - 1, count))) {
count = -EFAULT;
goto out;
const char prefix[] = "connection=";
const int prefix_len = sizeof(prefix) - 1;
- if (count > CFS_PAGE_SIZE - 1 || count <= prefix_len)
+ if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
return -EINVAL;
OBD_ALLOC(kbuf, count + 1);
if (kbuf == NULL)
return -ENOMEM;
- if (cfs_copy_from_user(kbuf, buffer, count))
+ if (copy_from_user(kbuf, buffer, count))
GOTO(out, count = -EFAULT);
kbuf[count] = 0;
LASSERT(desc->pd_compat != NULL);
OBD_CPT_ALLOC_GFP(policy, svcpt->scp_service->srv_cptable,
- svcpt->scp_cpt, sizeof(*policy), CFS_ALLOC_IO);
+ svcpt->scp_cpt, sizeof(*policy), __GFP_IO);
if (policy == NULL)
RETURN(-ENOMEM);
goto out;
OBD_CPT_ALLOC_GFP(cli, nrs_pol2cptab(policy), nrs_pol2cptid(policy),
- sizeof(*cli), moving_req ? CFS_ALLOC_ATOMIC :
- CFS_ALLOC_IO);
+ sizeof(*cli), moving_req ? GFP_ATOMIC :
+ __GFP_IO);
if (cli == NULL)
return -ENOMEM;
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
/**
* Slab cache for NRS ORR/TRR objects.
*/
- orrd->od_cache = cfs_mem_cache_create(orrd->od_objname,
- sizeof(struct nrs_orr_object),
- 0, 0);
+ orrd->od_cache = kmem_cache_create(orrd->od_objname,
+ sizeof(struct nrs_orr_object),
+ 0, 0, NULL);
if (orrd->od_cache == NULL)
GOTO(failed, rc = -ENOMEM);
failed:
if (orrd->od_cache) {
- rc = cfs_mem_cache_destroy(orrd->od_cache);
- LASSERTF(rc == 0, "Could not destroy od_cache slab\n");
+ kmem_cache_destroy(orrd->od_cache);
}
if (orrd->od_binheap != NULL)
cfs_binheap_destroy(orrd->od_binheap);
cfs_hash_putref(orrd->od_obj_hash);
- cfs_mem_cache_destroy(orrd->od_cache);
+ kmem_cache_destroy(orrd->od_cache);
OBD_FREE_PTR(orrd);
}
OBD_SLAB_CPT_ALLOC_PTR_GFP(orro, orrd->od_cache,
nrs_pol2cptab(policy), nrs_pol2cptid(policy),
- (moving_req ? CFS_ALLOC_ATOMIC :
- CFS_ALLOC_IO));
+ (moving_req ? GFP_ATOMIC :
+ __GFP_IO));
if (orro == NULL)
RETURN(-ENOMEM);
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
if (count > (sizeof(kernbuf) - 1))
return -EINVAL;
- if (cfs_copy_from_user(kernbuf, buffer, count))
+ if (copy_from_user(kernbuf, buffer, count))
return -EFAULT;
kernbuf[count] = '\0';
md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
}
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
int pageoffset, int len)
{
lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
return 0;
}
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
int pageoffset, int len)
{
lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
/* pers.c */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
int mdcnt);
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
int pageoffset, int len);
/* pack_generic.c */
cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
rq_list) {
- LASSERTF((long)req > CFS_PAGE_SIZE && req != LP_POISON,
+ LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
if (!ptlrpc_no_resend(req))
#ifdef __KERNEL__
-#define PTRS_PER_PAGE (CFS_PAGE_SIZE / sizeof(void *))
+#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
#define PAGES_PER_POOL (PTRS_PER_PAGE)
#define IDLE_IDX_MAX (100)
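With the usual 4096-byte page and 8-byte pointers this works out to
PTRS_PER_PAGE = 512: one pool occupies a single page and tracks up to
512 encryption pages (2 MB), and epp_pools is simply an array of such
page-sized pointer tables.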
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
cfs_time_t epp_st_max_wait; /* in jeffies */
- /*
- * pointers to pools
- */
- cfs_page_t ***epp_pools;
+ /*
+ * pointers to pools
+ */
+ struct page ***epp_pools;
} page_pools;
/*
* memory shrinker
*/
-const int pools_shrinker_seeks = CFS_DEFAULT_SEEKS;
-static struct cfs_shrinker *pools_shrinker = NULL;
+const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static struct shrinker *pools_shrinker;
/*
"max waitqueue depth: %u\n"
"max wait time: "CFS_TIME_T"/%u\n"
,
- cfs_num_physpages,
+ num_physpages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_max_pools,
LASSERT(page_pools.epp_pools[p_idx]);
LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
- cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
+ __free_page(page_pools.epp_pools[p_idx][g_idx]);
page_pools.epp_pools[p_idx][g_idx] = NULL;
if (++g_idx == PAGES_PER_POOL) {
/* free unused pools */
while (p_idx_max1 < p_idx_max2) {
LASSERT(page_pools.epp_pools[p_idx_max2]);
- OBD_FREE(page_pools.epp_pools[p_idx_max2], CFS_PAGE_SIZE);
+ OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
page_pools.epp_pools[p_idx_max2] = NULL;
p_idx_max2--;
}
/*
* return how many pages cleaned up.
*/
-static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
+static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
{
- unsigned long cleaned = 0;
- int i, j;
-
- for (i = 0; i < npools; i++) {
- if (pools[i]) {
- for (j = 0; j < PAGES_PER_POOL; j++) {
- if (pools[i][j]) {
- cfs_free_page(pools[i][j]);
- cleaned++;
- }
- }
- OBD_FREE(pools[i], CFS_PAGE_SIZE);
- pools[i] = NULL;
- }
- }
+ unsigned long cleaned = 0;
+ int i, j;
+
+ for (i = 0; i < npools; i++) {
+ if (pools[i]) {
+ for (j = 0; j < PAGES_PER_POOL; j++) {
+ if (pools[i][j]) {
+ __free_page(pools[i][j]);
+ cleaned++;
+ }
+ }
+ OBD_FREE(pools[i], PAGE_CACHE_SIZE);
+ pools[i] = NULL;
+ }
+ }
- return cleaned;
+ return cleaned;
}
/*
* we have options to avoid most memory copy with some tricks. but we choose
* the simplest way to avoid complexity. It's not frequently called.
*/
-static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
+static void enc_pools_insert(struct page ***pools, int npools, int npages)
{
int freeslot;
int op_idx, np_idx, og_idx, ng_idx;
static int enc_pools_add_pages(int npages)
{
static DEFINE_MUTEX(add_pages_mutex);
- cfs_page_t ***pools;
+ struct page ***pools;
int npools, alloced = 0;
int i, j, rc = -ENOMEM;
if (pools == NULL)
goto out;
- for (i = 0; i < npools; i++) {
- OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
- if (pools[i] == NULL)
- goto out_pools;
+ for (i = 0; i < npools; i++) {
+ OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
+ if (pools[i] == NULL)
+ goto out_pools;
- for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
- pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
- CFS_ALLOC_HIGHMEM);
- if (pools[i][j] == NULL)
- goto out_pools;
+ for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
+ pools[i][j] = alloc_page(__GFP_IO |
+ __GFP_HIGHMEM);
+ if (pools[i][j] == NULL)
+ goto out_pools;
- alloced++;
- }
- }
- LASSERT(alloced == npages);
+ alloced++;
+ }
+ }
+ LASSERT(alloced == npages);
enc_pools_insert(pools, npools, npages);
CDEBUG(D_SEC, "added %d pages into pools\n", npages);
* maximum capacity is 1/8 of total physical memory.
* is the 1/8 a good number?
*/
- page_pools.epp_max_pages = cfs_num_physpages / 8;
+ page_pools.epp_max_pages = num_physpages / 8;
page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
cfs_waitq_init(&page_pools.epp_waitq);
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
+ pools_shrinker = set_shrinker(pools_shrinker_seeks,
enc_pools_shrink);
if (pools_shrinker == NULL) {
enc_pools_free();
LASSERT(page_pools.epp_pools);
LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
- cfs_remove_shrinker(pools_shrinker);
+ remove_shrinker(pools_shrinker);
npools = npages_to_npools(page_pools.epp_total_pages);
cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
#ifdef __KERNEL__
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
- char *ptr;
- unsigned int off, i;
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len == 0)
- continue;
-
- ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
- ptr[off] ^= 0x1;
- cfs_kunmap(desc->bd_iov[i].kiov_page);
- return;
- }
+ char *ptr;
+ unsigned int off, i;
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len == 0)
+ continue;
+
+ ptr = kmap(desc->bd_iov[i].kiov_page);
+ off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ ptr[off] ^= 0x1;
+ kunmap(desc->bd_iov[i].kiov_page);
+ return;
+ }
}
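corrupt_bulk_data() flips a single bit in the first non-empty kiov:
the page is mapped with kmap(), one byte at the iov offset is XORed
with 0x1, and kunmap() is paired immediately since the page may sit in
highmem; returning after the first hit keeps the injected checksum
error minimal.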
#else
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
RETURN(lqe);
}
- OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, __GFP_IO);
if (new == NULL) {
CERROR("Fail to allocate lqe for id:"LPU64", "
"hash:%s\n", qid->qid_uid, site->lqs_hash->hs_name);
void lquota_generate_fid(struct lu_fid *, int, int, int);
int lquota_extract_fid(const struct lu_fid *, int *, int *, int *);
const struct dt_index_features *glb_idx_feature(struct lu_fid *);
-extern cfs_mem_cache_t *lqe_kmem;
+extern struct kmem_cache *lqe_kmem;
/* lquota_entry.c */
/* site create/destroy */
#include "lquota_internal.h"
-cfs_mem_cache_t *lqe_kmem;
+struct kmem_cache *lqe_kmem;
struct lu_kmem_descr lquota_caches[] = {
{
struct quota_body *, bool, int, qsd_req_completion_t,
struct qsd_qtype_info *, struct lquota_lvb *, void *);
int qsd_fetch_index(const struct lu_env *, struct obd_export *,
- struct idx_info *, unsigned int, cfs_page_t **, bool *);
+ struct idx_info *, unsigned int, struct page **, bool *);
/* qsd_writeback.c */
void qsd_bump_version(struct qsd_qtype_info *, __u64, bool);
#include <obd_class.h>
#include "qsd_internal.h"
-cfs_mem_cache_t *upd_kmem;
+struct kmem_cache *upd_kmem;
struct lu_kmem_descr qsd_caches[] = {
{
static int qsd_reint_entries(const struct lu_env *env,
struct qsd_qtype_info *qqi,
struct idx_info *ii, bool global,
- cfs_page_t **pages,
+ struct page **pages,
unsigned int npages, bool need_swab)
{
struct qsd_thread_info *qti = qsd_info(env);
size = ii->ii_recsize + ii->ii_keysize;
for (i = 0; i < npages; i++) {
- union lu_page *lip = cfs_kmap(pages[i]);
+ union lu_page *lip = kmap(pages[i]);
for (j = 0; j < LU_PAGE_COUNT; j++) {
if (need_swab)
lip++;
}
out:
- cfs_kunmap(pages[i]);
+ kunmap(pages[i]);
if (rc)
break;
}
struct qsd_instance *qsd = qqi->qqi_qsd;
struct idx_info *ii = &qti->qti_ii;
struct lu_fid *fid;
- cfs_page_t **pages = NULL;
+ struct page **pages = NULL;
unsigned int npages, pg_cnt;
__u64 start_hash = 0, ver = 0;
bool need_swab = false;
/* let's do a 1MB bulk */
npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
- npages /= CFS_PAGE_SIZE;
+ npages /= PAGE_CACHE_SIZE;
/* allocate pages for bulk index read */
OBD_ALLOC(pages, npages * sizeof(*pages));
if (pages == NULL)
GOTO(out, rc = -ENOMEM);
for (i = 0; i < npages; i++) {
- pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+ pages[i] = alloc_page(GFP_IOFS);
if (pages[i] == NULL)
GOTO(out, rc = -ENOMEM);
}
ver = ii->ii_version;
pg_cnt = (ii->ii_count + (LU_PAGE_COUNT) - 1);
- pg_cnt >>= CFS_PAGE_SHIFT - LU_PAGE_SHIFT;
+ pg_cnt >>= PAGE_CACHE_SHIFT - LU_PAGE_SHIFT;
if (pg_cnt > npages) {
CERROR("%s: master returned more pages than expected, %u > %u"
if (pages != NULL) {
for (i = 0; i < npages; i++)
if (pages[i] != NULL)
- cfs_free_page(pages[i]);
+ __free_page(pages[i]);
OBD_FREE(pages, npages * sizeof(*pages));
}
*/
int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
struct idx_info *ii, unsigned int npages,
- cfs_page_t **pages, bool *need_swab)
+ struct page **pages, bool *need_swab)
{
struct ptlrpc_request *req;
struct idx_info *req_ii;
/* req now owns desc and will free it when it gets freed */
for (i = 0; i < npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
/* pack index information in request */
req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
#include "qsd_internal.h"
-extern cfs_mem_cache_t *upd_kmem;
+extern struct kmem_cache *upd_kmem;
/*
* Allocate and fill an qsd_upd_rec structure to be processed by the writeback
{
struct qsd_upd_rec *upd;
- OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, __GFP_IO);
if (upd == NULL) {
CERROR("Failed to allocate upd");
return NULL;
#include <getopt.h>
#ifndef HAVE_FIEMAP
+# include <linux/types.h>
# include <linux/fiemap.h>
#endif
return -ESRCH;
}
- buf = malloc(CFS_PAGE_SIZE);
- for (i = 0; i < glob_info.gl_pathc; i++) {
- char *valuename = NULL;
+ buf = malloc(PAGE_CACHE_SIZE);
+ for (i = 0; i < glob_info.gl_pathc; i++) {
+ char *valuename = NULL;
- memset(buf, 0, CFS_PAGE_SIZE);
+ memset(buf, 0, PAGE_CACHE_SIZE);
/* As listparam_display is used to show param name (with type),
* here "if (only_path)" is ignored.*/
if (popt->show_path) {
continue;
}
- do {
- rc = read(fd, buf, CFS_PAGE_SIZE);
- if (rc == 0)
- break;
+ do {
+ rc = read(fd, buf, PAGE_CACHE_SIZE);
+ if (rc == 0)
+ break;
if (rc < 0) {
fprintf(stderr, "error: get_param: "
"read('%s') failed: %s\n",