X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Finclude%2Flibcfs%2Fuser-mem.h;h=af0e4b0fecac344e58a93f05edaee3643247eded;hb=3120c92d0581d884d29f428dbf8eb9d7dfab97b0;hp=423ba8ea4398ed9b23be865cc9b98c93f719558a;hpb=78fc98c662e5a7d98663f318077d621448070070;p=fs%2Flustre-release.git

diff --git a/libcfs/include/libcfs/user-mem.h b/libcfs/include/libcfs/user-mem.h
index 423ba8e..af0e4b0 100644
--- a/libcfs/include/libcfs/user-mem.h
+++ b/libcfs/include/libcfs/user-mem.h
@@ -1,3 +1,25 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.  A copy is
+ * included in the COPYING file that accompanied this code.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+
 #ifndef __LIBCFS_USER_MEM_H__
 #define __LIBCFS_USER_MEM_H__
 
@@ -15,103 +37,145 @@
  */
 #define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
 
-typedef struct page {
-        void *addr;
-        unsigned long index;
-        cfs_list_t list;
-        unsigned long private;
+struct page {
+	void *addr;
+	unsigned long index;
+	struct list_head list;
+	unsigned long private;
 
-        /* internally used by liblustre file i/o */
-        int _offset;
-        int _count;
+	/* internally used by liblustre file i/o */
+	int _offset;
+	int _count;
 #ifdef LIBLUSTRE_HANDLE_UNALIGNED_PAGE
-        int _managed;
+	int _managed;
 #endif
-        cfs_list_t _node;
-} cfs_page_t;
+	struct list_head _node;
+};
 
 /* 4K */
-#define CFS_PAGE_SHIFT 12
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
-
-cfs_page_t *cfs_alloc_page(unsigned int flags);
-void cfs_free_page(cfs_page_t *pg);
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
-
-#define cfs_get_page(p) __I_should_not_be_called__(at_all)
-#define cfs_page_count(p) __I_should_not_be_called__(at_all)
-#define cfs_page_index(p) ((p)->index)
-#define cfs_page_pin(page) do {} while (0)
-#define cfs_page_unpin(page) do {} while (0)
+#define PAGE_CACHE_SHIFT 12
+#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+
+struct page *alloc_page(unsigned int flags);
+void __free_page(struct page *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
+
+#define get_page(p) __I_should_not_be_called__(at_all)
+#define page_count(p) __I_should_not_be_called__(at_all)
+#define page_index(p) ((p)->index)
+#define page_cache_get(page) do { } while (0)
+#define page_cache_release(page) do { } while (0)
+
+#define inc_zone_page_state(page, state) do {} while (0)
+#define dec_zone_page_state(page, state) do {} while (0)
 
 /*
  * Memory allocator
  * Inline function, so utils can use them without linking of libcfs
  */
-#define __ALLOC_ZERO (1 << 2)
-static inline void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+	/* allocation is not allowed to block */
+	GFP_ATOMIC = 0x1,
+	/* allocation is allowed to block */
+	__GFP_WAIT = 0x2,
+	/* allocation should return zeroed memory */
+	__GFP_ZERO = 0x4,
+	/* allocation is allowed to call file-system code to free/clean
+	 * memory */
+	__GFP_FS = 0x8,
+	/* allocation is allowed to do io to free/clean memory */
+	__GFP_IO = 0x10,
+	/* don't report allocation failure to the console */
+	__GFP_NOWARN = 0x20,
+	/* standard allocator flag combination */
+	GFP_IOFS = __GFP_FS | __GFP_IO,
+	GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+	GFP_NOFS = __GFP_WAIT | __GFP_IO,
+	GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+	/* allow to return page beyond KVM. It has to be mapped into KVM by
+	 * kmap() and unmapped with kunmap(). */
+	__GFP_HIGHMEM = 0x40,
+	GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+		       __GFP_HIGHMEM,
+};
+
+static inline void *kmalloc(size_t nr_bytes, u_int32_t flags)
 {
-        void *result;
+	void *result;
 
-        result = malloc(nr_bytes);
-        if (result != NULL && (flags & __ALLOC_ZERO))
-                memset(result, 0, nr_bytes);
-        return result;
+	result = malloc(nr_bytes);
+	if (result != NULL && (flags & __GFP_ZERO))
+		memset(result, 0, nr_bytes);
+	return result;
 }
 
-#define cfs_free(addr) free(addr)
-#define cfs_alloc_large(nr_bytes) cfs_alloc(nr_bytes, 0)
-#define cfs_free_large(addr) cfs_free(addr)
+#define kfree(addr) free(addr)
+#define vmalloc(nr_bytes) kmalloc(nr_bytes, 0)
+#define vfree(addr) free(addr)
 
-#define CFS_ALLOC_ATOMIC_TRY (0)
+#define ALLOC_ATOMIC_TRY (0)
 
 /*
  * SLAB allocator
  */
-typedef struct {
+struct kmem_cache {
         int size;
-} cfs_mem_cache_t;
+};
 
-#define CFS_SLAB_HWCACHE_ALIGN 0
+#define SLAB_HWCACHE_ALIGN 0
 #define SLAB_DESTROY_BY_RCU 0
-#define CFS_SLAB_KERNEL 0
-#define CFS_SLAB_NOFS 0
+#define SLAB_KERNEL 0
+#define SLAB_NOFS 0
 
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c);
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp);
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
+
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+				     unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *c);
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp);
+void kmem_cache_free(struct kmem_cache *c, void *addr);
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem);
 
 /*
  * NUMA allocators
  */
 #define cfs_cpt_malloc(cptab, cpt, bytes, flags)	\
-	cfs_alloc(bytes, flags)
+	kmalloc(bytes, flags)
 #define cfs_cpt_vmalloc(cptab, cpt, bytes)		\
-	cfs_alloc(bytes)
+	vmalloc(bytes)
 #define cfs_page_cpt_alloc(cptab, cpt, mask)		\
-	cfs_alloc_page(mask)
+	alloc_page(mask)
 #define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp)	\
-	cfs_mem_cache_alloc(cache, gfp)
+	kmem_cache_alloc(cache, gfp)
+
+#define smp_rmb() do {} while (0)
 
 /*
  * Copy to/from user
  */
-static inline int cfs_copy_from_user(void *a,void *b, int c)
+static inline int copy_from_user(void *a, void *b, int c)
 {
-        memcpy(a,b,c);
-        return 0;
+	memcpy(a, b, c);
+	return 0;
 }
 
-static inline int cfs_copy_to_user(void *a,void *b, int c)
+static inline int copy_to_user(void *a, void *b, int c)
 {
-        memcpy(a,b,c);
-        return 0;
+	memcpy(a, b, c);
+	return 0;
 }
 
 #endif
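
The hunks above replace the cfs_-prefixed userspace wrappers with kernel-style names while keeping the liblustre semantics: kmalloc() is malloc() plus an optional memset() when __GFP_ZERO is set, kfree() is free(), and copy_from_user()/copy_to_user() degenerate to memcpy() and always return 0. The fragment below is an illustrative sketch of those inline emulations only, not part of the patch; the demo_copy() helper is hypothetical, and including the header through libcfs/libcfs.h is an assumption about how a liblustre build normally pulls it in.

#include <errno.h>
#include <libcfs/libcfs.h>	/* assumed umbrella header that includes user-mem.h */

/* hypothetical helper: copy a caller-supplied buffer into zeroed heap memory */
static int demo_copy(const void *ubuf, size_t len)
{
	/* the inline kmalloc() above honours __GFP_ZERO by zeroing the buffer */
	char *kbuf = kmalloc(len, GFP_KERNEL | __GFP_ZERO);
	int rc;

	if (kbuf == NULL)
		return -ENOMEM;

	/* in this userspace emulation copy_from_user() is a plain memcpy()
	 * and always reports success (returns 0) */
	rc = copy_from_user(kbuf, (void *)ubuf, len);

	kfree(kbuf);
	return rc;
}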