/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
34 #define DEBUG_SUBSYSTEM S_LNET
37 #include <linux/vmalloc.h>
38 #include <linux/slab.h>
39 #include <linux/highmem.h>
40 #include <libcfs/libcfs.h>
42 static unsigned int cfs_alloc_flags_to_gfp(u_int32_t flags)
44 unsigned int mflags = 0;
46 if (flags & CFS_ALLOC_ATOMIC)
50 if (flags & CFS_ALLOC_NOWARN)
51 mflags |= __GFP_NOWARN;
52 if (flags & CFS_ALLOC_IO)
54 if (flags & CFS_ALLOC_FS)
56 if (flags & CFS_ALLOC_HIGHMEM)
57 mflags |= __GFP_HIGHMEM;
62 cfs_alloc(size_t nr_bytes, u_int32_t flags)
66 ptr = kmalloc(nr_bytes, cfs_alloc_flags_to_gfp(flags));
67 if (ptr != NULL && (flags & CFS_ALLOC_ZERO))
68 memset(ptr, 0, nr_bytes);
79 cfs_alloc_large(size_t nr_bytes)
81 return vmalloc(nr_bytes);
/* Free a buffer obtained from cfs_alloc_large(). */
void
cfs_free_large(void *addr)
{
	vfree(addr);
}
90 cfs_page_t *cfs_alloc_page(unsigned int flags)
93 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
94 * from here: this will lead to infinite recursion.
96 return alloc_page(cfs_alloc_flags_to_gfp(flags));
99 void cfs_free_page(cfs_page_t *page)
105 cfs_mem_cache_create (const char *name, size_t size, size_t offset,
108 #ifdef HAVE_KMEM_CACHE_CREATE_DTOR
109 return kmem_cache_create(name, size, offset, flags, NULL, NULL);
111 return kmem_cache_create(name, size, offset, flags, NULL);
116 cfs_mem_cache_destroy (cfs_mem_cache_t * cachep)
118 #ifdef HAVE_KMEM_CACHE_DESTROY_INT
119 return kmem_cache_destroy(cachep);
121 kmem_cache_destroy(cachep);
127 cfs_mem_cache_alloc(cfs_mem_cache_t *cachep, int flags)
129 return kmem_cache_alloc(cachep, cfs_alloc_flags_to_gfp(flags));
133 cfs_mem_cache_free(cfs_mem_cache_t *cachep, void *objp)
135 return kmem_cache_free(cachep, objp);
139 * Returns true if \a addr is an address of an allocated object in a slab \a
140 * kmem. Used in assertions. This check is optimistically imprecise, i.e., it
141 * occasionally returns true for the incorrect addresses, but if it returns
142 * false, then the addresses is guaranteed to be incorrect.
144 int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
150 * XXX Copy of mm/slab.c:virt_to_cache(). It won't work with other
151 * allocators, like slub and slob.
153 page = virt_to_page(addr);
154 if (unlikely(PageCompound(page)))
155 page = (struct page *)page->private;
156 return PageSlab(page) && ((void *)page->lru.next) == kmem;
161 EXPORT_SYMBOL(cfs_mem_is_in_cache);
/* Export the libcfs memory-allocation wrappers to other kernel modules. */
EXPORT_SYMBOL(cfs_alloc);
EXPORT_SYMBOL(cfs_free);
EXPORT_SYMBOL(cfs_alloc_large);
EXPORT_SYMBOL(cfs_free_large);
EXPORT_SYMBOL(cfs_alloc_page);
EXPORT_SYMBOL(cfs_free_page);
EXPORT_SYMBOL(cfs_mem_cache_create);
EXPORT_SYMBOL(cfs_mem_cache_destroy);
EXPORT_SYMBOL(cfs_mem_cache_alloc);
EXPORT_SYMBOL(cfs_mem_cache_free);
/*
 * NB: we will rename some of the above functions in another patch:
 *  - rename cfs_alloc to cfs_malloc
 *  - rename cfs_alloc/free_page to cfs_page_alloc/free
 *  - rename cfs_alloc/free_large to cfs_vmalloc/vfree
 */
183 cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
184 size_t nr_bytes, unsigned int flags)
188 ptr = kmalloc_node(nr_bytes, cfs_alloc_flags_to_gfp(flags),
189 cfs_cpt_spread_node(cptab, cpt));
190 if (ptr != NULL && (flags & CFS_ALLOC_ZERO) != 0)
191 memset(ptr, 0, nr_bytes);
195 EXPORT_SYMBOL(cfs_cpt_malloc);
198 cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
200 return vmalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
202 EXPORT_SYMBOL(cfs_cpt_vmalloc);
205 cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags)
207 return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt),
208 cfs_alloc_flags_to_gfp(flags), 0);
210 EXPORT_SYMBOL(cfs_page_cpt_alloc);
213 cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, struct cfs_cpt_table *cptab,
214 int cpt, unsigned int flags)
216 return kmem_cache_alloc_node(cachep, cfs_alloc_flags_to_gfp(flags),
217 cfs_cpt_spread_node(cptab, cpt));
219 EXPORT_SYMBOL(cfs_mem_cache_cpt_alloc);