* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
{
unsigned int mflags = 0;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- if (flags & CFS_ALLOC_ATOMIC)
- mflags |= __GFP_HIGH;
- else if (flags & CFS_ALLOC_WAIT)
- mflags |= __GFP_WAIT;
- else
- mflags |= (__GFP_HIGH | __GFP_WAIT);
- if (flags & CFS_ALLOC_IO)
- mflags |= __GFP_IO | __GFP_HIGHIO;
-#else
if (flags & CFS_ALLOC_ATOMIC)
mflags |= __GFP_HIGH;
else
mflags |= __GFP_NOWARN;
if (flags & CFS_ALLOC_IO)
mflags |= __GFP_IO;
-#endif
if (flags & CFS_ALLOC_FS)
mflags |= __GFP_FS;
if (flags & CFS_ALLOC_HIGH)
vfree(addr);
}
-cfs_page_t *cfs_alloc_pages(unsigned int flags, unsigned int order)
+/**
+ * Allocate exactly one page, converting the CFS allocation flags
+ * \a flags to kernel GFP flags via cfs_alloc_flags_to_gfp().
+ * Successor to the multi-page cfs_alloc_pages(); callers now
+ * receive order-0 pages only.  Returns NULL on allocation failure.
+ */
+cfs_page_t *cfs_alloc_page(unsigned int flags)
{
/*
 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
 * from here: this will lead to infinite recursion.
 */
- return alloc_pages(cfs_alloc_flags_to_gfp(flags), order);
+ return alloc_page(cfs_alloc_flags_to_gfp(flags));
}
-void __cfs_free_pages(cfs_page_t *page, unsigned int order)
+/**
+ * Free a single page previously obtained from cfs_alloc_page().
+ * Replaces the order-aware __cfs_free_pages() to match the new
+ * single-page allocation interface.
+ */
+void cfs_free_page(cfs_page_t *page)
{
- __free_pages(page, order);
+ __free_page(page);
}
cfs_mem_cache_t *
return kmem_cache_free(cachep, objp);
}
+/**
+ * Returns true if \a addr is an address of an allocated object in a slab \a
+ * kmem. Used in assertions. This check is optimistically imprecise, i.e., it
+ * occasionally returns true for incorrect addresses, but if it returns
+ * false, then the address is guaranteed to be incorrect.
+ */
+int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+{
+#ifdef CONFIG_SLAB
+ struct page *page;
+
+ /*
+ * XXX Copy of mm/slab.c:virt_to_cache(). It won't work with other
+ * allocators, like slub and slob.
+ */
+ page = virt_to_page(addr);
+ if (unlikely(PageCompound(page)))
+ page = (struct page *)page->private;
+ return PageSlab(page) && ((void *)page->lru.next) == kmem;
+#else
+ /* No cheap cache-membership check under SLUB/SLOB: optimistically
+ * report the address as valid so assertions never false-trip. */
+ return 1;
+#endif
+}
+EXPORT_SYMBOL(cfs_mem_is_in_cache);
+
+
/* Export the public libcfs memory API to other kernel modules. */
EXPORT_SYMBOL(cfs_alloc);
EXPORT_SYMBOL(cfs_free);
EXPORT_SYMBOL(cfs_alloc_large);
EXPORT_SYMBOL(cfs_free_large);
-EXPORT_SYMBOL(cfs_alloc_pages);
-EXPORT_SYMBOL(__cfs_free_pages);
+EXPORT_SYMBOL(cfs_alloc_page);
+EXPORT_SYMBOL(cfs_free_page);
EXPORT_SYMBOL(cfs_mem_cache_create);
EXPORT_SYMBOL(cfs_mem_cache_destroy);
EXPORT_SYMBOL(cfs_mem_cache_alloc);