#include <libcfs/linux/linux-time.h>
#include <libcfs/linux/linux-wait.h>
#include <libcfs/linux/linux-misc.h>
+#ifndef HAVE_XARRAY_SUPPORT
+#include <libcfs/linux/xarray.h>
+#endif
#ifndef HAVE_KTIME_GET_TS64
void ktime_get_ts64(struct timespec64 *ts)
EXPORT_SYMBOL_GPL(cfs_apply_workqueue_attrs);
#ifndef HAVE_XARRAY_SUPPORT
-struct kmem_cache (*radix_tree_node_cachep);
+struct kmem_cache *xarray_cachep;
+
+/* Constructor for xa_node objects in xarray_cachep: each node starts
+ * zeroed with an initialised (empty) private_list.  NOTE(review): as a
+ * slab ctor this presumably runs once per object, not on every alloc,
+ * so free paths must hand nodes back in this same state — see the
+ * scrubbing done in xarray_node_rcu_free().
+ */
+static void xarray_node_ctor(void *arg)
+{
+ struct xa_node *node = arg;
+
+ memset(node, 0, sizeof(*node));
+ INIT_LIST_HEAD(&node->private_list);
+}
#endif
void __init cfs_arch_init(void)
cfs_apply_workqueue_attrs_t =
(void *)cfs_kallsyms_lookup_name("apply_workqueue_attrs");
#ifndef HAVE_XARRAY_SUPPORT
- radix_tree_node_cachep =
- (void *)cfs_kallsyms_lookup_name("radix_tree_node_cachep");
+ xarray_cachep = kmem_cache_create("xarray_cache",
+ sizeof(struct xa_node), 0,
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ xarray_node_ctor);
#endif
}
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/radix-tree.h>
#include <libcfs/linux/xarray.h>
/*
EXPORT_SYMBOL_GPL(xas_load);
/* Move the radix tree node cache here */
-extern struct kmem_cache *radix_tree_node_cachep;
+extern struct kmem_cache *xarray_cachep;
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+/* Clear the @tag bit for the slot at @offset in @node's tag bitmap. */
+static inline void tag_clear(struct xa_node *node, unsigned int tag,
int offset)
{
__clear_bit(offset, node->tags[tag]);
}
-static void radix_tree_node_rcu_free(struct rcu_head *head)
+/* RCU callback: scrub @node back to its constructed state (tags clear,
+ * first slot NULL, count zero) and return it to xarray_cachep.  The
+ * scrub matters because the cache uses a constructor
+ * (xarray_node_ctor), which does not re-run on reallocation.
+ */
+static void xarray_node_rcu_free(struct rcu_head *head)
{
- struct radix_tree_node *node =
- container_of(head, struct radix_tree_node, rcu_head);
+ struct xa_node *node =
+ container_of(head, struct xa_node, rcu_head);
int i;
/*
 * can leave us with a non-NULL entry in the first slot, so clear
 * that here to make sure.
 */
- for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
+ for (i = 0; i < XA_MAX_MARKS; i++)
tag_clear(node, i, 0);
node->slots[0] = NULL;
node->count = 0;
- kmem_cache_free(radix_tree_node_cachep, node);
+ kmem_cache_free(xarray_cachep, node);
}
#define XA_RCU_FREE ((struct xarray *)1)
{
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
node->array = XA_RCU_FREE;
- call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+ call_rcu(&node->rcu_head, xarray_node_rcu_free);
}
/*
if (!node)
return;
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
- kmem_cache_free(radix_tree_node_cachep, node);
+ kmem_cache_free(xarray_cachep, node);
xas->xa_alloc = NULL;
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
#endif
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc(xarray_cachep, gfp);
if (!xas->xa_alloc)
return false;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
#endif
if (gfpflags_allow_blocking(gfp)) {
xas_unlock_type(xas, lock_type);
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc(xarray_cachep, gfp);
xas_lock_type(xas, lock_type);
} else {
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc(xarray_cachep, gfp);
}
if (!xas->xa_alloc)
return false;
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
#endif
- node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ node = kmem_cache_alloc(xarray_cachep, gfp);
if (!node) {
xas_set_err(xas, -ENOMEM);
return NULL;