*/
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: liang@whamcloud.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/workqueue.h>

#include <libcfs/libcfs.h>
#include <lustre_compat.h>
struct cfs_var_array {
unsigned int va_count; /* # of buffers */
}
EXPORT_SYMBOL(cfs_percpt_number);

/*
 * This is open-coding of vfree_atomic() from the Linux kernel (added in
 * 4.10), with the minimum changes needed to work on older kernels too.
 */
-void *
-cfs_percpt_current(void *vars)
-{
- struct cfs_var_array *arr;
- int cpt;
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- cpt = cfs_cpt_current(arr->va_cptab, 0);
- if (cpt < 0)
- return NULL;
+#ifndef llist_for_each_safe
+#define llist_for_each_safe(pos, n, node) \
+ for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
+#endif
- return arr->va_ptrs[cpt];
-}
+struct vfree_deferred {
+ struct llist_head list;
+ struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
-void *
-cfs_percpt_index(void *vars, int idx)
+static void free_work(struct work_struct *w)
{
- struct cfs_var_array *arr;
+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+ struct llist_node *t, *llnode;
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- LASSERT(idx >= 0 && idx < arr->va_count);
- return arr->va_ptrs[idx];
+ llist_for_each_safe(llnode, t, llist_del_all(&p->list))
+ vfree((void *)llnode);
}
-/*
- * free variable array, see more detail in cfs_array_alloc
- */
-void
-cfs_array_free(void *vars)
+void libcfs_vfree_atomic(const void *addr)
{
- struct cfs_var_array *arr;
- int i;
+ struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
+ if (!addr)
+ return;
- for (i = 0; i < arr->va_count; i++) {
- if (arr->va_ptrs[i] == NULL)
- continue;
-
- LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
- }
- LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
- va_ptrs[arr->va_count]));
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
}
-EXPORT_SYMBOL(cfs_array_free);
+EXPORT_SYMBOL(libcfs_vfree_atomic);
-/*
- * allocate a variable array, returned value is an array of pointers.
- * Caller can specify length of array by @count, @size is size of each
- * memory block in array.
- */
-void *
-cfs_array_alloc(int count, unsigned int size)
+void __init init_libcfs_vfree_atomic(void)
{
- struct cfs_var_array *arr;
- int i;
-
- LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
- if (arr == NULL)
- return NULL;
+ int i;
- arr->va_count = count;
- arr->va_size = size;
+ for_each_possible_cpu(i) {
+ struct vfree_deferred *p;
- for (i = 0; i < count; i++) {
- LIBCFS_ALLOC(arr->va_ptrs[i], size);
-
- if (arr->va_ptrs[i] == NULL) {
- cfs_array_free((void *)&arr->va_ptrs[0]);
- return NULL;
- }
+ p = &per_cpu(vfree_deferred, i);
+ init_llist_head(&p->list);
+ INIT_WORK(&p->wq, free_work);
}
+}
- return (void *)&arr->va_ptrs[0];
+void __exit exit_libcfs_vfree_atomic(void)
+{
+ flush_scheduled_work();
}
-EXPORT_SYMBOL(cfs_array_alloc);