*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: liang@whamcloud.com
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/llist.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
+#include <lustre_compat.h>
struct cfs_var_array {
unsigned int va_count; /* # of buffers */
if (arr == NULL)
return NULL;
- arr->va_size = size = CFS_L1_CACHE_ALIGN(size);
+ arr->va_size = size = L1_CACHE_ALIGN(size);
arr->va_count = count;
arr->va_cptab = cptab;
EXPORT_SYMBOL(cfs_percpt_number);
/*
- * return memory block shadowed from current CPU
- */
-void *
-cfs_percpt_current(void *vars)
-{
- struct cfs_var_array *arr;
- int cpt;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- cpt = cfs_cpt_current(arr->va_cptab, 0);
- if (cpt < 0)
- return NULL;
-
- return arr->va_ptrs[cpt];
-}
-EXPORT_SYMBOL(cfs_percpt_current);
-
-void *
-cfs_percpt_index(void *vars, int idx)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- LASSERT(idx >= 0 && idx < arr->va_count);
- return arr->va_ptrs[idx];
-}
-EXPORT_SYMBOL(cfs_percpt_index);
-
-/*
* free variable array, see more detail in cfs_array_alloc
*/
void
return (void *)&arr->va_ptrs[0];
}
EXPORT_SYMBOL(cfs_array_alloc);
+
+/*
+ * This is an open-coded version of vfree_atomic(), added to the Linux
+ * kernel in 4.10, with the minimal changes needed to also work on
+ * older kernels.
+ */
+
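+/*
+ * Fallback for kernels whose <linux/llist.h> does not provide
+ * llist_for_each_safe: the next pointer is cached before the loop body
+ * runs, so the current entry may be freed while iterating.
+ */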
+#ifndef llist_for_each_safe
+#define llist_for_each_safe(pos, n, node) \
+ for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
+#endif
+
+struct vfree_deferred {
+ struct llist_head list;
+ struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
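+/*
+ * Runs in process context: drain this CPU's lock-less list and vfree()
+ * every deferred region.  Each llist_node lives at the start of the
+ * region it tracks, so the node pointer is also the address to free.
+ */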
+static void free_work(struct work_struct *w)
+{
+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+ struct llist_node *t, *llnode;
+
+ llist_for_each_safe(llnode, t, llist_del_all(&p->list))
+ vfree((void *)llnode);
+}
+
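+/*
+ * Defer vfree() of @addr to a workqueue: vfree() may sleep, so it
+ * cannot be called directly from atomic context.  llist_add() returns
+ * true only when the list was empty beforehand, so the work item is
+ * scheduled at most once per batch of deferred frees.
+ */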
+void libcfs_vfree_atomic(const void *addr)
+{
+ struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+ if (!addr)
+ return;
+
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
+}
+EXPORT_SYMBOL(libcfs_vfree_atomic);
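+
+/*
+ * A minimal usage sketch, assuming a hypothetical caller that has to
+ * drop a vmalloc()ed buffer while it cannot sleep (all names below are
+ * illustrative, not part of the libcfs API):
+ *
+ *	spin_lock(&cache->c_lock);
+ *	libcfs_vfree_atomic(cache->c_buf);
+ *	cache->c_buf = NULL;
+ *	spin_unlock(&cache->c_lock);
+ *
+ * The freed region is reused to hold the llist_node until free_work()
+ * runs, so it must be at least sizeof(struct llist_node) bytes; any
+ * vmalloc()ed allocation satisfies that.
+ */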
+
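+/*
+ * Set up each possible CPU's deferred-free list and its work item
+ * before libcfs_vfree_atomic() can be used.
+ */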
+void __init init_libcfs_vfree_atomic(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct vfree_deferred *p;
+
+ p = &per_cpu(vfree_deferred, i);
+ init_llist_head(&p->list);
+ INIT_WORK(&p->wq, free_work);
+ }
+}
+
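+/*
+ * Wait for any pending deferred frees so that free_work() cannot run
+ * after the module text is gone.
+ */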
+void __exit exit_libcfs_vfree_atomic(void)
+{
+ flush_scheduled_work();
+}