4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2012, Intel Corporation.
28 * This file is part of Lustre, http://www.lustre.org/
29 * Lustre is a trademark of Sun Microsystems, Inc.
31 * Author: liang@whamcloud.com
34 #define DEBUG_SUBSYSTEM S_LNET
36 #include <linux/workqueue.h>
37 #include <libcfs/libcfs.h>
38 #include <lustre_compat.h>
/*
 * Descriptor for a per-CPU-partition variable array: the caller-visible
 * value is &va_ptrs[0]; the header fields live immediately before it.
 *
 * NOTE: va_ptrs is a C99 flexible array member (standard) rather than
 * the GNU zero-length-array extension va_ptrs[0]; all existing users
 * (offsetof(..., va_ptrs[n]) sizing and &arr->va_ptrs[0]) are unchanged.
 */
struct cfs_var_array {
	unsigned int		va_count;	/* # of buffers */
	unsigned int		va_size;	/* size of each var */
	struct cfs_cpt_table	*va_cptab;	/* cpu partition table */
	void			*va_ptrs[];	/* buffer addresses */
};
48 * free per-cpu data, see more detail in cfs_percpt_free
51 cfs_percpt_free(void *vars)
53 struct cfs_var_array *arr;
56 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
58 for (i = 0; i < arr->va_count; i++) {
59 if (arr->va_ptrs[i] != NULL)
60 LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
63 LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
64 va_ptrs[arr->va_count]));
66 EXPORT_SYMBOL(cfs_percpt_free);
69 * allocate per cpu-partition variables, returned value is an array of pointers,
70 * variable can be indexed by CPU partition ID, i.e:
72 * arr = cfs_percpt_alloc(cfs_cpu_pt, size);
73 * then caller can access memory block for CPU 0 by arr[0],
74 * memory block for CPU 1 by arr[1]...
75 * memory block for CPU N by arr[N]...
80 cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
82 struct cfs_var_array *arr;
86 count = cfs_cpt_number(cptab);
88 LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
92 arr->va_size = size = L1_CACHE_ALIGN(size);
93 arr->va_count = count;
94 arr->va_cptab = cptab;
96 for (i = 0; i < count; i++) {
97 LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
98 if (arr->va_ptrs[i] == NULL) {
99 cfs_percpt_free((void *)&arr->va_ptrs[0]);
104 return (void *)&arr->va_ptrs[0];
106 EXPORT_SYMBOL(cfs_percpt_alloc);
109 * return number of CPUs (or number of elements in per-cpu data)
110 * according to cptab of @vars
113 cfs_percpt_number(void *vars)
115 struct cfs_var_array *arr;
117 arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
119 return arr->va_count;
121 EXPORT_SYMBOL(cfs_percpt_number);
/*
 * This is opencoding of vfree_atomic from Linux kernel added in 4.10 with
 * minimum changes needed to work on older kernels too.
 */

#ifndef llist_for_each_safe
/* Walk a singly-linked llist.  Safe against removal/reuse of the current
 * entry because the successor is sampled before the loop body runs. */
#define llist_for_each_safe(pos, n, node)				\
	for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
#endif
134 struct vfree_deferred {
135 struct llist_head list;
136 struct work_struct wq;
138 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
140 static void free_work(struct work_struct *w)
142 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
143 struct llist_node *t, *llnode;
145 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
146 vfree((void *)llnode);
149 void libcfs_vfree_atomic(const void *addr)
151 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
156 if (llist_add((struct llist_node *)addr, &p->list))
157 schedule_work(&p->wq);
159 EXPORT_SYMBOL(libcfs_vfree_atomic);
161 void __init init_libcfs_vfree_atomic(void)
165 for_each_possible_cpu(i) {
166 struct vfree_deferred *p;
168 p = &per_cpu(vfree_deferred, i);
169 init_llist_head(&p->list);
170 INIT_WORK(&p->wq, free_work);
174 void __exit exit_libcfs_vfree_atomic(void)
176 flush_scheduled_work();