4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2014, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * libcfs/include/libcfs/libcfs_private.h
33 * Various defines for libcfs.
37 #ifndef __LIBCFS_PRIVATE_H__
38 #define __LIBCFS_PRIVATE_H__
40 #ifndef DEBUG_SUBSYSTEM
41 # define DEBUG_SUBSYSTEM S_UNDEFINED
44 #include <linux/slab.h>
45 #include <linux/vmalloc.h>
50 * When this is on, LASSERT macro includes check for assignment used instead
51 * of equality check, but doesn't have unlikely(). Turn this on from time to
52 * time to make test-builds. This shouldn't be on for production release.
54 #define LASSERT_CHECKED (0)
60 * Strange construction with empty "then" clause is used to trigger compiler
61 * warnings on the assertions of the form LASSERT(a = b);
63 * "warning: suggest parentheses around assignment used as truth value"
65 * requires -Wall. Unfortunately this rules out use of likely/unlikely.
67 #define LASSERTF(cond, fmt, ...) \
72 LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
73 libcfs_debug_msg(&__msg_data, \
74 "ASSERTION( %s ) failed: " fmt, #cond, \
76 lbug_with_loc(&__msg_data); \
80 #define LASSERT(cond) LASSERTF(cond, "\n")
82 #else /* !LASSERT_CHECKED */
/*
 * Production variant: uses unlikely() so the failure path stays off the
 * hot path.  On failure the assertion text is logged at D_EMERG and the
 * thread LBUGs (never returns).
 *
 * NOTE(review): the do/while framing and "##__VA_ARGS__" line were truncated
 * in this chunk and have been reconstructed around the surviving body.
 */
#define LASSERTF(cond, fmt, ...)					\
do {									\
	if (unlikely(!(cond))) {					\
		LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);	\
		libcfs_debug_msg(&__msg_data,				\
				 "ASSERTION( %s ) failed: " fmt, #cond,	\
				 ## __VA_ARGS__);			\
		lbug_with_loc(&__msg_data);				\
	}								\
} while (0)

/* plain assertion: report and LBUG on failure, no extra message */
#define LASSERT(cond) LASSERTF(cond, "\n")
96 #endif /* !LASSERT_CHECKED */
97 #else /* !LIBCFS_DEBUG */
/* sizeof is used so the condition is type-checked but never evaluated
 * (no side effects, no generated code).  Parenthesized sizeof operand is
 * the conventional spelling of the original "sizeof!!(e)". */
# define LASSERT(e) ((void)sizeof(!!(e)))
# define LASSERTF(cond, ...) ((void)sizeof(!!(cond)))
101 #endif /* !LIBCFS_DEBUG */
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/*
 * This is for more expensive checks that one doesn't want to be enabled all
 * the time. LINVRNT() has to be explicitly enabled by
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
/* invariant checks disabled: type-check only, never evaluate */
# define LINVRNT(exp) ((void)sizeof(!!(exp)))
#endif
114 void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
118 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
119 lbug_with_loc(&msgdata); \
#ifdef LIBCFS_DEBUG
/* running total of bytes allocated through the LIBCFS_ALLOC/FREE macros */
extern atomic64_t libcfs_kmem;

/* NOTE(review): the "#ifdef LIBCFS_DEBUG" / "#else" lines and the do/while
 * framing were truncated in this chunk; reconstructed to match the
 * "#endif /+ LIBCFS_DEBUG +/" below and the no-op stubs. */
# define libcfs_kmem_inc(ptr, size)		\
do {						\
	atomic64_add(size, &libcfs_kmem);	\
} while (0)

# define libcfs_kmem_dec(ptr, size)		\
do {						\
	atomic64_sub(size, &libcfs_kmem);	\
} while (0)

# define libcfs_kmem_read()			\
	(long long)atomic64_read(&libcfs_kmem)

#else /* !LIBCFS_DEBUG */
/* accounting disabled: all three compile away */
# define libcfs_kmem_inc(ptr, size) do {} while (0)
# define libcfs_kmem_dec(ptr, size) do {} while (0)
# define libcfs_kmem_read() (0)
#endif /* LIBCFS_DEBUG */
#ifndef LIBCFS_VMALLOC_SIZE
/* threshold above which allocations go through vmalloc instead of kmalloc */
#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_SHIFT)	/* 2 pages */
#endif
/* sanity-check the allocation context: in atomic (interrupt) context only
 * small, kmalloc-able sizes with GFP_ATOMIC are permitted.
 * NOTE(review): do/while framing reconstructed (truncated in this chunk);
 * the "!= 0" has been moved to the intended grouping — truth value is
 * unchanged, since (A && B) != 0 is equivalent to A && B. */
#define LIBCFS_ALLOC_PRE(size, mask)					\
do {									\
	LASSERT(!in_interrupt() ||					\
		((size) <= LIBCFS_VMALLOC_SIZE &&			\
		 ((mask) & GFP_ATOMIC) != 0));				\
} while (0)

/* after an allocation attempt: on failure log the site and the running
 * lnet byte total; on success account the bytes and trace the pointer */
#define LIBCFS_ALLOC_POST(ptr, size)					\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LNET: out of memory at %s:%d (tried to alloc '"	\
		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
		CERROR("LNET: %lld total bytes allocated by lnet\n",	\
		       libcfs_kmem_read());				\
	} else {							\
		libcfs_kmem_inc((ptr), (size));				\
		CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %lld).\n", \
		       (int)(size), (ptr), libcfs_kmem_read());		\
	}								\
} while (0)
/**
 * allocate memory with GFP flags @mask
 * The allocated memory is zeroed-out.
 * Small requests use kzalloc(); requests above LIBCFS_VMALLOC_SIZE fall
 * back to vzalloc().  @ptr receives the result (NULL on failure).
 * NOTE(review): do/while framing reconstructed — truncated in this chunk.
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)				\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kzalloc((size), (mask)) : vzalloc(size);		\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)
/** default allocator: zeroed memory with GFP_NOFS (safe under fs locks) */
#define LIBCFS_ALLOC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)

/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
/**
 * allocate memory for specified CPU partition
 *   \a cptab != NULL, \a cpt is CPU partition id of \a cptab
 *   \a cptab == NULL, \a cpt is HW NUMA node id
 * The allocated memory is zeroed-out.
 * NOTE(review): do/while framing reconstructed — truncated in this chunk.
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)		\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		cfs_cpt_malloc((cptab), (cpt), (size), (mask) | __GFP_ZERO) : \
		cfs_cpt_vzalloc((cptab), (cpt), (size));		\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size)				\
	LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
/* set up / tear down the machinery behind libcfs_vfree_atomic(), which
 * LIBCFS_FREE() uses to release vmalloc'd buffers from atomic context */
void init_libcfs_vfree_atomic(void);
void exit_libcfs_vfree_atomic(void);
/* free memory obtained via LIBCFS_ALLOC*(): warns (and does nothing) on a
 * NULL pointer, un-accounts the bytes, and releases via the allocator that
 * matches the size threshold used at allocation time.
 * NOTE(review): the do/while framing, the "int s" declaration, the "break"
 * on NULL and the small-size kfree() branch were truncated in this chunk
 * and have been reconstructed — confirm against the upstream file. */
#define LIBCFS_FREE(ptr, size)						\
do {									\
	int s = (int)(size);						\
									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "	\
		       "%s:%d\n", s, __FILE__, __LINE__);		\
		break;							\
	}								\
	libcfs_kmem_dec((ptr), s);					\
	CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %lld).\n",	\
	       s, (ptr), libcfs_kmem_read());				\
	if (unlikely(s > LIBCFS_VMALLOC_SIZE))				\
		libcfs_vfree_atomic(ptr);				\
	else								\
		kfree(ptr);						\
} while (0)
/******************************************************************************/

/* libcfs debug-trace buffer control: dump the log, init/teardown with a
 * given buffer size, and clear or mark the trace buffer */
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);
244 #define LASSERT_ATOMIC_ENABLED (1)
246 #if LASSERT_ATOMIC_ENABLED
248 /** assert value of @a is equal to @v */
249 #define LASSERT_ATOMIC_EQ(a, v) \
250 LASSERTF(atomic_read(a) == v, "value: %d\n", atomic_read((a)));
252 /** assert value of @a is unequal to @v */
253 #define LASSERT_ATOMIC_NE(a, v) \
254 LASSERTF(atomic_read(a) != v, "value: %d\n", atomic_read((a)));
256 /** assert value of @a is little than @v */
257 #define LASSERT_ATOMIC_LT(a, v) \
258 LASSERTF(atomic_read(a) < v, "value: %d\n", atomic_read((a)));
260 /** assert value of @a is little/equal to @v */
261 #define LASSERT_ATOMIC_LE(a, v) \
262 LASSERTF(atomic_read(a) <= v, "value: %d\n", atomic_read((a)));
264 /** assert value of @a is great than @v */
265 #define LASSERT_ATOMIC_GT(a, v) \
266 LASSERTF(atomic_read(a) > v, "value: %d\n", atomic_read((a)));
268 /** assert value of @a is great/equal to @v */
269 #define LASSERT_ATOMIC_GE(a, v) \
270 LASSERTF(atomic_read(a) >= v, "value: %d\n", atomic_read((a)));
/* NOTE(review): the do/while framing of the four range assertions below was
 * truncated in this chunk; reconstructed so each reads atomic_read() exactly
 * once.  @v1/@v2 parenthesized for macro hygiene. */

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > (v1) && __v < (v2), "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > (v1) && __v <= (v2), "value: %d\n", __v); \
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= (v1) && __v < (v2), "value: %d\n", __v); \
} while (0)

/** assert value of @a is within [@v1, @v2] inclusive */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= (v1) && __v <= (v2), "value: %d\n", __v); \
} while (0)
#else /* !LASSERT_ATOMIC_ENABLED */

/* assertions disabled: every LASSERT_ATOMIC_* compiles to an empty
 * statement (arguments are neither evaluated nor type-checked) */
#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */
/* common special cases of the atomic assertions above */
#define LASSERT_ATOMIC_ZERO(a)	LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)	LASSERT_ATOMIC_GT(a, 0)

/* typed single-object / array allocation helpers sized from the pointee.
 * NOTE(review): stray trailing ';' removed from CFS_ALLOC_PTR and
 * CFS_FREE_PTR so they expand to one statement, consistent with the
 * _ARRAY variants. */
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_ALLOC_PTR_ARRAY(ptr, count) \
	LIBCFS_ALLOC(ptr, (count) * sizeof(*(ptr)))

#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR_ARRAY(ptr, count) \
	LIBCFS_FREE(ptr, (count) * sizeof(*(ptr)))
/* logical implication: "a implies b" — false only when a is true and b is
 * false; note a is always evaluated, b only when a is truthy */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
/* string-or-empty: yields @ptr, or "" when @ptr is NULL (@ptr may be
 * evaluated twice).  Whole expansion parenthesized so the macro composes
 * safely inside larger expressions. */
#define MKSTR(ptr) ((ptr) ? (ptr) : "")
333 #ifndef HAVE_CFS_SIZE_ROUND
/* round @val up to the next multiple of 8 bytes.
 * NOTE(review): the function's braces were truncated in this chunk and have
 * been restored. */
static inline size_t cfs_size_round(size_t val)
{
	/* ~0x7 is int -8, which sign-extends to a size_t mask clearing the
	 * low three bits */
	return (val + 7) & (~0x7);
}
#define HAVE_CFS_SIZE_ROUND