4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lu_ref.c
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 #define DEBUG_SUBSYSTEM S_CLASS
46 # include <libcfs/libcfs.h>
48 # include <liblustre.h>
52 #include <obd_class.h>
53 #include <obd_support.h>
/**
 * Asserts a condition for a given lu_ref. Must be called with
 * lu_ref::lf_guard held.
 *
 * On failure the ref is dumped, lf_guard is dropped so that
 * lu_ref_print_all() (which takes every ref's lf_guard itself) cannot
 * deadlock, and LASSERT(0) terminates; the trailing re-lock only keeps
 * the lock balance formally correct.
 */
#define REFASSERT(ref, expr) do {					\
	struct lu_ref *__tmp = (ref);					\
									\
	if (unlikely(!(expr))) {					\
		lu_ref_print(__tmp);					\
		spin_unlock(&__tmp->lf_guard);				\
		lu_ref_print_all();					\
		LASSERT(0);						\
		spin_lock(&__tmp->lf_guard);				\
	}								\
} while (0)
74 static struct kmem_cache *lu_ref_link_kmem;
76 static struct lu_kmem_descr lu_ref_caches[] = {
78 .ckd_cache = &lu_ref_link_kmem,
79 .ckd_name = "lu_ref_link_kmem",
80 .ckd_size = sizeof (struct lu_ref_link)
88 * Global list of active (initialized, but not finalized) lu_ref's.
90 * Protected by lu_ref_refs_guard.
92 static CFS_LIST_HEAD(lu_ref_refs);
93 static spinlock_t lu_ref_refs_guard;
94 static struct lu_ref lu_ref_marker = {
95 .lf_guard = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
96 .lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
97 .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
100 void lu_ref_print(const struct lu_ref *ref)
102 struct lu_ref_link *link;
104 CERROR("lu_ref: %p %d %d %s:%d\n",
105 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
106 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
107 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
110 EXPORT_SYMBOL(lu_ref_print);
112 static int lu_ref_is_marker(const struct lu_ref *ref)
114 return (ref == &lu_ref_marker);
117 void lu_ref_print_all(void)
121 spin_lock(&lu_ref_refs_guard);
122 cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
123 if (lu_ref_is_marker(ref))
126 spin_lock(&ref->lf_guard);
128 spin_unlock(&ref->lf_guard);
130 spin_unlock(&lu_ref_refs_guard);
132 EXPORT_SYMBOL(lu_ref_print_all);
134 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
139 spin_lock_init(&ref->lf_guard);
140 CFS_INIT_LIST_HEAD(&ref->lf_list);
141 spin_lock(&lu_ref_refs_guard);
142 cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
143 spin_unlock(&lu_ref_refs_guard);
145 EXPORT_SYMBOL(lu_ref_init_loc);
147 void lu_ref_fini(struct lu_ref *ref)
149 REFASSERT(ref, cfs_list_empty(&ref->lf_list));
150 REFASSERT(ref, ref->lf_refs == 0);
151 spin_lock(&lu_ref_refs_guard);
152 cfs_list_del_init(&ref->lf_linkage);
153 spin_unlock(&lu_ref_refs_guard);
155 EXPORT_SYMBOL(lu_ref_fini);
157 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
158 enum cfs_alloc_flags flags,
162 struct lu_ref_link *link;
165 if (lu_ref_link_kmem != NULL) {
166 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
169 link->ll_scope = scope;
170 link->ll_source = source;
171 spin_lock(&ref->lf_guard);
172 cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
174 spin_unlock(&ref->lf_guard);
179 spin_lock(&ref->lf_guard);
181 spin_unlock(&ref->lf_guard);
182 link = ERR_PTR(-ENOMEM);
188 void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
191 lu_ref_add_context(ref, GFP_IOFS, scope, source);
193 EXPORT_SYMBOL(lu_ref_add);
195 void lu_ref_add_at(struct lu_ref *ref, struct lu_ref_link *link,
196 const char *scope, const void *source)
199 link->ll_scope = scope;
200 link->ll_source = source;
201 spin_lock(&ref->lf_guard);
202 cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
204 spin_unlock(&ref->lf_guard);
206 EXPORT_SYMBOL(lu_ref_add_at);
209 * Version of lu_ref_add() to be used in non-blockable contexts.
211 void lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
214 lu_ref_add_context(ref, GFP_ATOMIC, scope, source);
216 EXPORT_SYMBOL(lu_ref_add_atomic);
218 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
219 const char *scope, const void *source)
221 return link->ll_source == source && !strcmp(link->ll_scope, scope);
/**
 * Maximal chain length seen so far. Used by lu_ref_find() to warn about
 * unusually long link lists; grown by 50% each time it is exceeded.
 */
static unsigned lu_ref_chain_max_length = 127;
230 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
232 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
235 struct lu_ref_link *link;
239 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
241 if (lu_ref_link_eq(link, scope, source)) {
242 if (iterations > lu_ref_chain_max_length) {
243 CWARN("Long lu_ref chain %d \"%s\":%p\n",
244 iterations, scope, source);
245 lu_ref_chain_max_length = iterations * 3 / 2;
253 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
255 struct lu_ref_link *link;
257 spin_lock(&ref->lf_guard);
258 link = lu_ref_find(ref, scope, source);
260 cfs_list_del(&link->ll_linkage);
262 spin_unlock(&ref->lf_guard);
263 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
265 REFASSERT(ref, ref->lf_failed > 0);
267 spin_unlock(&ref->lf_guard);
270 EXPORT_SYMBOL(lu_ref_del);
272 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
274 const void *source0, const void *source1)
276 REFASSERT(ref, link != NULL && !IS_ERR(link));
278 spin_lock(&ref->lf_guard);
279 REFASSERT(ref, link->ll_ref == ref);
280 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
281 link->ll_source = source1;
282 spin_unlock(&ref->lf_guard);
284 EXPORT_SYMBOL(lu_ref_set_at);
286 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
287 const char *scope, const void *source)
289 REFASSERT(ref, link != NULL && !IS_ERR(link));
290 spin_lock(&ref->lf_guard);
291 REFASSERT(ref, link->ll_ref == ref);
292 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
293 cfs_list_del(&link->ll_linkage);
295 spin_unlock(&ref->lf_guard);
297 EXPORT_SYMBOL(lu_ref_del_at);
299 #if defined(__KERNEL__) && defined(LPROCFS)
301 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
303 struct lu_ref *ref = seq->private;
305 spin_lock(&lu_ref_refs_guard);
306 if (cfs_list_empty(&ref->lf_linkage))
308 spin_unlock(&lu_ref_refs_guard);
313 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
315 struct lu_ref *ref = p;
318 LASSERT(seq->private == p);
319 LASSERT(!cfs_list_empty(&ref->lf_linkage));
321 spin_lock(&lu_ref_refs_guard);
322 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
323 if (&next->lf_linkage == &lu_ref_refs) {
327 cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
329 spin_unlock(&lu_ref_refs_guard);
333 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
339 static int lu_ref_seq_show(struct seq_file *seq, void *p)
341 struct lu_ref *ref = p;
344 spin_lock(&lu_ref_refs_guard);
345 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
346 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
347 spin_unlock(&lu_ref_refs_guard);
351 /* print the entry */
352 spin_lock(&next->lf_guard);
353 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
354 next, next->lf_refs, next->lf_failed,
355 next->lf_func, next->lf_line);
356 if (next->lf_refs > 64) {
357 seq_printf(seq, " too many references, skip\n");
359 struct lu_ref_link *link;
362 cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
363 seq_printf(seq, " #%d link: %s %p\n",
364 i++, link->ll_scope, link->ll_source);
366 spin_unlock(&next->lf_guard);
367 spin_unlock(&lu_ref_refs_guard);
372 static struct seq_operations lu_ref_seq_ops = {
373 .start = lu_ref_seq_start,
374 .stop = lu_ref_seq_stop,
375 .next = lu_ref_seq_next,
376 .show = lu_ref_seq_show
379 static int lu_ref_seq_open(struct inode *inode, struct file *file)
381 struct lu_ref *marker = &lu_ref_marker;
384 result = seq_open(file, &lu_ref_seq_ops);
386 spin_lock(&lu_ref_refs_guard);
387 if (!cfs_list_empty(&marker->lf_linkage))
390 cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
391 spin_unlock(&lu_ref_refs_guard);
394 struct seq_file *f = file->private_data;
397 seq_release(inode, file);
404 static int lu_ref_seq_release(struct inode *inode, struct file *file)
406 struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
408 spin_lock(&lu_ref_refs_guard);
409 cfs_list_del_init(&ref->lf_linkage);
410 spin_unlock(&lu_ref_refs_guard);
412 return seq_release(inode, file);
415 static struct file_operations lu_ref_dump_fops = {
416 .owner = THIS_MODULE,
417 .open = lu_ref_seq_open,
420 .release = lu_ref_seq_release
425 int lu_ref_global_init(void)
430 "lu_ref tracking is enabled. Performance isn't.\n");
432 spin_lock_init(&lu_ref_refs_guard);
433 result = lu_kmem_init(lu_ref_caches);
435 #if defined(__KERNEL__) && defined(LPROCFS)
437 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
438 0444, &lu_ref_dump_fops, NULL);
440 lu_kmem_fini(lu_ref_caches);
447 void lu_ref_global_fini(void)
449 #if defined(__KERNEL__) && defined(LPROCFS)
450 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
452 lu_kmem_fini(lu_ref_caches);
455 #endif /* USE_LU_REF */