4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lu_ref.c
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 #define DEBUG_SUBSYSTEM S_CLASS
46 # include <libcfs/libcfs.h>
48 # include <liblustre.h>
52 #include <obd_class.h>
53 #include <obd_support.h>
59 * Asserts a condition for a given lu_ref. Must be called with
60 * lu_ref::lf_guard held.
/*
 * NOTE(review): on failure the macro prints the offending lu_ref, drops
 * lf_guard (so the diagnostics can take other locks without
 * self-deadlocking), and re-acquires it afterwards.  The expansion lines
 * between original lines 67 and 70 (presumably lu_ref_print_all() and an
 * assertion) are missing from this listing — confirm against the full
 * file before editing.
 */
62 #define REFASSERT(ref, expr) do { \
63 struct lu_ref *__tmp = (ref); \
65 if (unlikely(!(expr))) { \
66 lu_ref_print(__tmp); \
67 spin_unlock(&__tmp->lf_guard); \
70 spin_lock(&__tmp->lf_guard); \
/*
 * Fields of struct lu_ref_link (the struct opener and, presumably, a
 * ll_scope member are not visible in this listing — ll_scope is read
 * elsewhere in this file).  One link records a single named reference
 * held against a lu_ref.
 */
75 struct lu_ref *ll_ref;
76 cfs_list_t ll_linkage;
78 const void *ll_source;
/* Slab cache for struct lu_ref_link allocations; created via
 * lu_kmem_init(lu_ref_caches) in lu_ref_global_init(). */
81 static cfs_mem_cache_t *lu_ref_link_kmem;
/* Descriptor array consumed by lu_kmem_init()/lu_kmem_fini() in
 * lu_ref_global_init()/lu_ref_global_fini() to create and destroy the
 * lu_ref_link slab cache declared above. */
83 static struct lu_kmem_descr lu_ref_caches[] = {
85 .ckd_cache = &lu_ref_link_kmem,
86 .ckd_name = "lu_ref_link_kmem",
87 .ckd_size = sizeof (struct lu_ref_link)
95 * Global list of active (initialized, but not finalized) lu_ref's.
97 * Protected by lu_ref_refs_guard.
99 static CFS_LIST_HEAD(lu_ref_refs);
/* Guards lu_ref_refs and each lu_ref's lf_linkage membership;
 * initialized in lu_ref_global_init(). */
100 static spinlock_t lu_ref_refs_guard;
/* Sentinel lu_ref inserted into lu_ref_refs by lu_ref_seq_open() to mark
 * the seq_file reader's position; recognized by lu_ref_is_marker() and
 * skipped when printing. */
101 static struct lu_ref lu_ref_marker = {
102 .lf_guard = DEFINE_SPINLOCK(lu_ref_marker.lf_guard),
103 .lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
104 .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
/*
 * Dumps one lu_ref to the console: the ref/failed counters and the
 * function:line where it was initialized, followed by every live link
 * (scope string and source tag).  NOTE(review): lf_list traversal
 * presumably requires ref->lf_guard held — REFASSERT invokes this with
 * the lock held; confirm for other callers.
 */
107 void lu_ref_print(const struct lu_ref *ref)
109 struct lu_ref_link *link;
111 CERROR("lu_ref: %p %d %d %s:%d\n",
112 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
113 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
114 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
117 EXPORT_SYMBOL(lu_ref_print);
119 static int lu_ref_is_marker(const struct lu_ref *ref)
121 return (ref == &lu_ref_marker);
/*
 * Walks the global lu_ref_refs list under lu_ref_refs_guard and prints
 * every active lu_ref, skipping the seq_file marker.  Each entry's
 * lf_guard is taken around the per-ref dump; the lu_ref_print(ref) call
 * between the lock/unlock pair (original line 134) is missing from this
 * listing.
 */
124 void lu_ref_print_all(void)
128 spin_lock(&lu_ref_refs_guard);
129 cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
130 if (lu_ref_is_marker(ref))
133 spin_lock(&ref->lf_guard);
135 spin_unlock(&ref->lf_guard);
137 spin_unlock(&lu_ref_refs_guard);
139 EXPORT_SYMBOL(lu_ref_print_all);
/*
 * Initializes a lu_ref and publishes it on the global lu_ref_refs list
 * under lu_ref_refs_guard.  The caller's @func/@line are recorded for
 * diagnostics — the assignments of lf_refs/lf_failed/lf_func/lf_line
 * (original lines 142-145) are not visible in this listing.
 */
141 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
146 spin_lock_init(&ref->lf_guard);
147 CFS_INIT_LIST_HEAD(&ref->lf_list);
148 spin_lock(&lu_ref_refs_guard);
149 cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
150 spin_unlock(&lu_ref_refs_guard);
152 EXPORT_SYMBOL(lu_ref_init_loc);
/*
 * Finalizes a lu_ref: asserts that no links and no outstanding
 * references remain, then unhooks it from the global lu_ref_refs list
 * under lu_ref_refs_guard.
 */
154 void lu_ref_fini(struct lu_ref *ref)
156 REFASSERT(ref, cfs_list_empty(&ref->lf_list));
157 REFASSERT(ref, ref->lf_refs == 0);
158 spin_lock(&lu_ref_refs_guard);
159 cfs_list_del_init(&ref->lf_linkage);
160 spin_unlock(&lu_ref_refs_guard);
162 EXPORT_SYMBOL(lu_ref_fini);
/*
 * Common implementation behind lu_ref_add()/lu_ref_add_atomic():
 * allocates a lu_ref_link from the slab cache with the given allocation
 * @flags, fills in scope/source, and chains it on ref->lf_list under
 * lf_guard.  On failure, link is set to ERR_PTR(-ENOMEM) and a second
 * lf_guard section runs — presumably incrementing ref->lf_failed (the
 * line between original 186 and 188 is missing); confirm against the
 * full file.
 */
164 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
165 enum cfs_alloc_flags flags,
169 struct lu_ref_link *link;
172 if (lu_ref_link_kmem != NULL) {
173 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
/* NULL check of 'link' (original lines 174-175) not visible here */
176 link->ll_scope = scope;
177 link->ll_source = source;
178 spin_lock(&ref->lf_guard);
179 cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
/* reference-count update (original line 180) not visible here */
181 spin_unlock(&ref->lf_guard);
/* fallback path: allocation failed or cache not yet created */
186 spin_lock(&ref->lf_guard);
188 spin_unlock(&ref->lf_guard);
189 link = ERR_PTR(-ENOMEM);
/*
 * Adds a new reference to @ref tagged [scope, source], using the
 * standard (possibly blocking) allocation mode.  Thin wrapper around
 * lu_ref_add_context(); see lu_ref_add_atomic() for non-blockable
 * contexts.  Returns the new link or ERR_PTR(-ENOMEM).
 */
195 struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
199 return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
201 EXPORT_SYMBOL(lu_ref_add);
204 * Version of lu_ref_add() to be used in non-blockable contexts.
/* Same contract as lu_ref_add(), but allocates the link with
 * CFS_ALLOC_ATOMIC so it is usable where sleeping is forbidden. */
206 struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
209 return lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, scope, source);
211 EXPORT_SYMBOL(lu_ref_add_atomic);
213 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
214 const char *scope, const void *source)
216 return link->ll_source == source && !strcmp(link->ll_scope, scope);
220 * Maximal chain length seen so far.
/* Threshold for warning about suspiciously long link lists; grown by
 * 1.5x in lu_ref_find() whenever it is exceeded. */
222 static unsigned lu_ref_chain_max_length = 127;
225 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
/* Linear scan of ref->lf_list; caller holds ref->lf_guard (see
 * lu_ref_del()).  The 'iterations' counter declaration/increment and the
 * return statements (found link vs. NULL, presumably) are missing from
 * this listing — confirm against the full file. */
227 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
230 struct lu_ref_link *link;
234 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
236 if (lu_ref_link_eq(link, scope, source)) {
237 if (iterations > lu_ref_chain_max_length) {
238 CWARN("Long lu_ref chain %d \"%s\":%p\n",
239 iterations, scope, source);
240 lu_ref_chain_max_length = iterations * 3 / 2;
/*
 * Drops the reference identified by [scope, source]: looks up the
 * matching link under lf_guard, unchains it, and frees it.  The
 * not-found branch (original lines between 255 and 260 are missing)
 * asserts lf_failed > 0, i.e. a failed (ENOMEM) lu_ref_add() must
 * account for the missing link — presumably lf_failed is decremented
 * there; confirm against the full file.
 */
248 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
250 struct lu_ref_link *link;
252 spin_lock(&ref->lf_guard);
253 link = lu_ref_find(ref, scope, source);
255 cfs_list_del(&link->ll_linkage);
257 spin_unlock(&ref->lf_guard);
/* slab free is done after dropping lf_guard */
258 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
260 REFASSERT(ref, ref->lf_failed > 0);
262 spin_unlock(&ref->lf_guard);
265 EXPORT_SYMBOL(lu_ref_del);
/*
 * Re-tags an existing link in place: verifies it belongs to @ref with
 * the expected [scope, source0], then swaps its source to @source1
 * without reallocating.  link == ERR_PTR(-ENOMEM) means the original
 * lu_ref_add() failed; in that case only the failure accounting
 * (lf_failed > 0) is checked.
 */
267 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
269 const void *source0, const void *source1)
271 spin_lock(&ref->lf_guard);
272 if (link != ERR_PTR(-ENOMEM)) {
273 REFASSERT(ref, link->ll_ref == ref);
274 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
275 link->ll_source = source1;
277 REFASSERT(ref, ref->lf_failed > 0);
279 spin_unlock(&ref->lf_guard);
281 EXPORT_SYMBOL(lu_ref_set_at);
/*
 * Releases a specific link the caller kept a pointer to (avoids
 * lu_ref_del()'s list search).  Verifies the link really belongs to
 * [ref, scope, source] before unchaining and freeing it; an
 * ERR_PTR(-ENOMEM) link only checks the failure accounting, mirroring
 * lu_ref_set_at().
 */
283 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
284 const char *scope, const void *source)
286 if (link != ERR_PTR(-ENOMEM)) {
287 spin_lock(&ref->lf_guard);
288 REFASSERT(ref, link->ll_ref == ref);
289 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
290 cfs_list_del(&link->ll_linkage);
/* lf_guard dropped before the slab free */
292 spin_unlock(&ref->lf_guard);
293 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
295 spin_lock(&ref->lf_guard);
296 REFASSERT(ref, ref->lf_failed > 0);
298 spin_unlock(&ref->lf_guard);
301 EXPORT_SYMBOL(lu_ref_del_at);
303 #if defined(__KERNEL__) && defined(LPROCFS)
/*
 * seq_file ->start: positions the iterator.  seq->private is the marker
 * lu_ref planted by lu_ref_seq_open(); an empty lf_linkage presumably
 * signals end of iteration.  The return statements (original lines 311
 * and 313-315) are not visible in this listing.
 */
305 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
307 struct lu_ref *ref = seq->private;
309 spin_lock(&lu_ref_refs_guard);
310 if (cfs_list_empty(&ref->lf_linkage))
312 spin_unlock(&lu_ref_refs_guard);
/*
 * seq_file ->next: advances the marker one entry down the global list.
 * Reaching the list head (&lu_ref_refs) means the walk is done (the
 * handling between original lines 327 and 331 is not visible here);
 * otherwise the marker is moved just past 'next' so iteration resumes
 * there on the following call.
 */
317 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
319 struct lu_ref *ref = p;
322 LASSERT(seq->private == p);
323 LASSERT(!cfs_list_empty(&ref->lf_linkage));
325 spin_lock(&lu_ref_refs_guard);
326 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
327 if (&next->lf_linkage == &lu_ref_refs) {
331 cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
333 spin_unlock(&lu_ref_refs_guard);
/* seq_file ->stop: body not visible in this listing — presumably a
 * no-op, since start/next take and release lu_ref_refs_guard
 * themselves; confirm against the full file. */
337 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
/*
 * seq_file ->show: prints the lu_ref that follows the marker 'p' in the
 * global list.  Bails out early (return not visible in this listing)
 * when the marker is last or the successor is itself a marker.  The
 * per-ref dump is capped at 64 links to keep the proc read bounded.
 * Both lu_ref_refs_guard and the entry's lf_guard are held across the
 * seq_printf calls.
 */
343 static int lu_ref_seq_show(struct seq_file *seq, void *p)
345 struct lu_ref *ref = p;
348 spin_lock(&lu_ref_refs_guard);
349 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
350 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
351 spin_unlock(&lu_ref_refs_guard);
355 /* print the entry */
356 spin_lock(&next->lf_guard);
357 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
358 next, next->lf_refs, next->lf_failed,
359 next->lf_func, next->lf_line);
360 if (next->lf_refs > 64) {
361 seq_printf(seq, " too many references, skip\n");
363 struct lu_ref_link *link;
366 cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
367 seq_printf(seq, " #%d link: %s %p\n",
368 i++, link->ll_scope, link->ll_source);
370 spin_unlock(&next->lf_guard);
371 spin_unlock(&lu_ref_refs_guard);
/* seq_file iterator vtable for the lu_refs proc entry; wired up in
 * lu_ref_seq_open(). */
376 static struct seq_operations lu_ref_seq_ops = {
377 .start = lu_ref_seq_start,
378 .stop = lu_ref_seq_stop,
379 .next = lu_ref_seq_next,
380 .show = lu_ref_seq_show
/*
 * ->open for the lu_refs proc file: opens a seq_file and, if the global
 * marker is not already in use by another reader, inserts it at the head
 * of lu_ref_refs (the seq->private assignment is not visible in this
 * listing).  When the marker is busy, seq_release() undoes the open —
 * presumably the function then returns a busy/again error; confirm
 * against the full file.
 */
383 static int lu_ref_seq_open(struct inode *inode, struct file *file)
385 struct lu_ref *marker = &lu_ref_marker;
388 result = seq_open(file, &lu_ref_seq_ops);
390 spin_lock(&lu_ref_refs_guard);
391 if (!cfs_list_empty(&marker->lf_linkage))
394 cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
395 spin_unlock(&lu_ref_refs_guard);
398 struct seq_file *f = file->private_data;
401 seq_release(inode, file);
/*
 * ->release for the lu_refs proc file: pulls the marker (stored as
 * seq->private) back off the global list so the next reader can claim
 * it, then releases the seq_file.
 */
408 static int lu_ref_seq_release(struct inode *inode, struct file *file)
410 struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
412 spin_lock(&lu_ref_refs_guard);
413 cfs_list_del_init(&ref->lf_linkage);
414 spin_unlock(&lu_ref_refs_guard);
416 return seq_release(inode, file);
/* File operations for the lu_refs proc entry registered in
 * lu_ref_global_init(); the .read/.llseek members (original lines
 * 422-423) are not visible in this listing. */
419 static struct file_operations lu_ref_dump_fops = {
420 .owner = THIS_MODULE,
421 .open = lu_ref_seq_open,
424 .release = lu_ref_seq_release
/*
 * Startup hook for the lu_ref subsystem: warns that reference tracking
 * (a debugging aid with real overhead) is enabled, initializes the
 * global lock, creates the link slab cache, and — in kernel builds with
 * procfs — registers the lu_refs dump file, tearing the cache back down
 * if that registration fails.  Returns 0 or the failing step's error.
 */
429 int lu_ref_global_init(void)
434 "lu_ref tracking is enabled. Performance isn't.\n");
436 spin_lock_init(&lu_ref_refs_guard);
437 result = lu_kmem_init(lu_ref_caches);
439 #if defined(__KERNEL__) && defined(LPROCFS)
441 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
442 0444, &lu_ref_dump_fops, NULL);
/* proc registration failed: undo the slab-cache creation */
444 lu_kmem_fini(lu_ref_caches);
/*
 * Shutdown counterpart of lu_ref_global_init(): removes the lu_refs
 * proc entry (kernel/procfs builds only) and destroys the lu_ref_link
 * slab cache.
 */
451 void lu_ref_global_fini(void)
453 #if defined(__KERNEL__) && defined(LPROCFS)
454 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
456 lu_kmem_fini(lu_ref_caches);
459 #endif /* USE_LU_REF */