4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * lustre/obdclass/lu_ref.c
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
47 # include <libcfs/libcfs.h>
49 # include <liblustre.h>
53 #include <obd_class.h>
54 #include <obd_support.h>
60 * Asserts a condition for a given lu_ref. Must be called with
61 * lu_ref::lf_guard held.
/*
 * Assert @expr for @ref while the caller holds ref->lf_guard: on failure,
 * dump the ref's live links via lu_ref_print(), drop lf_guard around the
 * (elided) failure handling, and re-take it before continuing.
 * NOTE(review): several macro lines (the do { ... } while (0) wrapper and
 * the failure branch between the unlock/lock) are not visible in this view.
 */
63 #define REFASSERT(ref, expr) \
65 struct lu_ref *__ref = (ref); \
67 if (unlikely(!(expr))) { \
68 lu_ref_print(__ref); \
69 cfs_spin_unlock(&__ref->lf_guard); \
72 cfs_spin_lock(&__ref->lf_guard); \
/*
 * Fields of a single reference "link": the owning lu_ref, its position on
 * that ref's lf_list, and the opaque source cookie supplied by the holder.
 * NOTE(review): the enclosing struct declaration line and the ll_scope
 * field are elided in this view.
 */
77 struct lu_ref *ll_ref;
78 cfs_list_t ll_linkage;
80 const void *ll_source;
/* Slab cache used for all struct lu_ref_link allocations. */
83 static cfs_mem_cache_t *lu_ref_link_kmem;
/* Cache descriptor table consumed by lu_kmem_init()/lu_kmem_fini(). */
85 static struct lu_kmem_descr lu_ref_caches[] = {
87 .ckd_cache = &lu_ref_link_kmem,
88 .ckd_name = "lu_ref_link_kmem",
89 .ckd_size = sizeof (struct lu_ref_link)
97 * Global list of active (initialized, but not finalized) lu_ref's.
99 * Protected by lu_ref_refs_guard.
101 static CFS_LIST_HEAD(lu_ref_refs);
102 static cfs_spinlock_t lu_ref_refs_guard;
/*
 * Sentinel lu_ref threaded onto lu_ref_refs by lu_ref_seq_open() so a
 * /proc walk can keep its place while lu_ref_refs_guard is dropped; it
 * is skipped by lu_ref_print_all() and lu_ref_seq_show() through
 * lu_ref_is_marker().
 */
103 static struct lu_ref lu_ref_marker = {
104 .lf_guard = CFS_SPIN_LOCK_UNLOCKED,
105 .lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
106 .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
/*
 * Dump @ref's counters (refs, failed adds, init site) and every recorded
 * link through CERROR.  Takes no locks itself; presumably the caller
 * holds ref->lf_guard (REFASSERT() calls it that way) -- TODO confirm
 * for all callers.
 */
109 void lu_ref_print(const struct lu_ref *ref)
111 struct lu_ref_link *link;
113 CERROR("lu_ref: %p %d %d %s:%d\n",
114 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
115 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
116 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
119 EXPORT_SYMBOL(lu_ref_print);
/* True iff @ref is the seq_file iteration sentinel, not a real lu_ref. */
121 static int lu_ref_is_marker(const struct lu_ref *ref)
123 return (ref == &lu_ref_marker);
/*
 * Walk the global lu_ref_refs list and print every live lu_ref, skipping
 * the seq_file marker.  Holds lu_ref_refs_guard for the whole walk and
 * each ref's lf_guard around the per-ref print (the print call itself is
 * elided in this view).
 */
126 void lu_ref_print_all(void)
130 cfs_spin_lock(&lu_ref_refs_guard);
131 cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
132 if (lu_ref_is_marker(ref))
135 cfs_spin_lock(&ref->lf_guard);
137 cfs_spin_unlock(&ref->lf_guard);
139 cfs_spin_unlock(&lu_ref_refs_guard);
141 EXPORT_SYMBOL(lu_ref_print_all);
/*
 * Initialize @ref and publish it on the global lu_ref_refs list.  The
 * @func/@line arguments record the initialization site; the assignments
 * storing them (and the counter resets) are elided in this view.
 */
143 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
148 cfs_spin_lock_init(&ref->lf_guard);
149 CFS_INIT_LIST_HEAD(&ref->lf_list);
150 cfs_spin_lock(&lu_ref_refs_guard);
151 cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
152 cfs_spin_unlock(&lu_ref_refs_guard);
154 EXPORT_SYMBOL(lu_ref_init_loc);
/*
 * Finalize @ref: assert that no links and no references remain, then
 * unhook it from the global lu_ref_refs list.
 */
156 void lu_ref_fini(struct lu_ref *ref)
158 REFASSERT(ref, cfs_list_empty(&ref->lf_list));
159 REFASSERT(ref, ref->lf_refs == 0);
160 cfs_spin_lock(&lu_ref_refs_guard);
161 cfs_list_del_init(&ref->lf_linkage);
162 cfs_spin_unlock(&lu_ref_refs_guard);
164 EXPORT_SYMBOL(lu_ref_fini);
/*
 * Allocate a new link from the slab cache (with the given allocation
 * @flags), fill in [scope, source], and enqueue it on @ref's list under
 * lf_guard.  On allocation failure, ERR_PTR(-ENOMEM) is returned and the
 * (elided) branch under lf_guard presumably accounts the failure --
 * TODO confirm; the allocation-success check between the OBD_SLAB_ALLOC
 * and the field assignments is also elided in this view.
 */
166 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
167 enum cfs_alloc_flags flags,
171 struct lu_ref_link *link;
174 if (lu_ref_link_kmem != NULL) {
175 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
178 link->ll_scope = scope;
179 link->ll_source = source;
180 cfs_spin_lock(&ref->lf_guard);
181 cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
183 cfs_spin_unlock(&ref->lf_guard);
188 cfs_spin_lock(&ref->lf_guard);
190 cfs_spin_unlock(&ref->lf_guard);
191 link = ERR_PTR(-ENOMEM);
/*
 * Record a [scope, source] reference on @ref from a context that may
 * block (CFS_ALLOC_STD allocation).
 */
197 struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
201 return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
203 EXPORT_SYMBOL(lu_ref_add);
206 * Version of lu_ref_add() to be used in non-blockable contexts.
/* Uses CFS_ALLOC_ATOMIC so the slab allocation cannot sleep. */
208 struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
211 return lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, scope, source);
213 EXPORT_SYMBOL(lu_ref_add_atomic);
/*
 * True iff @link matches the given [scope, source] pair.  Source is
 * compared by pointer identity, scope by string content.
 */
215 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
216 const char *scope, const void *source)
218 return link->ll_source == source && !strcmp(link->ll_scope, scope);
222 * Maximal chain length seen so far.
224 static unsigned lu_ref_chain_max_length = 127;
227 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
/*
 * Linear scan of ref->lf_list, called with lf_guard held (see
 * lu_ref_del()).  When a match is found only after an unusually long
 * walk, warn and raise the watermark to 1.5x the observed length.
 * NOTE(review): the iteration-counter increment, the matched-link
 * return, and the not-found return path are elided in this view.
 */
229 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
232 struct lu_ref_link *link;
236 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
238 if (lu_ref_link_eq(link, scope, source)) {
239 if (iterations > lu_ref_chain_max_length) {
240 CWARN("Long lu_ref chain %d \"%s\":%p\n",
241 iterations, scope, source);
242 lu_ref_chain_max_length = iterations * 3 / 2;
/*
 * Look up the [scope, source] link on @ref, unlink it, and free it back
 * to the slab cache (outside lf_guard).  If no link is found, the
 * (elided) branch asserts a failed add was recorded (lf_failed > 0)
 * before adjusting the accounting -- TODO confirm the elided code.
 */
250 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
252 struct lu_ref_link *link;
254 cfs_spin_lock(&ref->lf_guard);
255 link = lu_ref_find(ref, scope, source);
257 cfs_list_del(&link->ll_linkage);
259 cfs_spin_unlock(&ref->lf_guard);
260 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
262 REFASSERT(ref, ref->lf_failed > 0);
264 cfs_spin_unlock(&ref->lf_guard);
267 EXPORT_SYMBOL(lu_ref_del);
/*
 * Re-point an existing @link (scope unchanged) from @source0 to
 * @source1, after asserting it belongs to @ref and currently matches
 * [scope, source0].  An ERR_PTR(-ENOMEM) link -- produced by a failed
 * lu_ref_add*() -- instead asserts that a failure was recorded.
 */
269 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
271 const void *source0, const void *source1)
273 cfs_spin_lock(&ref->lf_guard);
274 if (link != ERR_PTR(-ENOMEM)) {
275 REFASSERT(ref, link->ll_ref == ref);
276 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
277 link->ll_source = source1;
279 REFASSERT(ref, ref->lf_failed > 0);
281 cfs_spin_unlock(&ref->lf_guard);
283 EXPORT_SYMBOL(lu_ref_set_at);
/*
 * Remove a specific @link (as returned by lu_ref_add*()), first
 * asserting it belongs to @ref and matches [scope, source]; the slab
 * object is freed after lf_guard is dropped.  An ERR_PTR(-ENOMEM) link
 * instead asserts that a failed add was recorded (lf_failed > 0).
 */
285 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
286 const char *scope, const void *source)
288 if (link != ERR_PTR(-ENOMEM)) {
289 cfs_spin_lock(&ref->lf_guard);
290 REFASSERT(ref, link->ll_ref == ref);
291 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
292 cfs_list_del(&link->ll_linkage);
294 cfs_spin_unlock(&ref->lf_guard);
295 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
297 cfs_spin_lock(&ref->lf_guard);
298 REFASSERT(ref, ref->lf_failed > 0);
300 cfs_spin_unlock(&ref->lf_guard);
303 EXPORT_SYMBOL(lu_ref_del_at);
305 #if defined(__KERNEL__) && defined(LPROCFS)
/*
 * seq_file start: the per-open marker ref (stashed in seq->private) is
 * the iteration cursor.  If the marker has been unhooked from the
 * global list the walk is over.  NOTE(review): the return statements
 * for both outcomes are elided in this view.
 */
307 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
309 struct lu_ref *ref = seq->private;
311 cfs_spin_lock(&lu_ref_refs_guard);
312 if (cfs_list_empty(&ref->lf_linkage))
314 cfs_spin_unlock(&lu_ref_refs_guard);
/*
 * seq_file next: advance the marker one entry down the global list.
 * Moving the marker itself (cfs_list_move) -- rather than remembering a
 * bare pointer -- keeps the position valid while lu_ref_refs_guard is
 * dropped between iterations.  NOTE(review): the end-of-list branch body
 * and the return are elided in this view.
 */
319 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
321 struct lu_ref *ref = p;
324 LASSERT(seq->private == p);
325 LASSERT(!cfs_list_empty(&ref->lf_linkage));
327 cfs_spin_lock(&lu_ref_refs_guard);
328 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
329 if (&next->lf_linkage == &lu_ref_refs) {
333 cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
335 cfs_spin_unlock(&lu_ref_refs_guard);
/* seq_file stop: nothing to release (body elided/empty in this view). */
339 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
/*
 * seq_file show: print the entry immediately following the marker,
 * skipping the list head and other readers' markers.  Refs with more
 * than 64 links are summarized instead of dumped link-by-link.
 */
345 static int lu_ref_seq_show(struct seq_file *seq, void *p)
347 struct lu_ref *ref = p;
350 cfs_spin_lock(&lu_ref_refs_guard);
351 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
352 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
353 cfs_spin_unlock(&lu_ref_refs_guard);
357 /* print the entry */
359 cfs_spin_lock(&next->lf_guard);
360 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
361 next, next->lf_refs, next->lf_failed,
362 next->lf_func, next->lf_line);
363 if (next->lf_refs > 64) {
364 seq_printf(seq, " too many references, skip\n");
366 struct lu_ref_link *link;
369 cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
370 seq_printf(seq, " #%d link: %s %p\n",
371 i++, link->ll_scope, link->ll_source);
373 cfs_spin_unlock(&next->lf_guard);
374 cfs_spin_unlock(&lu_ref_refs_guard);
/* seq_file iterator wiring for the /proc "lu_refs" dump. */
379 static struct seq_operations lu_ref_seq_ops = {
380 .start = lu_ref_seq_start,
381 .stop = lu_ref_seq_stop,
382 .next = lu_ref_seq_next,
383 .show = lu_ref_seq_show
/*
 * Open handler for the /proc entry: set up the seq_file and thread the
 * shared marker onto the global list as this reader's cursor, storing
 * it as seq->private (assignment elided).  If the marker is already in
 * use by another reader, or on error, seq_open() is undone via
 * seq_release().  NOTE(review): the busy/error branch bodies are
 * partially elided in this view.
 */
386 static int lu_ref_seq_open(struct inode *inode, struct file *file)
388 struct lu_ref *marker = &lu_ref_marker;
391 result = seq_open(file, &lu_ref_seq_ops);
393 cfs_spin_lock(&lu_ref_refs_guard);
394 if (!cfs_list_empty(&marker->lf_linkage))
397 cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
398 cfs_spin_unlock(&lu_ref_refs_guard);
401 struct seq_file *f = file->private_data;
404 seq_release(inode, file);
/*
 * Release handler: unhook this reader's marker from the global list
 * (freeing it for the next reader), then let seq_release() clean up.
 */
411 static int lu_ref_seq_release(struct inode *inode, struct file *file)
413 struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
415 cfs_spin_lock(&lu_ref_refs_guard);
416 cfs_list_del_init(&ref->lf_linkage);
417 cfs_spin_unlock(&lu_ref_refs_guard);
419 return seq_release(inode, file);
/*
 * file_operations for the /proc "lu_refs" entry.  NOTE(review): the
 * .read member (normally seq_read) is elided in this view.
 */
422 static struct file_operations lu_ref_dump_fops = {
423 .owner = THIS_MODULE,
424 .open = lu_ref_seq_open,
427 .release = lu_ref_seq_release
/*
 * Module-wide setup: warn that tracking is enabled, initialize the
 * global list guard, create the link slab cache, and (kernel+lprocfs
 * builds only) register the read-only /proc "lu_refs" dump.  The slab
 * cache is torn down again if /proc registration fails.  NOTE(review):
 * the result checks and final return are elided in this view.
 */
432 int lu_ref_global_init(void)
437 "lu_ref tracking is enabled. Performance isn't.\n");
440 cfs_spin_lock_init(&lu_ref_refs_guard);
441 result = lu_kmem_init(lu_ref_caches);
443 #if defined(__KERNEL__) && defined(LPROCFS)
445 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
446 0444, &lu_ref_dump_fops, NULL);
448 lu_kmem_fini(lu_ref_caches);
/*
 * Module-wide teardown: remove the /proc entry (kernel+lprocfs builds
 * only), then free the link slab cache.
 */
455 void lu_ref_global_fini(void)
457 #if defined(__KERNEL__) && defined(LPROCFS)
458 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
460 lu_kmem_fini(lu_ref_caches);
463 #endif /* USE_LU_REF */