4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * lustre/obdclass/lu_ref.c
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
44 # include <libcfs/libcfs.h>
46 # include <liblustre.h>
50 #include <obd_class.h>
51 #include <obd_support.h>
/**
 * Asserts a condition for a given lu_ref. Must be called with
 * lu_ref::lf_guard held.
 *
 * On failure dumps the offending lu_ref (and, with the guard dropped,
 * every active lu_ref) before asserting, so the log shows who held
 * the stray references.
 */
#define REFASSERT(ref, expr) do {                                       \
        struct lu_ref *__tmp = (ref);                                   \
                                                                        \
        if (unlikely(!(expr))) {                                        \
                lu_ref_print(__tmp);                                    \
                /* drop the guard: lu_ref_print_all() takes other locks */ \
                spin_unlock(&__tmp->lf_guard);                          \
                lu_ref_print_all();                                     \
                LASSERT(0);                                             \
                spin_lock(&__tmp->lf_guard);                            \
        }                                                               \
} while (0)
73 struct lu_ref *ll_ref;
74 cfs_list_t ll_linkage;
76 const void *ll_source;
79 static cfs_mem_cache_t *lu_ref_link_kmem;
81 static struct lu_kmem_descr lu_ref_caches[] = {
83 .ckd_cache = &lu_ref_link_kmem,
84 .ckd_name = "lu_ref_link_kmem",
85 .ckd_size = sizeof (struct lu_ref_link)
/**
 * Global list of active (initialized, but not finalized) lu_ref's.
 *
 * Protected by lu_ref_refs_guard.
 */
97 static CFS_LIST_HEAD(lu_ref_refs);
98 static spinlock_t lu_ref_refs_guard;
99 static struct lu_ref lu_ref_marker = {
100 .lf_guard = DEFINE_SPINLOCK(lu_ref_marker.lf_guard),
101 .lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
102 .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
105 void lu_ref_print(const struct lu_ref *ref)
107 struct lu_ref_link *link;
109 CERROR("lu_ref: %p %d %d %s:%d\n",
110 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
111 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
112 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
115 EXPORT_SYMBOL(lu_ref_print);
117 static int lu_ref_is_marker(const struct lu_ref *ref)
119 return (ref == &lu_ref_marker);
122 void lu_ref_print_all(void)
126 spin_lock(&lu_ref_refs_guard);
127 cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
128 if (lu_ref_is_marker(ref))
131 spin_lock(&ref->lf_guard);
133 spin_unlock(&ref->lf_guard);
135 spin_unlock(&lu_ref_refs_guard);
137 EXPORT_SYMBOL(lu_ref_print_all);
139 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
144 spin_lock_init(&ref->lf_guard);
145 CFS_INIT_LIST_HEAD(&ref->lf_list);
146 spin_lock(&lu_ref_refs_guard);
147 cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
148 spin_unlock(&lu_ref_refs_guard);
150 EXPORT_SYMBOL(lu_ref_init_loc);
152 void lu_ref_fini(struct lu_ref *ref)
154 REFASSERT(ref, cfs_list_empty(&ref->lf_list));
155 REFASSERT(ref, ref->lf_refs == 0);
156 spin_lock(&lu_ref_refs_guard);
157 cfs_list_del_init(&ref->lf_linkage);
158 spin_unlock(&lu_ref_refs_guard);
160 EXPORT_SYMBOL(lu_ref_fini);
162 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
163 enum cfs_alloc_flags flags,
167 struct lu_ref_link *link;
170 if (lu_ref_link_kmem != NULL) {
171 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
174 link->ll_scope = scope;
175 link->ll_source = source;
176 spin_lock(&ref->lf_guard);
177 cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
179 spin_unlock(&ref->lf_guard);
184 spin_lock(&ref->lf_guard);
186 spin_unlock(&ref->lf_guard);
187 link = ERR_PTR(-ENOMEM);
193 struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
197 return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
199 EXPORT_SYMBOL(lu_ref_add);
202 * Version of lu_ref_add() to be used in non-blockable contexts.
204 struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
207 return lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, scope, source);
209 EXPORT_SYMBOL(lu_ref_add_atomic);
211 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
212 const char *scope, const void *source)
214 return link->ll_source == source && !strcmp(link->ll_scope, scope);
/**
 * Maximal chain length seen so far. Used by lu_ref_find() to warn about
 * unusually long link lists; bumped when exceeded.
 */
static unsigned lu_ref_chain_max_length = 127;
223 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
225 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
228 struct lu_ref_link *link;
232 cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
234 if (lu_ref_link_eq(link, scope, source)) {
235 if (iterations > lu_ref_chain_max_length) {
236 CWARN("Long lu_ref chain %d \"%s\":%p\n",
237 iterations, scope, source);
238 lu_ref_chain_max_length = iterations * 3 / 2;
246 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
248 struct lu_ref_link *link;
250 spin_lock(&ref->lf_guard);
251 link = lu_ref_find(ref, scope, source);
253 cfs_list_del(&link->ll_linkage);
255 spin_unlock(&ref->lf_guard);
256 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
258 REFASSERT(ref, ref->lf_failed > 0);
260 spin_unlock(&ref->lf_guard);
263 EXPORT_SYMBOL(lu_ref_del);
265 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
267 const void *source0, const void *source1)
269 spin_lock(&ref->lf_guard);
270 if (link != ERR_PTR(-ENOMEM)) {
271 REFASSERT(ref, link->ll_ref == ref);
272 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
273 link->ll_source = source1;
275 REFASSERT(ref, ref->lf_failed > 0);
277 spin_unlock(&ref->lf_guard);
279 EXPORT_SYMBOL(lu_ref_set_at);
281 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
282 const char *scope, const void *source)
284 if (link != ERR_PTR(-ENOMEM)) {
285 spin_lock(&ref->lf_guard);
286 REFASSERT(ref, link->ll_ref == ref);
287 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
288 cfs_list_del(&link->ll_linkage);
290 spin_unlock(&ref->lf_guard);
291 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
293 spin_lock(&ref->lf_guard);
294 REFASSERT(ref, ref->lf_failed > 0);
296 spin_unlock(&ref->lf_guard);
299 EXPORT_SYMBOL(lu_ref_del_at);
301 #if defined(__KERNEL__) && defined(LPROCFS)
303 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
305 struct lu_ref *ref = seq->private;
307 spin_lock(&lu_ref_refs_guard);
308 if (cfs_list_empty(&ref->lf_linkage))
310 spin_unlock(&lu_ref_refs_guard);
315 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
317 struct lu_ref *ref = p;
320 LASSERT(seq->private == p);
321 LASSERT(!cfs_list_empty(&ref->lf_linkage));
323 spin_lock(&lu_ref_refs_guard);
324 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
325 if (&next->lf_linkage == &lu_ref_refs) {
329 cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
331 spin_unlock(&lu_ref_refs_guard);
335 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
341 static int lu_ref_seq_show(struct seq_file *seq, void *p)
343 struct lu_ref *ref = p;
346 spin_lock(&lu_ref_refs_guard);
347 next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
348 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
349 spin_unlock(&lu_ref_refs_guard);
353 /* print the entry */
354 spin_lock(&next->lf_guard);
355 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
356 next, next->lf_refs, next->lf_failed,
357 next->lf_func, next->lf_line);
358 if (next->lf_refs > 64) {
359 seq_printf(seq, " too many references, skip\n");
361 struct lu_ref_link *link;
364 cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
365 seq_printf(seq, " #%d link: %s %p\n",
366 i++, link->ll_scope, link->ll_source);
368 spin_unlock(&next->lf_guard);
369 spin_unlock(&lu_ref_refs_guard);
374 static struct seq_operations lu_ref_seq_ops = {
375 .start = lu_ref_seq_start,
376 .stop = lu_ref_seq_stop,
377 .next = lu_ref_seq_next,
378 .show = lu_ref_seq_show
381 static int lu_ref_seq_open(struct inode *inode, struct file *file)
383 struct lu_ref *marker = &lu_ref_marker;
386 result = seq_open(file, &lu_ref_seq_ops);
388 spin_lock(&lu_ref_refs_guard);
389 if (!cfs_list_empty(&marker->lf_linkage))
392 cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
393 spin_unlock(&lu_ref_refs_guard);
396 struct seq_file *f = file->private_data;
399 seq_release(inode, file);
406 static int lu_ref_seq_release(struct inode *inode, struct file *file)
408 struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
410 spin_lock(&lu_ref_refs_guard);
411 cfs_list_del_init(&ref->lf_linkage);
412 spin_unlock(&lu_ref_refs_guard);
414 return seq_release(inode, file);
417 static struct file_operations lu_ref_dump_fops = {
418 .owner = THIS_MODULE,
419 .open = lu_ref_seq_open,
422 .release = lu_ref_seq_release
427 int lu_ref_global_init(void)
432 "lu_ref tracking is enabled. Performance isn't.\n");
434 spin_lock_init(&lu_ref_refs_guard);
435 result = lu_kmem_init(lu_ref_caches);
437 #if defined(__KERNEL__) && defined(LPROCFS)
439 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
440 0444, &lu_ref_dump_fops, NULL);
442 lu_kmem_fini(lu_ref_caches);
449 void lu_ref_global_fini(void)
451 #if defined(__KERNEL__) && defined(LPROCFS)
452 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
454 lu_kmem_fini(lu_ref_caches);
457 #endif /* USE_LU_REF */