4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lu_ref.c
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 #define DEBUG_SUBSYSTEM S_CLASS
45 #include <libcfs/libcfs.h>
47 #include <obd_class.h>
48 #include <obd_support.h>
54 * Asserts a condition for a given lu_ref. Must be called with
55 * lu_ref::lf_guard held.
/*
 * NOTE(review): on failure the ref (with all its links) is dumped and
 * lf_guard is dropped then re-acquired around the failure handling in
 * the elided lines (presumably an LBUG/debugger hook), so the caller's
 * locking state is unchanged on return — confirm against full source.
 */
57 #define REFASSERT(ref, expr) do { \
58 struct lu_ref *__tmp = (ref); \
60 if (unlikely(!(expr))) { \
61 lu_ref_print(__tmp); \
62 spin_unlock(&__tmp->lf_guard); \
65 spin_lock(&__tmp->lf_guard); \
/* slab cache for lu_ref_link objects; NULL until lu_ref_global_init() */
69 static struct kmem_cache *lu_ref_link_kmem;
71 static struct lu_kmem_descr lu_ref_caches[] = {
73 .ckd_cache = &lu_ref_link_kmem,
74 .ckd_name = "lu_ref_link_kmem",
75 .ckd_size = sizeof (struct lu_ref_link)
83 * Global list of active (initialized, but not finalized) lu_ref's.
85 * Protected by lu_ref_refs_guard.
87 static struct list_head lu_ref_refs;
88 static spinlock_t lu_ref_refs_guard;
/* sentinel lu_ref threaded into lu_ref_refs by the /proc seq_file code
 * below to remember the iterator's position; never a real tracked ref */
89 static struct lu_ref lu_ref_marker = {
90 .lf_guard = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
91 .lf_list = LIST_HEAD_INIT(lu_ref_marker.lf_list),
92 .lf_linkage = LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
/*
 * Dump @ref's counters and every outstanding link to the debug log.
 * NOTE(review): lf_list is walked without taking lf_guard here; callers
 * (e.g. REFASSERT) appear to hold the lock already — confirm.
 */
95 void lu_ref_print(const struct lu_ref *ref)
97 struct lu_ref_link *link;
99 CERROR("lu_ref: %p %d %d %s:%d\n",
100 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
101 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
102 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
/* True iff @ref is the static seq_file iterator sentinel, not a real ref */
106 static int lu_ref_is_marker(const struct lu_ref *ref)
108 return (ref == &lu_ref_marker);
/*
 * Print every active lu_ref on the global list, skipping the iterator
 * sentinel.  Holds lu_ref_refs_guard for the walk and each entry's
 * lf_guard around the per-ref dump (dump call is in elided lines).
 */
111 void lu_ref_print_all(void)
115 spin_lock(&lu_ref_refs_guard);
116 list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
117 if (lu_ref_is_marker(ref))
120 spin_lock(&ref->lf_guard);
122 spin_unlock(&ref->lf_guard);
124 spin_unlock(&lu_ref_refs_guard);
/*
 * Initialize @ref and publish it on the global lu_ref_refs list.
 * @func/@line record the call site (assignments to lf_func/lf_line are
 * presumably in the elided lines — confirm against full source).
 */
127 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
132 spin_lock_init(&ref->lf_guard);
133 INIT_LIST_HEAD(&ref->lf_list);
134 spin_lock(&lu_ref_refs_guard);
135 list_add(&ref->lf_linkage, &lu_ref_refs);
136 spin_unlock(&lu_ref_refs_guard);
/*
 * Finalize @ref: assert that no links and no references remain, then
 * unlink it from the global lu_ref_refs list.
 */
139 void lu_ref_fini(struct lu_ref *ref)
141 spin_lock(&ref->lf_guard);
142 REFASSERT(ref, list_empty(&ref->lf_list));
143 REFASSERT(ref, ref->lf_refs == 0);
144 spin_unlock(&ref->lf_guard);
145 spin_lock(&lu_ref_refs_guard);
146 list_del_init(&ref->lf_linkage);
147 spin_unlock(&lu_ref_refs_guard);
/*
 * Common helper for lu_ref_add()/lu_ref_add_atomic(): allocate a link
 * from the slab using the caller-supplied GFP @flags, fill in
 * scope/source and queue it on @ref's list under lf_guard.  On
 * allocation failure the tail path counts the miss in lf_failed and
 * returns ERR_PTR(-ENOMEM) instead of a link.
 */
150 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
155 struct lu_ref_link *link;
158 if (lu_ref_link_kmem != NULL) {
159 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
162 link->ll_scope = scope;
163 link->ll_source = source;
164 spin_lock(&ref->lf_guard);
165 list_add_tail(&link->ll_linkage, &ref->lf_list);
167 spin_unlock(&ref->lf_guard);
/* no link allocated: record the failure instead of tracking the ref */
172 spin_lock(&ref->lf_guard);
174 spin_unlock(&ref->lf_guard);
175 link = ERR_PTR(-ENOMEM);
/*
 * Record a new reference on @ref identified by (@scope, @source).
 * Allocates with GFP_IOFS, so it may block; use lu_ref_add_atomic()
 * from non-blockable contexts.
 */
181 void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
184 lu_ref_add_context(ref, GFP_IOFS, scope, source);
/*
 * Like lu_ref_add(), but uses the caller-provided, caller-owned @link:
 * no allocation is performed, so this never fails and never sleeps
 * (beyond the spinlock).
 */
187 void lu_ref_add_at(struct lu_ref *ref, struct lu_ref_link *link,
188 const char *scope, const void *source)
191 link->ll_scope = scope;
192 link->ll_source = source;
193 spin_lock(&ref->lf_guard);
194 list_add_tail(&link->ll_linkage, &ref->lf_list);
196 spin_unlock(&ref->lf_guard);
200 * Version of lu_ref_add() to be used in non-blockable contexts.
/* GFP_ATOMIC allocation: never sleeps; on failure the miss is tallied
 * in lf_failed by lu_ref_add_context() */
202 void lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
205 lu_ref_add_context(ref, GFP_ATOMIC, scope, source);
/* True iff @link matches the (@scope, @source) pair: source compared by
 * pointer identity, scope by string content */
208 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
209 const char *scope, const void *source)
211 return link->ll_source == source && !strcmp(link->ll_scope, scope);
215 * Maximal chain length seen so far.
/* warning threshold for lu_ref_find(); raised by ~1.5x each time it is
 * exceeded so each new record warns only once */
217 static unsigned lu_ref_chain_max_length = 127;
220 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
/* Linear scan of ref->lf_list; caller must hold ref->lf_guard.
 * NOTE(review): returns the matching link, or (per the elided tail)
 * presumably NULL when nothing matches — confirm against full source. */
222 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
225 struct lu_ref_link *link;
229 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
231 if (lu_ref_link_eq(link, scope, source)) {
232 if (iterations > lu_ref_chain_max_length) {
/* long chains make this O(n) lookup expensive; warn when a new record
 * length is hit */
233 CWARN("Long lu_ref chain %d \"%s\":%p\n",
234 iterations, scope, source);
235 lu_ref_chain_max_length = iterations * 3 / 2;
/*
 * Drop the reference recorded under (@scope, @source): locate its link,
 * unlink it and return it to the slab.  When no link matches (elided
 * else branch), the deletion is asserted to correspond to an earlier
 * failed add (lf_failed > 0), which is presumably decremented there.
 */
243 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
245 struct lu_ref_link *link;
247 spin_lock(&ref->lf_guard);
248 link = lu_ref_find(ref, scope, source);
250 list_del(&link->ll_linkage);
/* lf_guard is released before the slab free runs */
252 spin_unlock(&ref->lf_guard);
253 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
255 REFASSERT(ref, ref->lf_failed > 0);
257 spin_unlock(&ref->lf_guard);
/*
 * Re-point an existing caller-owned link from @source0 to @source1,
 * after asserting that it is valid, attached to @ref and currently
 * matches (@scope, @source0).
 */
261 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
263 const void *source0, const void *source1)
265 spin_lock(&ref->lf_guard);
266 REFASSERT(ref, link != NULL && !IS_ERR(link));
267 REFASSERT(ref, link->ll_ref == ref);
268 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
269 link->ll_source = source1;
270 spin_unlock(&ref->lf_guard);
/*
 * Counterpart of lu_ref_add_at(): detach a caller-owned @link after
 * asserting it really is @ref's (@scope, @source) link.  The link
 * memory belongs to the caller and is not freed here.
 */
273 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
274 const char *scope, const void *source)
276 spin_lock(&ref->lf_guard);
277 REFASSERT(ref, link != NULL && !IS_ERR(link));
278 REFASSERT(ref, link->ll_ref == ref);
279 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
280 list_del(&link->ll_linkage);
282 spin_unlock(&ref->lf_guard);
285 #ifdef CONFIG_PROC_FS
/*
 * seq_file start: seq->private is the iterator marker lu_ref.  When the
 * marker is no longer linked, iteration is over; otherwise the elided
 * lines presumably return the marker as the iteration cursor — confirm.
 */
287 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
289 struct lu_ref *ref = seq->private;
291 spin_lock(&lu_ref_refs_guard);
292 if (list_empty(&ref->lf_linkage))
294 spin_unlock(&lu_ref_refs_guard);
/*
 * seq_file next: advance the marker one entry down the global list by
 * moving it past its successor.  The elided branch handles hitting the
 * list head (end of iteration).
 */
299 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
301 struct lu_ref *ref = p;
304 LASSERT(seq->private == p);
305 LASSERT(!list_empty(&ref->lf_linkage));
307 spin_lock(&lu_ref_refs_guard);
308 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
309 if (&next->lf_linkage == &lu_ref_refs) {
/* hop the marker over @next so the following call resumes after it */
313 list_move(&ref->lf_linkage, &next->lf_linkage);
315 spin_unlock(&lu_ref_refs_guard);
/* seq_file stop: appears to be a no-op (body elided) — the marker stays
 * on the list until lu_ref_seq_release(); confirm against full source */
319 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
/*
 * seq_file show: print the entry *after* the marker (the marker itself
 * is never shown); stop early at the list head or at another marker.
 * Lock order: lu_ref_refs_guard, then the entry's lf_guard.  Entries
 * with more than 64 links are summarized instead of dumped in full.
 */
325 static int lu_ref_seq_show(struct seq_file *seq, void *p)
327 struct lu_ref *ref = p;
330 spin_lock(&lu_ref_refs_guard);
331 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
332 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
333 spin_unlock(&lu_ref_refs_guard);
337 /* print the entry */
338 spin_lock(&next->lf_guard);
339 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
340 next, next->lf_refs, next->lf_failed,
341 next->lf_func, next->lf_line);
342 if (next->lf_refs > 64) {
343 seq_printf(seq, " too many references, skip\n");
345 struct lu_ref_link *link;
348 list_for_each_entry(link, &next->lf_list, ll_linkage)
349 seq_printf(seq, " #%d link: %s %p\n",
350 i++, link->ll_scope, link->ll_source);
352 spin_unlock(&next->lf_guard);
353 spin_unlock(&lu_ref_refs_guard);
/* iterator over the global lu_ref list backing the /proc "lu_refs" file.
 * NOTE(review): could likely be const if this kernel's seq_open() takes
 * a const seq_operations pointer — confirm before changing. */
358 static struct seq_operations lu_ref_seq_ops = {
359 .start = lu_ref_seq_start,
360 .stop = lu_ref_seq_stop,
361 .next = lu_ref_seq_next,
362 .show = lu_ref_seq_show
/*
 * open(): set up the seq_file, then insert the single global marker at
 * the head of lu_ref_refs so iteration starts from the beginning.  A
 * marker already linked means the file is already open elsewhere
 * (elided error path); on failure the seq_file is torn down again.
 * NOTE(review): one static marker implies only one concurrent reader
 * of /proc lu_refs is supported — confirm against full source.
 */
365 static int lu_ref_seq_open(struct inode *inode, struct file *file)
367 struct lu_ref *marker = &lu_ref_marker;
370 result = seq_open(file, &lu_ref_seq_ops);
372 spin_lock(&lu_ref_refs_guard);
373 if (!list_empty(&marker->lf_linkage))
376 list_add(&marker->lf_linkage, &lu_ref_refs);
377 spin_unlock(&lu_ref_refs_guard);
380 struct seq_file *f = file->private_data;
383 seq_release(inode, file);
/*
 * release(): unlink the marker from the global list (making the file
 * openable again) and release the seq_file.
 */
390 static int lu_ref_seq_release(struct inode *inode, struct file *file)
392 struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
394 spin_lock(&lu_ref_refs_guard);
395 list_del_init(&ref->lf_linkage);
396 spin_unlock(&lu_ref_refs_guard);
398 return seq_release(inode, file);
/* file operations for the read-only /proc "lu_refs" dump file */
401 static struct file_operations lu_ref_dump_fops = {
402 .owner = THIS_MODULE,
403 .open = lu_ref_seq_open,
406 .release = lu_ref_seq_release
409 #endif /* CONFIG_PROC_FS */
/*
 * Module-wide init of lu_ref tracking: set up the global list and its
 * guard, create the lu_ref_link slab cache and, under CONFIG_PROC_FS,
 * the "lu_refs" seq_file entry.  The cache is torn down again if the
 * proc file cannot be created.  Returns 0 or a negative errno.
 */
411 int lu_ref_global_init(void)
416 "lu_ref tracking is enabled. Performance isn't.\n");
418 INIT_LIST_HEAD(&lu_ref_refs);
419 spin_lock_init(&lu_ref_refs_guard);
420 result = lu_kmem_init(lu_ref_caches);
422 #ifdef CONFIG_PROC_FS
424 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
425 0444, &lu_ref_dump_fops, NULL);
/* proc entry failed: roll back the slab cache created above */
427 lu_kmem_fini(lu_ref_caches);
429 #endif /* CONFIG_PROC_FS */
/*
 * Module-wide teardown: remove the /proc entry (when procfs is built
 * in) before freeing the slab cache — reverse of lu_ref_global_init().
 */
434 void lu_ref_global_fini(void)
436 #ifdef CONFIG_PROC_FS
437 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
438 #endif /* CONFIG_PROC_FS */
439 lu_kmem_fini(lu_ref_caches);
442 #endif /* USE_LU_REF */