1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lu_ref.c
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 #define DEBUG_SUBSYSTEM S_CLASS
45 # define EXPORT_SYMTAB
49 # include <libcfs/libcfs.h>
51 # include <liblustre.h>
55 #include <obd_class.h>
56 #include <obd_support.h>
62 * Asserts a condition for a given lu_ref. Must be called with
63 * lu_ref::lf_guard held.
 *
 * On failure the reference state is dumped with lu_ref_print() and
 * lf_guard is dropped and re-acquired around the failure handling.
 * NOTE(review): the actual failure action (presumably an LBUG/assert)
 * and the macro terminator fall on lines not present in this listing —
 * confirm against the full source before relying on the expansion.
65 #define REFASSERT(ref, expr) \
67 struct lu_ref *__ref = (ref); \
69 if (unlikely(!(expr))) { \
70 lu_ref_print(__ref); \
71 spin_unlock(&__ref->lf_guard); \
73 spin_lock(&__ref->lf_guard); \
/* One tracked reference: records which lu_ref it was taken against and
 * who took it.  NOTE(review): the struct header and the ll_scope field
 * (a string, see lu_ref_link_eq()) are on lines not shown here. */
79 struct lu_ref *ll_ref; /* owning lu_ref (checked in lu_ref_set_at()) */
80 struct list_head ll_linkage; /* entry in lu_ref::lf_list */
82 const void *ll_source; /* object that took the reference */
85 static cfs_mem_cache_t *lu_ref_link_kmem; /* slab cache for lu_ref_link */
/* Cache descriptor consumed by lu_kmem_init()/lu_kmem_fini() in
 * lu_ref_global_init()/lu_ref_global_fini() to create and destroy the
 * lu_ref_link slab cache above. */
87 static struct lu_kmem_descr lu_ref_caches[] = {
89 .ckd_cache = &lu_ref_link_kmem,
90 .ckd_name = "lu_ref_link_kmem",
91 .ckd_size = sizeof (struct lu_ref_link)
99 * Global list of active (initialized, but not finalized) lu_ref's.
101 * Protected by lu_ref_refs_guard.
103 static CFS_LIST_HEAD(lu_ref_refs);
104 static spinlock_t lu_ref_refs_guard;
/* Sentinel lu_ref used by the seq_file iterator below as a movable
 * cursor inside lu_ref_refs; real entries are told apart from it with
 * lu_ref_is_marker(). */
105 static struct lu_ref lu_ref_marker = {
106 .lf_guard = SPIN_LOCK_UNLOCKED,
107 .lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
108 .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
/*
 * Dump the state of \a ref via CERROR: its reference count, failed
 * allocation count, the func:line recorded at initialization
 * (apparently set by lu_ref_init_loc() — the assignment is elided in
 * this listing), followed by every attached lu_ref_link.
 *
 * NOTE(review): the list walk is not locked here; callers such as
 * REFASSERT hold ref->lf_guard around the call.
 */
111 void lu_ref_print(const struct lu_ref *ref)
113 struct lu_ref_link *link;
115 CERROR("lu_ref: %p %d %d %s:%d\n",
116 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
/* one line per outstanding link: scope string and source object */
117 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
118 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
121 EXPORT_SYMBOL(lu_ref_print);
/* True iff \a ref is the seq_file iterator sentinel (lu_ref_marker),
 * not a real tracked lu_ref. */
123 static int lu_ref_is_marker(const struct lu_ref *ref)
125 return (ref == &lu_ref_marker);
/*
 * Dump every live lu_ref on the global lu_ref_refs list, skipping the
 * iterator sentinel.  The global guard is held for the whole walk and
 * each entry's lf_guard is taken around the dump of that entry (the
 * lu_ref_print() call itself falls on an elided line).
 */
128 void lu_ref_print_all(void)
132 spin_lock(&lu_ref_refs_guard);
133 list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
134 if (lu_ref_is_marker(ref))
137 spin_lock(&ref->lf_guard);
139 spin_unlock(&ref->lf_guard);
141 spin_unlock(&lu_ref_refs_guard);
143 EXPORT_SYMBOL(lu_ref_print_all);
/*
 * Initialize \a ref: set up its guard spinlock and empty link list,
 * then publish it on the global lu_ref_refs list under
 * lu_ref_refs_guard.  \a func / \a line identify the call site (they
 * are printed by lu_ref_print(); the assignments to lf_func/lf_line
 * are on lines elided from this listing).
 */
145 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
150 spin_lock_init(&ref->lf_guard);
151 CFS_INIT_LIST_HEAD(&ref->lf_list);
152 spin_lock(&lu_ref_refs_guard);
153 list_add(&ref->lf_linkage, &lu_ref_refs);
154 spin_unlock(&lu_ref_refs_guard);
156 EXPORT_SYMBOL(lu_ref_init_loc);
/*
 * Finalize \a ref: assert (via REFASSERT, which dumps the ref on
 * failure) that no links and no references remain, then unhook it from
 * the global lu_ref_refs list.
 */
158 void lu_ref_fini(struct lu_ref *ref)
160 REFASSERT(ref, list_empty(&ref->lf_list));
161 REFASSERT(ref, ref->lf_refs == 0);
162 spin_lock(&lu_ref_refs_guard);
163 list_del_init(&ref->lf_linkage);
164 spin_unlock(&lu_ref_refs_guard);
166 EXPORT_SYMBOL(lu_ref_fini);
/*
 * Common worker for lu_ref_add()/lu_ref_add_atomic(): allocate a
 * lu_ref_link from the slab cache with the given allocation \a flags,
 * record the scope/source pair in it, and queue it on ref->lf_list
 * under lf_guard.  On allocation failure ERR_PTR(-ENOMEM) is returned
 * instead of a link; the failure is also accounted under lf_guard
 * (presumably lf_failed++ — that line is elided; lu_ref_del() asserts
 * lf_failed > 0 on the matching path).
 */
168 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
169 enum cfs_alloc_flags flags,
173 struct lu_ref_link *link;
176 if (lu_ref_link_kmem != NULL) {
177 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
180 link->ll_scope = scope;
181 link->ll_source = source;
182 spin_lock(&ref->lf_guard);
183 list_add_tail(&link->ll_linkage, &ref->lf_list);
185 spin_unlock(&ref->lf_guard);
/* allocation failed: account the failure and hand back -ENOMEM */
190 spin_lock(&ref->lf_guard);
192 spin_unlock(&ref->lf_guard);
193 link = ERR_PTR(-ENOMEM);
/* Add a reference [scope, source] to \a ref using a standard (possibly
 * blocking) allocation; see lu_ref_add_context() for the semantics and
 * the ERR_PTR(-ENOMEM) failure convention. */
199 struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
203 return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
205 EXPORT_SYMBOL(lu_ref_add);
208 * Version of lu_ref_add() to be used in non-blockable contexts.
 * Differs only in passing CFS_ALLOC_ATOMIC so the link allocation
 * cannot sleep.
210 struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
213 return lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, scope, source);
215 EXPORT_SYMBOL(lu_ref_add_atomic);
/* True iff \a link records exactly this [scope, source] pair: source is
 * compared by pointer identity, scope by string contents. */
217 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
218 const char *scope, const void *source)
220 return link->ll_source == source && !strcmp(link->ll_scope, scope);
224 * Maximal chain length seen so far.
 * Used by lu_ref_find() to warn about suspiciously long link lists;
 * bumped to 3/2 of the new maximum on each warning so each warning
 * fires only once per new high-water mark.
226 static unsigned lu_ref_chain_max_length = 127;
229 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
 *
 * Linear scan of ref->lf_list; called with ref->lf_guard held (see
 * lu_ref_del()).  Warns via CWARN and raises lu_ref_chain_max_length
 * when the scan length exceeds the longest chain seen so far.
 * NOTE(review): the iteration counter increment and the return
 * statements are on elided lines.
231 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
234 struct lu_ref_link *link;
238 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
240 if (lu_ref_link_eq(link, scope, source)) {
241 if (iterations > lu_ref_chain_max_length) {
242 CWARN("Long lu_ref chain %i \"%s\":%p\n",
243 iterations, scope, source);
244 lu_ref_chain_max_length = iterations * 3 / 2;
/*
 * Drop the reference [scope, source] from \a ref: look up the matching
 * link under lf_guard, unhook it, and free it back to the slab cache
 * (outside the lock).  When no link is found, the reference must have
 * failed to materialize in lu_ref_add_context(), so lf_failed is
 * asserted positive instead.
 */
252 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
254 struct lu_ref_link *link;
256 spin_lock(&ref->lf_guard);
257 link = lu_ref_find(ref, scope, source);
259 list_del(&link->ll_linkage);
/* drop the lock before releasing the link to the slab */
261 spin_unlock(&ref->lf_guard);
262 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
264 REFASSERT(ref, ref->lf_failed > 0);
266 spin_unlock(&ref->lf_guard);
269 EXPORT_SYMBOL(lu_ref_del);
/*
 * Re-point an existing \a link from [scope, source0] to
 * [scope, source1] in place, without reallocating it.  A \a link of
 * ERR_PTR(-ENOMEM) means the original lu_ref_add*() allocation failed;
 * in that case only lf_failed > 0 is asserted.
 */
271 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
273 const void *source0, const void *source1)
275 spin_lock(&ref->lf_guard);
276 if (link != ERR_PTR(-ENOMEM)) {
277 REFASSERT(ref, link->ll_ref == ref);
278 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
279 link->ll_source = source1;
281 REFASSERT(ref, ref->lf_failed > 0);
283 spin_unlock(&ref->lf_guard);
285 EXPORT_SYMBOL(lu_ref_set_at);
/*
 * Variant of lu_ref_del() for callers that kept the link pointer
 * returned by lu_ref_add*(): verify \a link still belongs to \a ref and
 * matches [scope, source], unhook it under lf_guard and free it back to
 * the slab cache.  ERR_PTR(-ENOMEM) again denotes a failed allocation,
 * checked against lf_failed instead.
 */
287 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
288 const char *scope, const void *source)
290 if (link != ERR_PTR(-ENOMEM)) {
291 spin_lock(&ref->lf_guard);
292 REFASSERT(ref, link->ll_ref == ref);
293 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
294 list_del(&link->ll_linkage);
296 spin_unlock(&ref->lf_guard);
297 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
299 spin_lock(&ref->lf_guard);
300 REFASSERT(ref, ref->lf_failed > 0);
302 spin_unlock(&ref->lf_guard);
305 EXPORT_SYMBOL(lu_ref_del_at);
307 #if defined(__KERNEL__) && defined(LPROCFS)
/*
 * seq_file ->start: seq->private is the iterator's marker lu_ref.
 * Checks (under the global guard) whether the marker is still linked
 * into lu_ref_refs; the branch bodies and the return fall on elided
 * lines — presumably returning the marker as the iteration handle.
 */
309 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
311 struct lu_ref *ref = seq->private;
313 spin_lock(&lu_ref_refs_guard);
314 if (list_empty(&ref->lf_linkage))
316 spin_unlock(&lu_ref_refs_guard);
/*
 * seq_file ->next: advance the marker one element forward in the global
 * lu_ref_refs list by list_move()-ing it past the entry it currently
 * precedes; reaching the list head ends the iteration (end-branch body
 * elided).  The marker must be the seq's private iterator and still be
 * on the list.
 */
321 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
323 struct lu_ref *ref = p;
326 LASSERT(seq->private == p);
327 LASSERT(!list_empty(&ref->lf_linkage));
329 spin_lock(&lu_ref_refs_guard);
330 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
331 if (&next->lf_linkage == &lu_ref_refs) {
/* step the marker past the entry just visited */
335 list_move(&ref->lf_linkage, &next->lf_linkage);
337 spin_unlock(&lu_ref_refs_guard);
/* seq_file ->stop callback; its body is on lines not included in this
 * listing (likely empty — nothing is acquired across iterations here,
 * but confirm against the full source). */
341 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
/*
 * seq_file ->show: print the lu_ref that follows the marker in the
 * global list.  Hitting the list head or another marker produces no
 * output.  The entry's lf_guard is held while its counters and links
 * are printed; once lf_refs exceeds 64 the per-link dump is skipped to
 * bound the output.
 */
347 static int lu_ref_seq_show(struct seq_file *seq, void *p)
349 struct lu_ref *ref = p;
352 spin_lock(&lu_ref_refs_guard);
353 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
354 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
355 spin_unlock(&lu_ref_refs_guard);
359 /* print the entry */
361 spin_lock(&next->lf_guard);
362 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
363 next, next->lf_refs, next->lf_failed,
364 next->lf_func, next->lf_line);
365 if (next->lf_refs > 64) {
366 seq_printf(seq, " too many references, skip\n");
/* otherwise enumerate every link with an index */
368 struct lu_ref_link *link;
371 list_for_each_entry(link, &next->lf_list, ll_linkage)
372 seq_printf(seq, " #%d link: %s %p\n",
373 i++, link->ll_scope, link->ll_source);
375 spin_unlock(&next->lf_guard);
376 spin_unlock(&lu_ref_refs_guard);
/* seq_file iterator over the global list of live lu_ref's, driven by
 * the marker-based start/next/stop/show callbacks above. */
381 static struct seq_operations lu_ref_seq_ops = {
382 .start = lu_ref_seq_start,
383 .stop = lu_ref_seq_stop,
384 .next = lu_ref_seq_next,
385 .show = lu_ref_seq_show
/*
 * Open the "lu_refs" /proc dump: set up the seq_file, then insert the
 * global lu_ref_marker at the head of lu_ref_refs to give the iterator
 * a starting position.  A marker whose linkage is already non-empty
 * indicates a concurrent open (the handling branch is elided); the
 * seq_release() on the visible failure path undoes seq_open().
 */
388 static int lu_ref_seq_open(struct inode *inode, struct file *file)
390 struct lu_ref *marker = &lu_ref_marker;
393 result = seq_open(file, &lu_ref_seq_ops);
395 spin_lock(&lu_ref_refs_guard);
396 if (!list_empty(&marker->lf_linkage))
399 list_add(&marker->lf_linkage, &lu_ref_refs);
400 spin_unlock(&lu_ref_refs_guard);
403 struct seq_file *f = file->private_data;
406 seq_release(inode, file);
/*
 * Release the /proc dump: pull the iterator's marker back off the
 * global list under the guard, then let seq_release() tear down the
 * seq_file state.
 */
413 static int lu_ref_seq_release(struct inode *inode, struct file *file)
415 struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
417 spin_lock(&lu_ref_refs_guard);
418 list_del_init(&ref->lf_linkage);
419 spin_unlock(&lu_ref_refs_guard);
421 return seq_release(inode, file);
/* file_operations for the "lu_refs" /proc entry (read/llseek entries
 * fall on lines not shown in this listing). */
424 static struct file_operations lu_ref_dump_fops = {
425 .owner = THIS_MODULE,
426 .open = lu_ref_seq_open,
429 .release = lu_ref_seq_release
/*
 * Module-level init: warn that lu_ref tracking is enabled (it costs
 * performance), initialize the global guard, create the lu_ref_link
 * slab cache, and — in kernel builds with LPROCFS — register the
 * read-only "lu_refs" /proc dump file.  The slab cache is torn down
 * again if the /proc registration fails.  Returns the lu_kmem_init()/
 * lprocfs result (return statements are on elided lines).
 */
434 int lu_ref_global_init(void)
439 "lu_ref tracking is enabled. Performance isn't.\n");
442 spin_lock_init(&lu_ref_refs_guard);
443 result = lu_kmem_init(lu_ref_caches);
445 #if defined(__KERNEL__) && defined(LPROCFS)
447 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
448 0444, &lu_ref_dump_fops, NULL);
450 lu_kmem_fini(lu_ref_caches);
/*
 * Module-level fini, counterpart of lu_ref_global_init(): remove the
 * "lu_refs" /proc entry (kernel+LPROCFS builds only) and destroy the
 * lu_ref_link slab cache.
 */
457 void lu_ref_global_fini(void)
459 #if defined(__KERNEL__) && defined(LPROCFS)
460 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
462 lu_kmem_fini(lu_ref_caches);
465 #endif /* USE_LU_REF */