4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/obdclass/lu_ref.c
35 * Author: Nikita Danilov <nikita.danilov@sun.com>
38 #define DEBUG_SUBSYSTEM S_CLASS
40 #include <libcfs/libcfs.h>
42 #include <obd_class.h>
43 #include <obd_support.h>
/**
 * Asserts a condition for a given lu_ref. Must be called with
 * lu_ref::lf_guard held.
 *
 * On failure the offending lu_ref (and then all live lu_refs) are dumped
 * before LASSERT() fires; lf_guard is dropped around the global dump to
 * avoid self-deadlock, and re-taken so the caller's locking state is
 * unchanged.
 */
#define REFASSERT(ref, expr) do {					\
	struct lu_ref *__tmp = (ref);					\
									\
	if (unlikely(!(expr))) {					\
		lu_ref_print(__tmp);					\
		spin_unlock(&__tmp->lf_guard);				\
		lu_ref_print_all();					\
		LASSERT(0);						\
		spin_lock(&__tmp->lf_guard);				\
	}								\
} while (0)
64 static struct kmem_cache *lu_ref_link_kmem;
66 static struct lu_kmem_descr lu_ref_caches[] = {
68 .ckd_cache = &lu_ref_link_kmem,
69 .ckd_name = "lu_ref_link_kmem",
70 .ckd_size = sizeof(struct lu_ref_link)
78 * Global list of active (initialized, but not finalized) lu_ref's.
80 * Protected by lu_ref_refs_guard.
82 static LIST_HEAD(lu_ref_refs);
83 static DEFINE_SPINLOCK(lu_ref_refs_guard);
84 static struct lu_ref lu_ref_marker = {
85 .lf_guard = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
86 .lf_list = LIST_HEAD_INIT(lu_ref_marker.lf_list),
87 .lf_linkage = LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
90 void lu_ref_print(const struct lu_ref *ref)
92 struct lu_ref_link *link;
94 CERROR("lu_ref: %p %d %d %s:%d\n",
95 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
96 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
97 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
101 static int lu_ref_is_marker(const struct lu_ref *ref)
103 return ref == &lu_ref_marker;
106 void lu_ref_print_all(void)
110 spin_lock(&lu_ref_refs_guard);
111 list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
112 if (lu_ref_is_marker(ref))
115 spin_lock(&ref->lf_guard);
117 spin_unlock(&ref->lf_guard);
119 spin_unlock(&lu_ref_refs_guard);
122 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
127 spin_lock_init(&ref->lf_guard);
128 INIT_LIST_HEAD(&ref->lf_list);
129 spin_lock(&lu_ref_refs_guard);
130 list_add(&ref->lf_linkage, &lu_ref_refs);
131 spin_unlock(&lu_ref_refs_guard);
133 EXPORT_SYMBOL(lu_ref_init_loc);
135 void lu_ref_fini(struct lu_ref *ref)
137 spin_lock(&ref->lf_guard);
138 REFASSERT(ref, list_empty(&ref->lf_list));
139 REFASSERT(ref, ref->lf_refs == 0);
140 spin_unlock(&ref->lf_guard);
141 spin_lock(&lu_ref_refs_guard);
142 list_del_init(&ref->lf_linkage);
143 spin_unlock(&lu_ref_refs_guard);
145 EXPORT_SYMBOL(lu_ref_fini);
147 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
152 struct lu_ref_link *link;
155 if (lu_ref_link_kmem != NULL) {
156 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
159 link->ll_scope = scope;
160 link->ll_source = source;
161 spin_lock(&ref->lf_guard);
162 list_add_tail(&link->ll_linkage, &ref->lf_list);
164 spin_unlock(&ref->lf_guard);
169 spin_lock(&ref->lf_guard);
171 spin_unlock(&ref->lf_guard);
172 link = ERR_PTR(-ENOMEM);
178 void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
181 lu_ref_add_context(ref, GFP_NOFS, scope, source);
183 EXPORT_SYMBOL(lu_ref_add);
185 void lu_ref_add_at(struct lu_ref *ref, struct lu_ref_link *link,
186 const char *scope, const void *source)
189 link->ll_scope = scope;
190 link->ll_source = source;
191 spin_lock(&ref->lf_guard);
192 list_add_tail(&link->ll_linkage, &ref->lf_list);
194 spin_unlock(&ref->lf_guard);
196 EXPORT_SYMBOL(lu_ref_add_at);
199 * Version of lu_ref_add() to be used in non-blockable contexts.
201 void lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
204 lu_ref_add_context(ref, GFP_ATOMIC, scope, source);
206 EXPORT_SYMBOL(lu_ref_add_atomic);
208 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
212 return link->ll_source == source && !strcmp(link->ll_scope, scope);
/**
 * Maximal chain length seen so far. Used by lu_ref_find() to warn about
 * unexpectedly long link lists (linear search cost).
 */
static unsigned lu_ref_chain_max_length = 127;
221 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
223 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
226 struct lu_ref_link *link;
227 unsigned int iterations;
230 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
232 if (lu_ref_link_eq(link, scope, source)) {
233 if (iterations > lu_ref_chain_max_length) {
234 CWARN("Long lu_ref chain %d \"%s\":%p\n",
235 iterations, scope, source);
236 lu_ref_chain_max_length = iterations * 3 / 2;
244 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
246 struct lu_ref_link *link;
248 spin_lock(&ref->lf_guard);
249 link = lu_ref_find(ref, scope, source);
251 list_del(&link->ll_linkage);
253 spin_unlock(&ref->lf_guard);
254 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
256 REFASSERT(ref, ref->lf_failed > 0);
258 spin_unlock(&ref->lf_guard);
261 EXPORT_SYMBOL(lu_ref_del);
263 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
265 const void *source0, const void *source1)
267 spin_lock(&ref->lf_guard);
268 REFASSERT(ref, link != NULL && !IS_ERR(link));
269 REFASSERT(ref, link->ll_ref == ref);
270 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
271 link->ll_source = source1;
272 spin_unlock(&ref->lf_guard);
274 EXPORT_SYMBOL(lu_ref_set_at);
276 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
277 const char *scope, const void *source)
279 spin_lock(&ref->lf_guard);
280 REFASSERT(ref, link != NULL && !IS_ERR(link));
281 REFASSERT(ref, link->ll_ref == ref);
282 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
283 list_del(&link->ll_linkage);
285 spin_unlock(&ref->lf_guard);
287 EXPORT_SYMBOL(lu_ref_del_at);
289 #ifdef CONFIG_PROC_FS
291 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
293 struct lu_ref *ref = seq->private;
295 spin_lock(&lu_ref_refs_guard);
296 if (list_empty(&ref->lf_linkage))
298 spin_unlock(&lu_ref_refs_guard);
303 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
305 struct lu_ref *ref = p;
308 LASSERT(seq->private == p);
309 LASSERT(!list_empty(&ref->lf_linkage));
312 spin_lock(&lu_ref_refs_guard);
313 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
314 if (&next->lf_linkage == &lu_ref_refs)
317 list_move(&ref->lf_linkage, &next->lf_linkage);
318 spin_unlock(&lu_ref_refs_guard);
323 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
329 static int lu_ref_seq_show(struct seq_file *seq, void *p)
331 struct lu_ref *ref = p;
334 spin_lock(&lu_ref_refs_guard);
335 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
336 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
337 spin_unlock(&lu_ref_refs_guard);
341 /* print the entry */
342 spin_lock(&next->lf_guard);
343 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
344 next, next->lf_refs, next->lf_failed,
345 next->lf_func, next->lf_line);
346 if (next->lf_refs > 64) {
347 seq_puts(seq, " too many references, skip\n");
349 struct lu_ref_link *link;
352 list_for_each_entry(link, &next->lf_list, ll_linkage)
353 seq_printf(seq, " #%d link: %s %p\n",
354 i++, link->ll_scope, link->ll_source);
356 spin_unlock(&next->lf_guard);
357 spin_unlock(&lu_ref_refs_guard);
362 static const struct seq_operations lu_ref_seq_ops = {
363 .start = lu_ref_seq_start,
364 .stop = lu_ref_seq_stop,
365 .next = lu_ref_seq_next,
366 .show = lu_ref_seq_show
369 static int lu_ref_seq_open(struct inode *inode, struct file *file)
371 struct lu_ref *marker = &lu_ref_marker;
374 result = seq_open(file, &lu_ref_seq_ops);
376 spin_lock(&lu_ref_refs_guard);
377 if (!list_empty(&marker->lf_linkage))
380 list_add(&marker->lf_linkage, &lu_ref_refs);
381 spin_unlock(&lu_ref_refs_guard);
384 struct seq_file *f = file->private_data;
388 seq_release(inode, file);
395 static int lu_ref_seq_release(struct inode *inode, struct file *file)
397 struct seq_file *m = file->private_data;
398 struct lu_ref *ref = m->private;
400 spin_lock(&lu_ref_refs_guard);
401 list_del_init(&ref->lf_linkage);
402 spin_unlock(&lu_ref_refs_guard);
404 return seq_release(inode, file);
407 static const struct file_operations lu_ref_dump_fops = {
408 .owner = THIS_MODULE,
409 .open = lu_ref_seq_open,
412 .release = lu_ref_seq_release
415 #endif /* CONFIG_PROC_FS */
417 int lu_ref_global_init(void)
422 "lu_ref tracking is enabled. Performance isn't.\n");
424 result = lu_kmem_init(lu_ref_caches);
426 #ifdef CONFIG_PROC_FS
428 result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
429 0444, &lu_ref_dump_fops, NULL);
431 lu_kmem_fini(lu_ref_caches);
433 #endif /* CONFIG_PROC_FS */
438 void lu_ref_global_fini(void)
440 #ifdef CONFIG_PROC_FS
441 lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
442 #endif /* CONFIG_PROC_FS */
443 lu_kmem_fini(lu_ref_caches);
446 #endif /* USE_LU_REF */