// SPDX-License-Identifier: GPL-2.0
/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/obdclass/lu_ref.c
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_class.h>
#include <obd_support.h>

#ifdef CONFIG_LUSTRE_DEBUG_LU_REF
/*
 * Asserts a condition for a given lu_ref. Must be called with
 * lu_ref::lf_guard held.
 *
 * On failure, dumps the failing lu_ref and all active lu_refs (dropping
 * the guard around the global dump to avoid self-deadlock), then asserts.
 */
#define REFASSERT(ref, expr) do {					\
	struct lu_ref *__tmp = (ref);					\
									\
	if (unlikely(!(expr))) {					\
		lu_ref_print(__tmp);					\
		spin_unlock(&__tmp->lf_guard);				\
		lu_ref_print_all();					\
		LASSERT(0);						\
		spin_lock(&__tmp->lf_guard);				\
	}								\
} while (0)
63 static struct kmem_cache *lu_ref_link_kmem;
65 static struct lu_kmem_descr lu_ref_caches[] = {
67 .ckd_cache = &lu_ref_link_kmem,
68 .ckd_name = "lu_ref_link_kmem",
69 .ckd_size = sizeof(struct lu_ref_link)
77 * Global list of active (initialized, but not finalized) lu_ref's.
79 * Protected by lu_ref_refs_guard.
81 static LIST_HEAD(lu_ref_refs);
82 static DEFINE_SPINLOCK(lu_ref_refs_guard);
83 static struct lu_ref lu_ref_marker = {
84 .lf_guard = __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
85 .lf_list = LIST_HEAD_INIT(lu_ref_marker.lf_list),
86 .lf_linkage = LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
89 void lu_ref_print(const struct lu_ref *ref)
91 struct lu_ref_link *link;
93 CERROR("lu_ref: %p %d %d %s:%d\n",
94 ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
95 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
96 CERROR(" link: %s %p\n", link->ll_scope, link->ll_source);
100 static int lu_ref_is_marker(const struct lu_ref *ref)
102 return ref == &lu_ref_marker;
105 void lu_ref_print_all(void)
109 spin_lock(&lu_ref_refs_guard);
110 list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
111 if (lu_ref_is_marker(ref))
114 spin_lock(&ref->lf_guard);
116 spin_unlock(&ref->lf_guard);
118 spin_unlock(&lu_ref_refs_guard);
121 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
126 spin_lock_init(&ref->lf_guard);
127 INIT_LIST_HEAD(&ref->lf_list);
128 spin_lock(&lu_ref_refs_guard);
129 list_add(&ref->lf_linkage, &lu_ref_refs);
130 spin_unlock(&lu_ref_refs_guard);
132 EXPORT_SYMBOL(lu_ref_init_loc);
134 void lu_ref_fini(struct lu_ref *ref)
136 spin_lock(&ref->lf_guard);
137 REFASSERT(ref, list_empty(&ref->lf_list));
138 REFASSERT(ref, ref->lf_refs == 0);
139 spin_unlock(&ref->lf_guard);
140 spin_lock(&lu_ref_refs_guard);
141 list_del_init(&ref->lf_linkage);
142 spin_unlock(&lu_ref_refs_guard);
144 EXPORT_SYMBOL(lu_ref_fini);
146 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
151 struct lu_ref_link *link;
154 if (lu_ref_link_kmem) {
155 OBD_SLAB_ALLOC_PTR_GFP(link, lu_ref_link_kmem, flags);
158 link->ll_scope = scope;
159 link->ll_source = source;
160 spin_lock(&ref->lf_guard);
161 list_add_tail(&link->ll_linkage, &ref->lf_list);
163 spin_unlock(&ref->lf_guard);
168 spin_lock(&ref->lf_guard);
170 spin_unlock(&ref->lf_guard);
171 link = ERR_PTR(-ENOMEM);
177 void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
180 lu_ref_add_context(ref, GFP_NOFS, scope, source);
182 EXPORT_SYMBOL(lu_ref_add);
184 void lu_ref_add_at(struct lu_ref *ref, struct lu_ref_link *link,
185 const char *scope, const void *source)
188 link->ll_scope = scope;
189 link->ll_source = source;
190 spin_lock(&ref->lf_guard);
191 list_add_tail(&link->ll_linkage, &ref->lf_list);
193 spin_unlock(&ref->lf_guard);
195 EXPORT_SYMBOL(lu_ref_add_at);
198 * Version of lu_ref_add() to be used in non-blockable contexts.
200 void lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
203 lu_ref_add_context(ref, GFP_ATOMIC, scope, source);
205 EXPORT_SYMBOL(lu_ref_add_atomic);
207 static inline int lu_ref_link_eq(const struct lu_ref_link *link,
211 return link->ll_source == source && !strcmp(link->ll_scope, scope);
/*
 * Maximal chain length seen so far. Grown by lu_ref_find() (by 1.5x) each
 * time a longer chain is observed, so the "long chain" warning fires only
 * on new records.
 */
static unsigned int lu_ref_chain_max_length = 127;
220 * Searches for a lu_ref_link with given [scope, source] within given lu_ref.
222 static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
225 struct lu_ref_link *link;
226 unsigned int iterations;
229 list_for_each_entry(link, &ref->lf_list, ll_linkage) {
231 if (lu_ref_link_eq(link, scope, source)) {
232 if (iterations > lu_ref_chain_max_length) {
233 CWARN("Long lu_ref chain %d \"%s\":%p\n",
234 iterations, scope, source);
235 lu_ref_chain_max_length = iterations * 3 / 2;
243 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
245 struct lu_ref_link *link;
247 spin_lock(&ref->lf_guard);
248 link = lu_ref_find(ref, scope, source);
250 list_del(&link->ll_linkage);
252 spin_unlock(&ref->lf_guard);
253 OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
255 REFASSERT(ref, ref->lf_failed > 0);
257 spin_unlock(&ref->lf_guard);
260 EXPORT_SYMBOL(lu_ref_del);
262 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
264 const void *source0, const void *source1)
266 spin_lock(&ref->lf_guard);
267 REFASSERT(ref, !IS_ERR_OR_NULL(link));
268 REFASSERT(ref, link->ll_ref == ref);
269 REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
270 link->ll_source = source1;
271 spin_unlock(&ref->lf_guard);
273 EXPORT_SYMBOL(lu_ref_set_at);
275 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
276 const char *scope, const void *source)
278 spin_lock(&ref->lf_guard);
279 REFASSERT(ref, !IS_ERR_OR_NULL(link));
280 REFASSERT(ref, link->ll_ref == ref);
281 REFASSERT(ref, lu_ref_link_eq(link, scope, source));
282 list_del(&link->ll_linkage);
284 spin_unlock(&ref->lf_guard);
286 EXPORT_SYMBOL(lu_ref_del_at);
288 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
290 struct lu_ref *ref = seq->private;
292 spin_lock(&lu_ref_refs_guard);
293 if (list_empty(&ref->lf_linkage))
295 spin_unlock(&lu_ref_refs_guard);
300 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
302 struct lu_ref *ref = p;
305 LASSERT(seq->private == p);
306 LASSERT(!list_empty(&ref->lf_linkage));
309 spin_lock(&lu_ref_refs_guard);
310 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
311 if (&next->lf_linkage == &lu_ref_refs)
314 list_move(&ref->lf_linkage, &next->lf_linkage);
315 spin_unlock(&lu_ref_refs_guard);
320 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
326 static int lu_ref_seq_show(struct seq_file *seq, void *p)
328 struct lu_ref *ref = p;
331 spin_lock(&lu_ref_refs_guard);
332 next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
333 if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
334 spin_unlock(&lu_ref_refs_guard);
338 /* print the entry */
339 spin_lock(&next->lf_guard);
340 seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
341 next, next->lf_refs, next->lf_failed,
342 next->lf_func, next->lf_line);
343 if (next->lf_refs > 64) {
344 seq_puts(seq, " too many references, skip\n");
346 struct lu_ref_link *link;
349 list_for_each_entry(link, &next->lf_list, ll_linkage)
350 seq_printf(seq, " #%d link: %s %p\n",
351 i++, link->ll_scope, link->ll_source);
353 spin_unlock(&next->lf_guard);
354 spin_unlock(&lu_ref_refs_guard);
359 static const struct seq_operations lu_ref_seq_ops = {
360 .start = lu_ref_seq_start,
361 .stop = lu_ref_seq_stop,
362 .next = lu_ref_seq_next,
363 .show = lu_ref_seq_show
366 static int lu_ref_seq_open(struct inode *inode, struct file *file)
368 struct lu_ref *marker = &lu_ref_marker;
371 result = seq_open(file, &lu_ref_seq_ops);
373 spin_lock(&lu_ref_refs_guard);
374 if (!list_empty(&marker->lf_linkage))
377 list_add(&marker->lf_linkage, &lu_ref_refs);
378 spin_unlock(&lu_ref_refs_guard);
381 struct seq_file *f = file->private_data;
385 seq_release(inode, file);
392 static int lu_ref_seq_release(struct inode *inode, struct file *file)
394 struct seq_file *m = file->private_data;
395 struct lu_ref *ref = m->private;
397 spin_lock(&lu_ref_refs_guard);
398 list_del_init(&ref->lf_linkage);
399 spin_unlock(&lu_ref_refs_guard);
401 return seq_release(inode, file);
404 static const struct file_operations lu_ref_dump_fops = {
405 .owner = THIS_MODULE,
406 .open = lu_ref_seq_open,
409 .release = lu_ref_seq_release
412 int lu_ref_global_init(void)
417 "lu_ref tracking is enabled. Performance isn't.\n");
419 result = lu_kmem_init(lu_ref_caches);
423 debugfs_create_file("lu_refs", 0444, debugfs_lustre_root,
424 NULL, &lu_ref_dump_fops);
429 void lu_ref_global_fini(void)
431 /* debugfs file gets cleaned up by debugfs_remove_recursive on
432 * debugfs_lustre_root
434 lu_kmem_fini(lu_ref_caches);
#endif /* CONFIG_LUSTRE_DEBUG_LU_REF */