X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Flu_ref.c;h=5d2fc065516a3cfdb48345f129bf706e6a581edf;hb=1ab4b0239bbd75b4c05f36b8d2cf04fb371b10c2;hp=73f8cc5ff076167602be792ec75662bbee63f319;hpb=ef6225af104b9138638c71b80e87786b8e5e75e5;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/lu_ref.c b/lustre/obdclass/lu_ref.c
index 73f8cc5..5d2fc06 100644
--- a/lustre/obdclass/lu_ref.c
+++ b/lustre/obdclass/lu_ref.c
@@ -26,6 +26,8 @@
 /*
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -40,12 +42,7 @@
 
 #define DEBUG_SUBSYSTEM S_CLASS
 
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else
-# include <liblustre.h>
-#endif
-
+#include <libcfs/libcfs.h>
 #include <obd.h>
 #include <obd_class.h>
 #include <obd_support.h>
@@ -57,26 +54,19 @@
  * Asserts a condition for a given lu_ref. Must be called with
  * lu_ref::lf_guard held.
  */
-#define REFASSERT(ref, expr) do {                                       \
-        struct lu_ref *__tmp = (ref);                                   \
-                                                                        \
-        if (unlikely(!(expr))) {                                        \
-                lu_ref_print(__tmp);                                    \
-                cfs_spin_unlock(&__tmp->lf_guard);                      \
-                lu_ref_print_all();                                     \
-                LASSERT(0);                                             \
-                cfs_spin_lock(&__tmp->lf_guard);                        \
-        }                                                               \
+#define REFASSERT(ref, expr) do {					\
+	struct lu_ref *__tmp = (ref);					\
+									\
+	if (unlikely(!(expr))) {					\
+		lu_ref_print(__tmp);					\
+		spin_unlock(&__tmp->lf_guard);				\
+		lu_ref_print_all();					\
+		LASSERT(0);						\
+		spin_lock(&__tmp->lf_guard);				\
+	}								\
 } while (0)
 
-struct lu_ref_link {
-        struct lu_ref       *ll_ref;
-        cfs_list_t           ll_linkage;
-        const char          *ll_scope;
-        const void          *ll_source;
-};
-
-static cfs_mem_cache_t *lu_ref_link_kmem;
+static struct kmem_cache *lu_ref_link_kmem;
 
 static struct lu_kmem_descr lu_ref_caches[] = {
         {
@@ -94,12 +84,12 @@ static struct lu_kmem_descr lu_ref_caches[] = {
  *
  * Protected by lu_ref_refs_guard.
  */
-static CFS_LIST_HEAD(lu_ref_refs);
-static cfs_spinlock_t lu_ref_refs_guard;
+static struct list_head lu_ref_refs;
+static spinlock_t lu_ref_refs_guard;
 static struct lu_ref lu_ref_marker = {
-        .lf_guard   = DEFINE_SPINLOCK(lu_ref_marker.lf_guard),
-        .lf_list    = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
-        .lf_linkage = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
+	.lf_guard	= __SPIN_LOCK_UNLOCKED(lu_ref_marker.lf_guard),
+	.lf_list	= LIST_HEAD_INIT(lu_ref_marker.lf_list),
+	.lf_linkage	= LIST_HEAD_INIT(lu_ref_marker.lf_linkage)
 };
 
 void lu_ref_print(const struct lu_ref *ref)
@@ -108,7 +98,7 @@ void lu_ref_print(const struct lu_ref *ref)
 
 	CERROR("lu_ref: %p %d %d %s:%d\n",
 	       ref, ref->lf_refs, ref->lf_failed, ref->lf_func, ref->lf_line);
-        cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+	list_for_each_entry(link, &ref->lf_list, ll_linkage) {
 		CERROR("     link: %s %p\n", link->ll_scope, link->ll_source);
 	}
 }
@@ -121,46 +111,46 @@ static int lu_ref_is_marker(const struct lu_ref *ref)
 
 void lu_ref_print_all(void)
 {
-        struct lu_ref *ref;
-
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
-                if (lu_ref_is_marker(ref))
-                        continue;
-
-                cfs_spin_lock(&ref->lf_guard);
-                lu_ref_print(ref);
-                cfs_spin_unlock(&ref->lf_guard);
-        }
-        cfs_spin_unlock(&lu_ref_refs_guard);
+	struct lu_ref *ref;
+
+	spin_lock(&lu_ref_refs_guard);
+	list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+		if (lu_ref_is_marker(ref))
+			continue;
+
+		spin_lock(&ref->lf_guard);
+		lu_ref_print(ref);
+		spin_unlock(&ref->lf_guard);
+	}
+	spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_print_all);
 
 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
 {
-        ref->lf_refs = 0;
-        ref->lf_func = func;
-        ref->lf_line = line;
-        cfs_spin_lock_init(&ref->lf_guard);
-        CFS_INIT_LIST_HEAD(&ref->lf_list);
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+	ref->lf_refs = 0;
+	ref->lf_func = func;
+	ref->lf_line = line;
+	spin_lock_init(&ref->lf_guard);
+	INIT_LIST_HEAD(&ref->lf_list);
+	spin_lock(&lu_ref_refs_guard);
+	list_add(&ref->lf_linkage, &lu_ref_refs);
+	spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_init_loc);
 
 void lu_ref_fini(struct lu_ref *ref)
 {
-        REFASSERT(ref, cfs_list_empty(&ref->lf_list));
-        REFASSERT(ref, ref->lf_refs == 0);
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_del_init(&ref->lf_linkage);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+	REFASSERT(ref, list_empty(&ref->lf_list));
+	REFASSERT(ref, ref->lf_refs == 0);
+	spin_lock(&lu_ref_refs_guard);
+	list_del_init(&ref->lf_linkage);
+	spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_fini);
 
 static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
-                                              enum cfs_alloc_flags flags,
+                                              int flags,
                                               const char *scope,
                                               const void *source)
 {
@@ -173,38 +163,50 @@ static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
 			link->ll_ref = ref;
 			link->ll_scope = scope;
 			link->ll_source = source;
-                        cfs_spin_lock(&ref->lf_guard);
-                        cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
-                        ref->lf_refs++;
-                        cfs_spin_unlock(&ref->lf_guard);
-                }
-        }
-
-        if (link == NULL) {
-                cfs_spin_lock(&ref->lf_guard);
-                ref->lf_failed++;
-                cfs_spin_unlock(&ref->lf_guard);
-                link = ERR_PTR(-ENOMEM);
-        }
-
-        return link;
+			spin_lock(&ref->lf_guard);
+			list_add_tail(&link->ll_linkage, &ref->lf_list);
+			ref->lf_refs++;
+			spin_unlock(&ref->lf_guard);
+		}
+	}
+
+	if (link == NULL) {
+		spin_lock(&ref->lf_guard);
+		ref->lf_failed++;
+		spin_unlock(&ref->lf_guard);
+		link = ERR_PTR(-ENOMEM);
+	}
+
+	return link;
 }
 
-struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
-                               const void *source)
+void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source)
 {
-        cfs_might_sleep();
-        return lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source);
+	might_sleep();
+	lu_ref_add_context(ref, GFP_IOFS, scope, source);
 }
 EXPORT_SYMBOL(lu_ref_add);
 
+void lu_ref_add_at(struct lu_ref *ref, struct lu_ref_link *link,
+		   const char *scope, const void *source)
+{
+	link->ll_ref = ref;
+	link->ll_scope = scope;
+	link->ll_source = source;
+	spin_lock(&ref->lf_guard);
+	list_add_tail(&link->ll_linkage, &ref->lf_list);
+	ref->lf_refs++;
+	spin_unlock(&ref->lf_guard);
+}
+EXPORT_SYMBOL(lu_ref_add_at);
+
 /**
  * Version of lu_ref_add() to be used in non-blockable contexts.
  */
-struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
-                                      const void *source)
+void lu_ref_add_atomic(struct lu_ref *ref, const char *scope,
+		       const void *source)
 {
-        return lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, scope, source);
+	lu_ref_add_context(ref, GFP_ATOMIC, scope, source);
 }
 EXPORT_SYMBOL(lu_ref_add_atomic);
 
@@ -229,7 +231,7 @@ static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
 	unsigned iterations;
 
 	iterations = 0;
-        cfs_list_for_each_entry(link, &ref->lf_list, ll_linkage) {
+	list_for_each_entry(link, &ref->lf_list, ll_linkage) {
 		++iterations;
 		if (lu_ref_link_eq(link, scope, source)) {
 			if (iterations > lu_ref_chain_max_length) {
@@ -245,71 +247,62 @@ static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
 
 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
 {
-        struct lu_ref_link *link;
-
-        cfs_spin_lock(&ref->lf_guard);
-        link = lu_ref_find(ref, scope, source);
-        if (link != NULL) {
-                cfs_list_del(&link->ll_linkage);
-                ref->lf_refs--;
-                cfs_spin_unlock(&ref->lf_guard);
-                OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
-        } else {
-                REFASSERT(ref, ref->lf_failed > 0);
-                ref->lf_failed--;
-                cfs_spin_unlock(&ref->lf_guard);
-        }
+	struct lu_ref_link *link;
+
+	spin_lock(&ref->lf_guard);
+	link = lu_ref_find(ref, scope, source);
+	if (link != NULL) {
+		list_del(&link->ll_linkage);
+		ref->lf_refs--;
+		spin_unlock(&ref->lf_guard);
+		OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
+	} else {
+		REFASSERT(ref, ref->lf_failed > 0);
+		ref->lf_failed--;
+		spin_unlock(&ref->lf_guard);
+	}
 }
 EXPORT_SYMBOL(lu_ref_del);
 
 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
-                   const char *scope,
-                   const void *source0, const void *source1)
+		   const char *scope,
+		   const void *source0, const void *source1)
 {
-        cfs_spin_lock(&ref->lf_guard);
-        if (link != ERR_PTR(-ENOMEM)) {
-                REFASSERT(ref, link->ll_ref == ref);
-                REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
-                link->ll_source = source1;
-        } else {
-                REFASSERT(ref, ref->lf_failed > 0);
-        }
-        cfs_spin_unlock(&ref->lf_guard);
+	REFASSERT(ref, link != NULL && !IS_ERR(link));
+
+	spin_lock(&ref->lf_guard);
+	REFASSERT(ref, link->ll_ref == ref);
+	REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
+	link->ll_source = source1;
+	spin_unlock(&ref->lf_guard);
 }
 EXPORT_SYMBOL(lu_ref_set_at);
 
 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
-                   const char *scope, const void *source)
+		   const char *scope, const void *source)
 {
-        if (link != ERR_PTR(-ENOMEM)) {
-                cfs_spin_lock(&ref->lf_guard);
-                REFASSERT(ref, link->ll_ref == ref);
-                REFASSERT(ref, lu_ref_link_eq(link, scope, source));
-                cfs_list_del(&link->ll_linkage);
-                ref->lf_refs--;
-                cfs_spin_unlock(&ref->lf_guard);
-                OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
-        } else {
-                cfs_spin_lock(&ref->lf_guard);
-                REFASSERT(ref, ref->lf_failed > 0);
-                ref->lf_failed--;
-                cfs_spin_unlock(&ref->lf_guard);
-        }
+	REFASSERT(ref, link != NULL && !IS_ERR(link));
+	spin_lock(&ref->lf_guard);
+	REFASSERT(ref, link->ll_ref == ref);
+	REFASSERT(ref, lu_ref_link_eq(link, scope, source));
+	list_del(&link->ll_linkage);
+	ref->lf_refs--;
+	spin_unlock(&ref->lf_guard);
 }
 EXPORT_SYMBOL(lu_ref_del_at);
 
-#if defined(__KERNEL__) && defined(LPROCFS)
+#ifdef LPROCFS
 
 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
 {
-        struct lu_ref *ref = seq->private;
+	struct lu_ref *ref = seq->private;
 
-        cfs_spin_lock(&lu_ref_refs_guard);
-        if (cfs_list_empty(&ref->lf_linkage))
-                ref = NULL;
-        cfs_spin_unlock(&lu_ref_refs_guard);
+	spin_lock(&lu_ref_refs_guard);
+	if (list_empty(&ref->lf_linkage))
+		ref = NULL;
+	spin_unlock(&lu_ref_refs_guard);
 
-        return ref;
+	return ref;
 }
 
 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
@@ -318,18 +311,18 @@ static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
 	struct lu_ref *next;
 
 	LASSERT(seq->private == p);
-        LASSERT(!cfs_list_empty(&ref->lf_linkage));
-
-        cfs_spin_lock(&lu_ref_refs_guard);
-        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
-        if (&next->lf_linkage == &lu_ref_refs) {
-                p = NULL;
-        } else {
-                (*pos)++;
-                cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
-        }
-        cfs_spin_unlock(&lu_ref_refs_guard);
-        return p;
+	LASSERT(!list_empty(&ref->lf_linkage));
+
+	spin_lock(&lu_ref_refs_guard);
+	next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+	if (&next->lf_linkage == &lu_ref_refs) {
+		p = NULL;
+	} else {
+		(*pos)++;
+		list_move(&ref->lf_linkage, &next->lf_linkage);
+	}
+	spin_unlock(&lu_ref_refs_guard);
+	return p;
 }
 
 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
@@ -340,19 +333,18 @@ static void lu_ref_seq_stop(struct seq_file *seq, void *p)
 
 static int lu_ref_seq_show(struct seq_file *seq, void *p)
 {
-        struct lu_ref *ref = p;
-        struct lu_ref *next;
-
-        cfs_spin_lock(&lu_ref_refs_guard);
-        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
-        if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
-                cfs_spin_unlock(&lu_ref_refs_guard);
-                return 0;
-        }
-
-        /* print the entry */
-
-        cfs_spin_lock(&next->lf_guard);
+	struct lu_ref *ref = p;
+	struct lu_ref *next;
+
+	spin_lock(&lu_ref_refs_guard);
+	next = list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+	if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
+		spin_unlock(&lu_ref_refs_guard);
+		return 0;
+	}
+
+	/* print the entry */
+	spin_lock(&next->lf_guard);
 	seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
 		   next, next->lf_refs, next->lf_failed,
 		   next->lf_func, next->lf_line);
@@ -362,14 +354,14 @@ static int lu_ref_seq_show(struct seq_file *seq, void *p)
 		struct lu_ref_link *link;
 		int i = 0;
 
-                cfs_list_for_each_entry(link, &next->lf_list, ll_linkage)
+		list_for_each_entry(link, &next->lf_list, ll_linkage)
 			seq_printf(seq, "  #%d link: %s %p\n",
 				   i++, link->ll_scope, link->ll_source);
 	}
-        cfs_spin_unlock(&next->lf_guard);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+	spin_unlock(&next->lf_guard);
+	spin_unlock(&lu_ref_refs_guard);
 
-        return 0;
+	return 0;
 }
 
 static struct seq_operations lu_ref_seq_ops = {
@@ -381,17 +373,17 @@ static int lu_ref_seq_open(struct inode *inode, struct file *file)
 {
-        struct lu_ref *marker = &lu_ref_marker;
-        int result = 0;
-
-        result = seq_open(file, &lu_ref_seq_ops);
-        if (result == 0) {
-                cfs_spin_lock(&lu_ref_refs_guard);
-                if (!cfs_list_empty(&marker->lf_linkage))
-                        result = -EAGAIN;
-                else
-                        cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
-                cfs_spin_unlock(&lu_ref_refs_guard);
+	struct lu_ref *marker = &lu_ref_marker;
+	int result = 0;
+
+	result = seq_open(file, &lu_ref_seq_ops);
+	if (result == 0) {
+		spin_lock(&lu_ref_refs_guard);
+		if (!list_empty(&marker->lf_linkage))
+			result = -EAGAIN;
+		else
+			list_add(&marker->lf_linkage, &lu_ref_refs);
+		spin_unlock(&lu_ref_refs_guard);
 
 		if (result == 0) {
 			struct seq_file *f = file->private_data;
@@ -406,13 +398,13 @@ static int lu_ref_seq_open(struct inode *inode, struct file *file)
 
 static int lu_ref_seq_release(struct inode *inode, struct file *file)
 {
-        struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
+	struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
 
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_del_init(&ref->lf_linkage);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+	spin_lock(&lu_ref_refs_guard);
+	list_del_init(&ref->lf_linkage);
+	spin_unlock(&lu_ref_refs_guard);
 
-        return seq_release(inode, file);
+	return seq_release(inode, file);
 }
 
 static struct file_operations lu_ref_dump_fops = {
@@ -423,36 +415,36 @@ static struct file_operations lu_ref_dump_fops = {
 	.release = lu_ref_seq_release
 };
 
-#endif
+#endif /* LPROCFS */
 
 int lu_ref_global_init(void)
 {
-        int result;
-
-        CDEBUG(D_CONSOLE,
-               "lu_ref tracking is enabled. Performance isn't.\n");
+	int result;
+	CDEBUG(D_CONSOLE,
+	       "lu_ref tracking is enabled. Performance isn't.\n");
 
-        cfs_spin_lock_init(&lu_ref_refs_guard);
+	INIT_LIST_HEAD(&lu_ref_refs);
+	spin_lock_init(&lu_ref_refs_guard);
 	result = lu_kmem_init(lu_ref_caches);
 
-#if defined(__KERNEL__) && defined(LPROCFS)
+#ifdef LPROCFS
 	if (result == 0) {
 		result = lprocfs_seq_create(proc_lustre_root, "lu_refs",
 					    0444, &lu_ref_dump_fops, NULL);
 		if (result)
 			lu_kmem_fini(lu_ref_caches);
 	}
-#endif
+#endif /* LPROCFS */
 
 	return result;
 }
 
 void lu_ref_global_fini(void)
 {
-#if defined(__KERNEL__) && defined(LPROCFS)
+#ifdef LPROCFS
 	lprocfs_remove_proc_entry("lu_refs", proc_lustre_root);
-#endif
+#endif /* LPROCFS */
 	lu_kmem_fini(lu_ref_caches);
 }
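
Editor's note on the visible API change: lu_ref_add() and lu_ref_add_atomic() no longer return a struct lu_ref_link, and the new lu_ref_add_at()/lu_ref_del_at() pair lets a caller pass in a link it owns (typically embedded in its own structure), so reference tracking no longer depends on a slab allocation that can fail. The following is a minimal caller-side sketch only, not part of the patch; my_object, my_user and the "my-scope" string are made up for illustration, and lu_ref_init()/lu_ref_fini() are assumed to be the wrappers declared in lu_ref.h.

/*
 * Illustrative sketch only -- not from the patch.  Shows one object whose
 * references are tracked by a lu_ref, with the lu_ref_link embedded in the
 * structure of each user so lu_ref_add_at() never has to allocate.
 */
#include <lu_ref.h>

struct my_object {			/* hypothetical example structure */
	struct lu_ref		mo_reference;	/* tracks who holds this object */
};

struct my_user {			/* hypothetical example structure */
	struct lu_ref_link	mu_link;	/* embedded link: no allocation needed */
};

static void my_object_setup(struct my_object *obj)
{
	lu_ref_init(&obj->mo_reference);	/* assumed wrapper around lu_ref_init_loc() */
}

static void my_user_get(struct my_object *obj, struct my_user *user)
{
	/* caller supplies the link, so recording the reference cannot fail */
	lu_ref_add_at(&obj->mo_reference, &user->mu_link, "my-scope", user);
}

static void my_user_put(struct my_object *obj, struct my_user *user)
{
	lu_ref_del_at(&obj->mo_reference, &user->mu_link, "my-scope", user);
}

static void my_object_cleanup(struct my_object *obj)
{
	/* REFASSERTs that every link added above has been deleted */
	lu_ref_fini(&obj->mo_reference);
}

When lu_ref tracking is compiled out, these calls are expected to reduce to no-ops, so the embedded-link pattern costs nothing in production builds.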