From: Vitaly Fertman Date: Tue, 23 Nov 2010 21:03:08 +0000 (+0300) Subject: b=23460 remove expensive memory check X-Git-Tag: 2.1.57.0~31 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=19388c83fd9b430143387ba28276c68697bf369c b=23460 remove expensive memory check i=green remove cfs_mem_is_in_cache check when checking validity of pages and locks --- diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 897faf1..76ee194 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -2684,7 +2684,6 @@ cfs_page_t *cl_page_vmpage (const struct lu_env *env, struct cl_page *page); struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj); struct cl_page *cl_page_top (struct cl_page *page); -int cl_is_page (const void *addr); const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype); @@ -2883,7 +2882,6 @@ void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); -int cl_is_lock (const void *addr); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c index f842af3..0e24306 100644 --- a/lustre/obdclass/cl_lock.c +++ b/lustre/obdclass/cl_lock.c @@ -77,9 +77,7 @@ static struct lu_kmem_descr cl_lock_caches[] = { static int cl_lock_invariant_trusted(const struct lu_env *env, const struct cl_lock *lock) { - return - cl_is_lock(lock) && - ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) && + return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) && cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds && lock->cll_holds >= lock->cll_users && lock->cll_holds >= 0 && @@ -261,7 +259,6 @@ static void 
cl_lock_free(const struct lu_env *env, struct cl_lock *lock) { struct cl_object *obj = lock->cll_descr.cld_obj; - LASSERT(cl_is_lock(lock)); LINVRNT(!cl_lock_is_mutexed(lock)); ENTRY; @@ -352,7 +349,6 @@ void cl_lock_get_trust(struct cl_lock *lock) { struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj); - LASSERT(cl_is_lock(lock)); CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n", cfs_atomic_read(&lock->cll_ref), lock, RETIP); if (cfs_atomic_inc_return(&lock->cll_ref) == 1) @@ -513,7 +509,6 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env, cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) { int matched; - LASSERT(cl_is_lock(lock)); matched = cl_lock_ext_match(&lock->cll_descr, need) && lock->cll_state < CLS_FREEING && lock->cll_error == 0 && @@ -2069,20 +2064,6 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel) } EXPORT_SYMBOL(cl_locks_prune); -/** - * Returns true if \a addr is an address of an allocated cl_lock. Used in - * assertions. This check is optimistically imprecise, i.e., it occasionally - * returns true for the incorrect addresses, but if it returns false, then the - * address is guaranteed to be incorrect. (Should be named cl_lockp().) 
- * - * \see cl_is_page() - */ -int cl_is_lock(const void *addr) -{ - return cfs_mem_is_in_cache(addr, cl_lock_kmem); -} -EXPORT_SYMBOL(cl_is_lock); - static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env, const struct cl_io *io, const struct cl_lock_descr *need, diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c index 76fa7ca..18c5ad3 100644 --- a/lustre/obdclass/cl_page.c +++ b/lustre/obdclass/cl_page.c @@ -99,7 +99,6 @@ static struct lu_kmem_descr cl_page_caches[] = { */ static struct cl_page *cl_page_top_trusted(struct cl_page *page) { - LASSERT(cl_is_page(page)); while (page->cp_parent != NULL) page = page->cp_parent; return page; @@ -118,7 +117,6 @@ static struct cl_page *cl_page_top_trusted(struct cl_page *page) */ static void cl_page_get_trust(struct cl_page *page) { - LASSERT(cl_is_page(page)); /* * Checkless version for trusted users. */ @@ -171,7 +169,6 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index) page = radix_tree_lookup(&hdr->coh_tree, index); if (page != NULL) { - LASSERT(cl_is_page(page)); cl_page_get_trust(page); } return page; @@ -221,7 +218,6 @@ void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, idx = pvec[nr - 1]->cp_index + 1; for (i = 0, j = 0; i < nr; ++i) { page = pvec[i]; - PASSERT(env, page, cl_is_page(page)); pvec[i] = NULL; if (page->cp_index > end) break; @@ -288,7 +284,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_object *obj = page->cp_obj; struct cl_site *site = cl_object_site(obj); - PASSERT(env, page, cl_is_page(page)); PASSERT(env, page, cfs_list_empty(&page->cp_batch)); PASSERT(env, page, page->cp_owner == NULL); PASSERT(env, page, page->cp_req == NULL); @@ -535,7 +530,6 @@ static inline int cl_page_invariant(const struct cl_page *pg) struct cl_page *child; struct cl_io *owner; - LASSERT(cl_is_page(pg)); /* * Page invariant is protected by a VM lock. 
*/ @@ -764,7 +758,7 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj) } } cfs_spin_unlock(&hdr->coh_page_guard); - LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE)); + LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE)); RETURN(page); } EXPORT_SYMBOL(cl_vmpage_page); @@ -780,20 +774,6 @@ struct cl_page *cl_page_top(struct cl_page *page) } EXPORT_SYMBOL(cl_page_top); -/** - * Returns true if \a addr is an address of an allocated cl_page. Used in - * assertions. This check is optimistically imprecise, i.e., it occasionally - * returns true for the incorrect addresses, but if it returns false, then the - * address is guaranteed to be incorrect. (Should be named cl_pagep().) - * - * \see cl_is_lock() - */ -int cl_is_page(const void *addr) -{ - return cfs_mem_is_in_cache(addr, cl_page_kmem); -} -EXPORT_SYMBOL(cl_is_page); - const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype) {