struct cl_page *page);
struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj);
struct cl_page *cl_page_top    (struct cl_page *page);
-int             cl_is_page     (const void *addr);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype);
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-int  cl_is_lock    (const void *addr);
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
static int cl_lock_invariant_trusted(const struct lu_env *env,
                                     const struct cl_lock *lock)
{
-        return
-                cl_is_lock(lock) &&
-                ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
+        return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
                cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                lock->cll_holds >= lock->cll_users &&
                lock->cll_holds >= 0 &&
{
        struct cl_object *obj = lock->cll_descr.cld_obj;
-        LASSERT(cl_is_lock(lock));
        LINVRNT(!cl_lock_is_mutexed(lock));
        ENTRY;
{
        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-        LASSERT(cl_is_lock(lock));
        CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
        if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
        cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;
-                LASSERT(cl_is_lock(lock));
                matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                          lock->cll_state < CLS_FREEING &&
                          lock->cll_error == 0 &&
}
EXPORT_SYMBOL(cl_locks_prune);
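
A note on the ergo() macro that appears in the invariants above: in libcfs it is the usual logical implication, ergo(a, b) == "a implies b", so a clause constrains its consequent only while its antecedent holds. A minimal standalone model (the assertions are illustrative, not part of the patch):

#include <assert.h>

/* libcfs-style implication: true unless a holds and b fails. */
#define ergo(a, b) (!(a) || (b))

int main(void)
{
        /* A lock in CLS_FREEING must have no holds... */
        assert(ergo(1 /* freeing */, 0 == 0 /* holds == 0 */));
        /* ...while a lock that is not freeing is unconstrained by this clause. */
        assert(ergo(0 /* not freeing */, 0));
        return 0;
}
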
-/**
- * Returns true if \a addr is an address of an allocated cl_lock. Used in
- * assertions. This check is optimistically imprecise, i.e., it occasionally
- * returns true for the incorrect addresses, but if it returns false, then the
- * address is guaranteed to be incorrect. (Should be named cl_lockp().)
- *
- * \see cl_is_page()
- */
-int cl_is_lock(const void *addr)
-{
- return cfs_mem_is_in_cache(addr, cl_lock_kmem);
-}
-EXPORT_SYMBOL(cl_is_lock);
-
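
The doc comment deleted above describes the check as optimistically imprecise: it may return true for a stale address, but a false result is definitive. A userspace sketch of that one-sided guarantee, using hypothetical names (struct pool, pool_may_contain); the real cfs_mem_is_in_cache() inspects kmem cache internals instead:

#include <stddef.h>

/* Hypothetical fixed-size object pool standing in for a kmem cache. */
struct pool {
        char   *base;       /* start of the pool's memory */
        size_t  obj_size;   /* size of one object slot    */
        size_t  nr_obj;     /* number of slots            */
};

/*
 * One-sided membership test: returns 1 for every live object from the
 * pool but may also return 1 for a freed slot (false positive); 0 means
 * the address certainly did not come from this pool.
 */
static int pool_may_contain(const struct pool *p, const void *addr)
{
        const char *a = addr;

        return a >= p->base &&
               a <  p->base + p->obj_size * p->nr_obj &&
               (size_t)(a - p->base) % p->obj_size == 0;
}

Because such a check can never tell a live object from a recycled slot, it is only suitable for assertions, which is how the removed helpers were used.
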
static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
                                          const struct cl_io *io,
                                          const struct cl_lock_descr *need,
*/
static struct cl_page *cl_page_top_trusted(struct cl_page *page)
{
-        LASSERT(cl_is_page(page));
        while (page->cp_parent != NULL)
                page = page->cp_parent;
        return page;
*/
static void cl_page_get_trust(struct cl_page *page)
{
-        LASSERT(cl_is_page(page));
        /*
         * Checkless version for trusted users.
         */
        page = radix_tree_lookup(&hdr->coh_tree, index);
        if (page != NULL) {
-                LASSERT(cl_is_page(page));
                cl_page_get_trust(page);
        }
        return page;
                idx = pvec[nr - 1]->cp_index + 1;
                for (i = 0, j = 0; i < nr; ++i) {
                        page = pvec[i];
-                        PASSERT(env, page, cl_is_page(page));
                        pvec[i] = NULL;
                        if (page->cp_index > end)
                                break;
        struct cl_object *obj = page->cp_obj;
        struct cl_site *site = cl_object_site(obj);
-        PASSERT(env, page, cl_is_page(page));
        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
        PASSERT(env, page, page->cp_owner == NULL);
        PASSERT(env, page, page->cp_req == NULL);
        struct cl_page *child;
        struct cl_io *owner;
-        LASSERT(cl_is_page(pg));
        /*
         * Page invariant is protected by a VM lock.
         */
                }
        }
        cfs_spin_unlock(&hdr->coh_page_guard);
-        LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
+        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
        RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
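
The hunk above shows the lookup-then-pin discipline used by cl_vmpage_page(): search under coh_page_guard and take a reference via cl_page_get_trust() before the spinlock is dropped, so the page cannot be freed between lookup and use. A compact userspace model with pthreads; all names here are illustrative:

#include <pthread.h>
#include <stddef.h>

struct obj {
        int refcount;                     /* protected by cache_lock */
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[64];             /* toy index -> object map */

static struct obj *lookup_locked(unsigned long index)
{
        return index < 64 ? table[index] : NULL;
}

struct obj *cache_find_get(unsigned long index)
{
        struct obj *o;

        pthread_mutex_lock(&cache_lock);
        o = lookup_locked(index);
        if (o != NULL)
                o->refcount++;            /* pin before dropping the lock */
        pthread_mutex_unlock(&cache_lock);
        return o;
}
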
}
EXPORT_SYMBOL(cl_page_top);
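
cl_page_top_trusted(), earlier in this patch, is the classic follow-the-parent walk over the client page stack: chase cp_parent until it is NULL and return the topmost page. A minimal model with illustrative names:

#include <stddef.h>

/* Toy stand-in for cl_page: a sub-page points at its parent layer. */
struct pg {
        struct pg *parent;                /* NULL at the top of the stack */
};

static struct pg *pg_top(struct pg *p)
{
        while (p->parent != NULL)
                p = p->parent;
        return p;
}
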
-/**
- * Returns true if \a addr is an address of an allocated cl_page. Used in
- * assertions. This check is optimistically imprecise, i.e., it occasionally
- * returns true for the incorrect addresses, but if it returns false, then the
- * address is guaranteed to be incorrect. (Should be named cl_pagep().)
- *
- * \see cl_is_lock()
- */
-int cl_is_page(const void *addr)
-{
- return cfs_mem_is_in_cache(addr, cl_page_kmem);
-}
-EXPORT_SYMBOL(cl_is_page);
-
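
With the allocator-membership helpers gone, call sites keep only structural assertions, as in the cl_vmpage_page() hunk above where the LASSERT now checks just cp_type. Sketched with hypothetical types; the short-circuit in ergo() keeps the check safe when the pointer is NULL:

#include <assert.h>

#define ergo(a, b) (!(a) || (b))          /* libcfs-style implication */

enum pg_type { PT_CACHEABLE, PT_TRANSIENT };

struct pg {
        enum pg_type cp_type;
};

/* Assert a property of the object itself instead of asking the
 * allocator whether the address merely looks plausible. */
static void check(const struct pg *page)
{
        assert(ergo(page != NULL, page->cp_type == PT_CACHEABLE));
}
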
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
                                       const struct lu_device_type *dtype)
{