if (ok) {
cl_lock_hold_add(env, lock, scope, source);
cl_lock_user_add(env, lock);
+ cl_lock_put(env, lock);
}
cl_lock_mutex_put(env, lock);
if (!ok) {
* and head->coh_nesting == 1 check assumes two level top-sub
* hierarchy.
*/
- LASSERT(ergo(head->coh_nesting == 1 &&
- list_empty(&head->coh_locks), !head->coh_pages));
+ /*
+ * The count of pages of this object may NOT be zero because
+ * we don't cleanup the pages if they are in CPS_FREEING state.
+ * See cl_page_gang_lookup().
+ *
+ * It is safe to leave the CPS_FREEING pages in cache w/o
+ * a lock, because those pages must not be uptodate.
+ * See cl_page_delete0 for details.
+ */
+ /* LASSERT(!ergo(head->coh_nesting == 1 &&
+ list_empty(&head->coh_locks), !head->coh_pages)); */
spin_unlock(&head->coh_lock_guard);
/*
* From now on, no new references to this lock can be acquired
list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
cl_lock_ext_match(&scan->cll_descr, need) &&
+ scan->cll_state >= CLS_HELD &&
scan->cll_state < CLS_FREEING &&
/*
* This check is racy as the lock can be canceled right