static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
struct osc_lock *ols, int force);
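+ /* forward declaration: osc_lock_has_pages() is used in an LINVRNT()
+  * below, before its definition further down in this file. */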
+static int osc_lock_has_pages(struct osc_lock *olck);
int osc_lock_is_lockless(const struct osc_lock *olck)
{
	return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
}
static void osc_lock_build_policy(const struct lu_env *env,
				  const struct cl_lock *lock,
				  ldlm_policy_data_t *policy)
{
	const struct cl_lock_descr *d = &lock->cll_descr;
	osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
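+ /* group locks are matched by their group ID, so it has to be part
+  * of the enqueue policy as well. */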
+ policy->l_extent.gid = d->cld_gid;
}
static int osc_enq2ldlm_flags(__u32 enqflags)
descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
descr->cld_start = cl_index(descr->cld_obj, ext->start);
descr->cld_end = cl_index(descr->cld_obj, ext->end);
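+ /* carry the group ID of the granted extent back into the descriptor */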
+ descr->cld_gid = ext->gid;
/*
 * tell upper layers the extent of the lock that was actually
 * granted
 */
continue;
/* overlapped and living locks. */
+
+ /* We're not supposed to give up group lock. */
+ if (scan->cll_descr.cld_mode == CLM_GROUP) {
+ LASSERT(descr->cld_mode != CLM_GROUP ||
+ descr->cld_gid != scan->cll_descr.cld_gid);
+ continue;
+ }
+
/* A tricky case for lockless pages:
 * We need to cancel the compatible locks if we're enqueuing
 * a lockless lock, for example:
 * imagine that client has PR lock on [0, 1000], and thread T0
 * is doing lockless IO in [500, 1500] region. Concurrent
 * thread T1 can see lockless data in [500, 1000], which is
 * wrong, because these data are possibly stale. */
lock = slice->cls_lock;
LASSERT(lock->cll_state == CLS_CACHED);
LASSERT(lock->cll_users > 0);
- LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
/* set a flag for osc_dlm_blocking_ast0() to signal the
 * lock. */
olck->ols_ast_wait = 1;
cl_env_nested_put(&nest, env);
} else
result = PTR_ERR(env);
- if (result == 0)
+ if (result == 0) {
ols->ols_flush = 1;
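+ /* a successful flush must have pushed out every page covered
+  * by this lock. */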
+ LINVRNT(!osc_lock_has_pages(ols));
+ }
return result;
}
return result;
}
#else
-# define osc_lock_has_pages(olck) (0)
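+/* a real static function rather than a macro, so that the forward
+ * declaration above resolves in both configurations. */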
+static int osc_lock_has_pages(struct osc_lock *olck)
+{
+ return 0;
+}
#endif /* INVARIANT_CHECK */
static void osc_lock_delete(const struct lu_env *env,
			    const struct cl_lock_slice *slice)
{
	struct osc_lock *olck;

olck = cl2osc_lock(slice);
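+ /* a glimpse lock is never held and never has a dlm lock attached,
+  * so there is nothing to clean up on delete. */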
+ if (olck->ols_glimpse) {
+ LASSERT(!olck->ols_hold);
+ LASSERT(!olck->ols_lock);
+ return;
+ }
+
LINVRNT(osc_lock_invariant(olck));
LINVRNT(!osc_lock_has_pages(olck));