struct list_head l_exp_list;
};
+/*
+ * Flags modifying how lock matching (lock_matches()/ldlm_lock_match_with_skip())
+ * filters candidate locks.
+ */
+enum ldlm_match_flags {
+ LDLM_MATCH_UNREF = BIT(0), /* also match unreferenced locks that are
+			      * CBPENDING or GONE (normally skipped) */
+ LDLM_MATCH_AST = BIT(1), /* require l_ast_data to be set, then continue
+			    * with the normal mode/bits matching */
+ LDLM_MATCH_AST_ANY = BIT(2), /* match any lock with l_ast_data set,
+				* skipping IBITS/extent matching */
+};
+
/**
* Describe the overlap between two locks. itree_overlap_cb data.
*/
union ldlm_policy_data *lmd_policy;
__u64 lmd_flags;
__u64 lmd_skip_flags;
- int lmd_unref;
- bool lmd_has_ast_data;
+ enum ldlm_match_flags lmd_match;
};
/** For uncommitted cross-MDT lock, store transno this lock belongs to */
void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
+
enum ldlm_mode ldlm_lock_match_with_skip(struct ldlm_namespace *ns,
__u64 flags, __u64 skip_flags,
const struct ldlm_res_id *res_id,
union ldlm_policy_data *policy,
enum ldlm_mode mode,
struct lustre_handle *lh,
- int unref);
+ enum ldlm_match_flags match_flags);
static inline enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns,
__u64 flags,
const struct ldlm_res_id *res_id,
enum ldlm_type type,
union ldlm_policy_data *policy,
enum ldlm_mode mode,
- struct lustre_handle *lh,
- int unref)
+ struct lustre_handle *lh)
{
return ldlm_lock_match_with_skip(ns, flags, 0, res_id, type, policy,
- mode, lh, unref);
+ mode, lh, 0);
}
struct ldlm_lock *search_itree(struct ldlm_resource *res,
struct ldlm_match_data *data);
* Return the lock even if it is being canceled.
*/
OSC_DAP_FL_CANCELING = BIT(1),
+ /**
+ * Match only locks that have l_ast_data set, e.g. when searching
+ * for a lock that is to be cancelled via callback.
+ */
+ OSC_DAP_FL_AST = BIT(2),
};
/*
if (ldlm_is_cbpending(lock) &&
!(data->lmd_flags & LDLM_FL_CBPENDING))
return INTERVAL_ITER_CONT;
- if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+ if (!(data->lmd_match & LDLM_MATCH_UNREF) && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
return INTERVAL_ITER_CONT;
/* When we search for ast_data, we are not doing a traditional match,
* so we don't worry about IBITS or extent matching.
*/
- if (data->lmd_has_ast_data) {
+ if (data->lmd_match & (LDLM_MATCH_AST | LDLM_MATCH_AST_ANY)) {
if (!lock->l_ast_data)
return INTERVAL_ITER_CONT;
- goto matched;
+ if (data->lmd_match & LDLM_MATCH_AST_ANY)
+ goto matched;
}
match = lock->l_req_mode;
/* We match if we have existing lock with same or wider set
of bits. */
- if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+ if (!(data->lmd_match & LDLM_MATCH_UNREF) && LDLM_HAVE_MASK(lock, GONE))
return INTERVAL_ITER_CONT;
if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
enum ldlm_type type,
union ldlm_policy_data *policy,
enum ldlm_mode mode,
- struct lustre_handle *lockh, int unref)
+ struct lustre_handle *lockh,
+ enum ldlm_match_flags match_flags)
{
struct ldlm_match_data data = {
.lmd_old = NULL,
.lmd_policy = policy,
.lmd_flags = flags,
.lmd_skip_flags = skip_flags,
- .lmd_unref = unref,
- .lmd_has_ast_data = false,
+ .lmd_match = match_flags,
};
struct ldlm_resource *res;
struct ldlm_lock *lock;
struct ldlm_res_id *res_id, enum ldlm_type type,
union ldlm_policy_data *policy, enum ldlm_mode mode,
__u64 *flags, struct osc_object *obj,
- struct lustre_handle *lockh, int unref)
+ struct lustre_handle *lockh,
+ enum ldlm_match_flags match_flags)
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
ENTRY;
- rc = ldlm_lock_match(obd->obd_namespace, lflags,
- res_id, type, policy, mode, lockh, unref);
+ rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
+ res_id, type, policy, mode, lockh, match_flags);
+
if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
RETURN(rc);
struct ldlm_lock *lock = NULL;
enum ldlm_mode mode;
__u64 flags;
+ enum ldlm_match_flags match_flags = 0;
ENTRY;
if (dap_flags & OSC_DAP_FL_TEST_LOCK)
flags |= LDLM_FL_TEST_LOCK;
+ if (dap_flags & OSC_DAP_FL_AST)
+ match_flags |= LDLM_MATCH_AST;
+
+ if (dap_flags & OSC_DAP_FL_CANCELING)
+ match_flags |= LDLM_MATCH_UNREF;
+
again:
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
* writers can share a single PW lock. */
mode = mdc_dom_lock_match(env, osc_export(obj), resname, LDLM_IBITS,
policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
- obj, &lockh,
- dap_flags & OSC_DAP_FL_CANCELING);
+ obj, &lockh, match_flags);
if (mode != 0) {
lock = ldlm_handle2lock(&lockh);
/* RACE: the lock is cancelled so let's try again */
/* refresh non-overlapped index */
tmp = mdc_dlmlock_at_pgoff(env, osc, index,
- OSC_DAP_FL_TEST_LOCK);
+ OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_AST);
if (tmp != NULL) {
info->oti_fn_index = CL_PAGE_EOF;
LDLM_LOCK_PUT(tmp);
* such locks should be skipped.
*/
mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
- einfo->ei_type, policy, mode, &lockh, 0);
+ einfo->ei_type, policy, mode, &lockh);
if (mode) {
struct ldlm_lock *matched;
/* LU-4405: Clear bits not supported by server */
policy->l_inodebits.bits &= exp_connect_ibits(exp);
rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
- &res_id, type, policy, mode, lockh, 0);
+ &res_id, type, policy, mode, lockh);
RETURN(rc);
}
memcpy(&old_lock, lockh, sizeof(*lockh));
if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
- LDLM_IBITS, &policy, LCK_NL, &old_lock, 0)) {
+ LDLM_IBITS, &policy, LCK_NL, &old_lock)) {
ldlm_lock_decref_and_cancel(lockh, it->it_lock_mode);
memcpy(lockh, &old_lock, sizeof(old_lock));
it->it_lock_handle = lockh->cookie;
policy.l_inodebits.bits = MDS_INODELOCK_DOM;
mode = ldlm_lock_match(ns, LDLM_FL_TEST_LOCK,
&res->lr_name, LDLM_IBITS, &policy,
- LCK_PW, &lockh, 0);
+ LCK_PW, &lockh);
/* There is no PW lock on this object; finished. */
if (mode == 0)
lm = (open_flags & MDS_FMODE_WRITE) ? LCK_PW : LCK_PR | LCK_PW;
mode = ldlm_lock_match(mdt->mdt_namespace, LDLM_FL_BLOCK_GRANTED |
LDLM_FL_TEST_LOCK, res_id, LDLM_IBITS, policy,
- lm, &lockh, 0);
+ lm, &lockh);
/* There is no other PW lock on this object; finished. */
if (mode == 0)
/* refresh non-overlapped index */
tmp = osc_dlmlock_at_pgoff(env, osc, index,
- OSC_DAP_FL_TEST_LOCK);
+ OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_AST);
if (tmp != NULL) {
__u64 end = tmp->l_policy_data.l_extent.end;
/* Cache the first-non-overlapped index so as to skip
struct ldlm_res_id *res_id, enum ldlm_type type,
union ldlm_policy_data *policy, enum ldlm_mode mode,
__u64 *flags, struct osc_object *obj,
- struct lustre_handle *lockh, int unref);
+ struct lustre_handle *lockh, enum ldlm_match_flags match_flags);
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
obd_enqueue_update_f upcall, void *cookie,
matchdata.lmd_mode = &mode;
matchdata.lmd_policy = &policy;
matchdata.lmd_flags = LDLM_FL_TEST_LOCK | LDLM_FL_CBPENDING;
- matchdata.lmd_unref = 1;
- matchdata.lmd_has_ast_data = true;
+ matchdata.lmd_match = LDLM_MATCH_UNREF | LDLM_MATCH_AST_ANY;
LDLM_LOCK_GET(dlmlock);
struct ldlm_lock *lock = NULL;
enum ldlm_mode mode;
__u64 flags;
+ enum ldlm_match_flags match_flags = 0;
ENTRY;
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
if (dap_flags & OSC_DAP_FL_TEST_LOCK)
flags |= LDLM_FL_TEST_LOCK;
+
+ if (dap_flags & OSC_DAP_FL_AST)
+ match_flags |= LDLM_MATCH_AST;
+
+ if (dap_flags & OSC_DAP_FL_CANCELING)
+ match_flags |= LDLM_MATCH_UNREF;
+
/*
* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too
again:
mode = osc_match_base(env, osc_export(obj), resname, LDLM_EXTENT,
policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
- obj, &lockh, dap_flags & OSC_DAP_FL_CANCELING);
+ obj, &lockh, match_flags);
if (mode != 0) {
lock = ldlm_handle2lock(&lockh);
/* RACE: the lock is cancelled so let's try again */
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
&resid, LDLM_EXTENT, &policy,
- LCK_PR | LCK_PW, &lockh, 0);
+ LCK_PR | LCK_PW, &lockh);
if (mode) { /* lock is cached on client */
if (mode != LCK_PR) {
ldlm_lock_addref(&lockh, LCK_PR);
if (intent != 0)
match_flags |= LDLM_FL_BLOCK_GRANTED;
mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
- einfo->ei_type, policy, mode, &lockh, 0);
+ einfo->ei_type, policy, mode, &lockh);
if (mode) {
struct ldlm_lock *matched;
struct ldlm_res_id *res_id, enum ldlm_type type,
union ldlm_policy_data *policy, enum ldlm_mode mode,
__u64 *flags, struct osc_object *obj,
- struct lustre_handle *lockh, int unref)
+ struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
policy->l_extent.end |= ~PAGE_MASK;
- /* Next, search for already existing extent locks that will cover us */
- /* If we're trying to read, we also search for an existing PW lock. The
- * VFS and page cache already protect us locally, so lots of readers/
+ /* Next, search for already existing extent locks that will cover us */
+ /* If we're trying to read, we also search for an existing PW lock. The
+ * VFS and page cache already protect us locally, so lots of readers/
* writers can share a single PW lock. */
- rc = mode;
- if (mode == LCK_PR)
- rc |= LCK_PW;
- rc = ldlm_lock_match(obd->obd_namespace, lflags,
- res_id, type, policy, rc, lockh, unref);
+ rc = mode;
+ if (mode == LCK_PR)
+ rc |= LCK_PW;
+
+ rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
+ res_id, type, policy, rc, lockh,
+ match_flags);
if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
RETURN(rc);