/* lock reference taken by ldlm_handle2lock_long() is
* owned by osc_lock and released in osc_lock_detach()
*/
- lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
+ lu_ref_add_atomic(&dlmlock->l_reference, "osc_lock", oscl);
oscl->ols_has_ref = 1;
LASSERT(oscl->ols_dlmlock == NULL);
matchdata.lmd_mode = &mode;
matchdata.lmd_policy = &policy;
matchdata.lmd_flags = LDLM_FL_TEST_LOCK | LDLM_FL_CBPENDING;
- matchdata.lmd_unref = 1;
- matchdata.lmd_has_ast_data = true;
+ matchdata.lmd_match = LDLM_MATCH_UNREF | LDLM_MATCH_AST_ANY;
LDLM_LOCK_GET(dlmlock);
cl_object_get(obj);
}
unlock_res_and_lock(dlmlock);
- LDLM_LOCK_PUT(dlmlock);
+ LDLM_LOCK_RELEASE(dlmlock);
dlmlock = NULL;
}
EXPORT_SYMBOL(osc_ldlm_glimpse_ast);
+/*
+ * weigh_cb() - per-page callback for osc_page_gang_lookup(), used by
+ * osc_lock_weight().  Returns false to abort the scan as soon as a
+ * busy page (vmlocked, dirty or under writeback) is found; otherwise
+ * stores the next index to resume from in *cbdata and returns true.
+ */
-static int weigh_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
+static bool weigh_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
{
struct cl_page *page = ops->ops_cl.cpl_page;
if (cl_page_is_vmlocked(env, page) || PageDirty(page->cp_vmpage) ||
PageWriteback(page->cp_vmpage))
- return CLP_GANG_ABORT;
+ return false;
+ /* page not busy: remember where to resume the gang lookup */
*(pgoff_t *)cbdata = osc_index(ops) + 1;
- return CLP_GANG_OKAY;
+ return true;
}
static unsigned long osc_lock_weight(const struct lu_env *env,
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (result != 0)
- RETURN(result);
+ RETURN(1);
page_index = cl_index(obj, start);
- do {
- result = osc_page_gang_lookup(env, io, oscobj,
- page_index, cl_index(obj, end),
- weigh_cb, (void *)&page_index);
- if (result == CLP_GANG_ABORT)
- break;
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
+
+ if (!osc_page_gang_lookup(env, io, oscobj,
+ page_index, cl_index(obj, end),
+ weigh_cb, (void *)&page_index))
+ result = 1;
cl_io_fini(env, io);
- return result == CLP_GANG_ABORT ? 1 : 0;
+ return result;
}
/**
unlock_res_and_lock(dlmlock);
if (obj == NULL)
- GOTO(out, weight = 1);
+ GOTO(out, weight = 0);
spin_lock(&obj->oo_ol_spin);
list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
RETURN(0);
if ((oscl->ols_flags & LDLM_FL_NO_EXPANSION) &&
- !(exp_connect_lockahead_old(exp) || exp_connect_lockahead(exp))) {
+ !exp_connect_lockahead(exp)) {
result = -EOPNOTSUPP;
- CERROR("%s: server does not support lockahead/locknoexpand:"
- "rc = %d\n", exp->exp_obd->obd_name, result);
+ CERROR("%s: server does not support lockahead/locknoexpand: rc = %d\n",
+ exp->exp_obd->obd_name, result);
RETURN(result);
}
struct ldlm_lock *lock = NULL;
enum ldlm_mode mode;
__u64 flags;
+ enum ldlm_match_flags match_flags = 0;
ENTRY;
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
if (dap_flags & OSC_DAP_FL_TEST_LOCK)
flags |= LDLM_FL_TEST_LOCK;
+
+ if (dap_flags & OSC_DAP_FL_AST)
+ match_flags |= LDLM_MATCH_AST;
+
+ if (dap_flags & OSC_DAP_FL_CANCELING)
+ match_flags |= LDLM_MATCH_UNREF;
+
+ if (dap_flags & OSC_DAP_FL_RIGHT)
+ match_flags |= LDLM_MATCH_RIGHT;
+
/*
* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too
again:
mode = osc_match_base(env, osc_export(obj), resname, LDLM_EXTENT,
policy, LCK_PR | LCK_PW | LCK_GROUP, &flags,
- obj, &lockh, dap_flags & OSC_DAP_FL_CANCELING);
+ obj, &lockh, match_flags);
if (mode != 0) {
lock = ldlm_handle2lock(&lockh);
/* RACE: the lock is cancelled so let's try again */