prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
EXIT;
- return;
}
/**
}
/**
- * Describe the overlap between two locks. itree_overlap_cb data.
- */
-struct lock_match_data {
- struct ldlm_lock *lmd_old;
- struct ldlm_lock *lmd_lock;
- enum ldlm_mode *lmd_mode;
- union ldlm_policy_data *lmd_policy;
- __u64 lmd_flags;
- __u64 lmd_skip_flags;
- int lmd_unref;
-};
-
-/**
* Check if the given @lock meets the criteria for a match.
* A reference on the lock is taken if matched.
*
* \param lock test-against this lock
* \param data parameters
*/
-static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
+static int lock_matches(struct ldlm_lock *lock, struct ldlm_match_data *data)
{
union ldlm_policy_data *lpol = &lock->l_policy_data;
- enum ldlm_mode match;
+ enum ldlm_mode match = LCK_MINMODE;
if (lock == data->lmd_old)
return INTERVAL_ITER_STOP;
if (!(lock->l_req_mode & *data->lmd_mode))
return INTERVAL_ITER_CONT;
+
+ /* When we search for ast_data, we are not doing a traditional match,
+ * so we don't worry about IBITS or extent matching.
+ */
+ if (data->lmd_has_ast_data) {
+ if (!lock->l_ast_data)
+ return INTERVAL_ITER_CONT;
+
+ goto matched;
+ }
+
match = lock->l_req_mode;
switch (lock->l_resource->lr_type) {
if (data->lmd_skip_flags & lock->l_flags)
return INTERVAL_ITER_CONT;
+matched:
if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
LDLM_LOCK_GET(lock);
ldlm_lock_touch_in_lru(lock);
static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
{
struct ldlm_interval *node = to_ldlm_interval(in);
- struct lock_match_data *data = args;
+ struct ldlm_match_data *data = args;
struct ldlm_lock *lock;
int rc;
*
* \retval a referenced lock or NULL.
*/
-static struct ldlm_lock *search_itree(struct ldlm_resource *res,
- struct lock_match_data *data)
+struct ldlm_lock *search_itree(struct ldlm_resource *res,
+ struct ldlm_match_data *data)
{
struct interval_node_extent ext = {
.start = data->lmd_policy->l_extent.start,
return NULL;
}
+EXPORT_SYMBOL(search_itree);
/**
* \retval a referenced lock or NULL.
*/
static struct ldlm_lock *search_queue(struct list_head *queue,
- struct lock_match_data *data)
+ struct ldlm_match_data *data)
{
struct ldlm_lock *lock;
int rc;
enum ldlm_mode mode,
struct lustre_handle *lockh, int unref)
{
- struct lock_match_data data = {
+ struct ldlm_match_data data = {
.lmd_old = NULL,
.lmd_lock = NULL,
.lmd_mode = &mode,
.lmd_flags = flags,
.lmd_skip_flags = skip_flags,
.lmd_unref = unref,
+ .lmd_has_ast_data = false,
};
struct ldlm_resource *res;
struct ldlm_lock *lock;
{
struct ldlm_resource *res = lock->l_resource;
enum ldlm_error rc = ELDLM_OK;
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ LIST_HEAD(rpc_list);
ldlm_processing_policy policy;
ENTRY;
__u64 flags;
int rc = LDLM_ITER_CONTINUE;
enum ldlm_error err;
- struct list_head bl_ast_list = LIST_HEAD_INIT(bl_ast_list);
+ LIST_HEAD(bl_ast_list);
ENTRY;
restart:
list_for_each_safe(tmp, pos, queue) {
struct ldlm_lock *pending;
- struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
+ LIST_HEAD(rpc_list);
pending = list_entry(tmp, struct ldlm_lock, l_res_link);
if (!ldlm_is_cancel(lock)) {
ldlm_set_cancel(lock);
if (lock->l_blocking_ast) {
- unlock_res_and_lock(lock);
- lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
- LDLM_CB_CANCELING);
- lock_res_and_lock(lock);
- } else {
- LDLM_DEBUG(lock, "no blocking ast");
- }
+ unlock_res_and_lock(lock);
+ lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
+ LDLM_CB_CANCELING);
+ lock_res_and_lock(lock);
+ } else {
+ LDLM_DEBUG(lock, "no blocking ast");
+ }
/* only canceller can set bl_done bit */
ldlm_set_bl_done(lock);
wake_up_all(&lock->l_waitq);
} else if (!ldlm_is_bl_done(lock)) {
- struct l_wait_info lwi = { 0 };
-
/* The lock is guaranteed to have been canceled once
* returning from this function. */
unlock_res_and_lock(lock);
- l_wait_event(lock->l_waitq, is_bl_done(lock), &lwi);
+ wait_event_idle(lock->l_waitq, is_bl_done(lock));
lock_res_and_lock(lock);
}
}
}
/**
- * Downgrade an PW/EX lock to COS mode.
+ * Downgrade a PW/EX lock to COS | CR mode.
*
 * A lock mode conversion from PW/EX mode to a less-conflicting mode. The
 * conversion may fail if the lock was canceled before downgrade, but it doesn't
* things are cleared, so any pending or new blocked lock on that lock will
* cause new call to blocking_ast and force resource object commit.
*
+ * Also used by layout_change to replace EX lock to CR lock.
+ *
* \param lock A lock to convert
* \param new_mode new lock mode
*/
#ifdef HAVE_SERVER_SUPPORT
ENTRY;
- LASSERT(new_mode == LCK_COS);
+ LASSERT(new_mode == LCK_COS || new_mode == LCK_CR);
lock_res_and_lock(lock);