long delay;
int result;
- if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
lwd.lwd_lock = lock;
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
if (ns_is_client(ldlm_lock_to_ns(lock)) &&
OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
+ ldlm_set_fail_loc(lock);
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
int do_ast;
ENTRY;
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
do_ast = (!lock->l_readers && !lock->l_writers);
unlock_res_and_lock(lock);
/* NB: we don't have any lock now (lock_res_and_lock)
* because it's a new lock */
ldlm_lock_addref_internal_nolock(lock, mode);
- lock->l_flags |= LDLM_FL_LOCAL;
+ ldlm_set_local(lock);
if (*flags & LDLM_FL_ATOMIC_CB)
- lock->l_flags |= LDLM_FL_ATOMIC_CB;
+ ldlm_set_atomic_cb(lock);
if (policy != NULL)
lock->l_policy_data = *policy;
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
- /* Make sure that this lock will not be found by raced
- * bl_ast and -EINVAL reply is sent to server anyways.
- * bug 17645 */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
- LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
- need_cancel = 1;
+ !ldlm_is_failed(lock)) {
+ /* Make sure that this lock will not be found by a raced
+ * bl_ast and that an -EINVAL reply is sent to the server
+ * anyway.  b=17645 */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
+ need_cancel = 1;
}
unlock_res_and_lock(lock);
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
+ LDLM_FL_INHERIT_MASK);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
* to wait with no timeout as well */
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
                                      LDLM_FL_NO_TIMEOUT);
if ((*flags) & LDLM_FL_AST_SENT ||
    /* Cancel extent locks as soon as possible on a liblustre client,
     * because it cannot handle asynchronous ASTs robustly (see
     * bug 7311). */
(LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
}
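The hunk above only inherits a whitelisted subset of the server's reply flags into l_flags (LDLM_FL_INHERIT_MASK), plus NO_TIMEOUT so ldlm_lock_match() will also wait without a timeout. A minimal, self-contained sketch of that masking follows; the bit values and the identity wire-to-local mapping are illustrative assumptions, not the real lustre_dlm_flags.h definitions.

#include <stdint.h>
#include <stdio.h>

/* Bit positions below are illustrative, not the real lustre_dlm_flags.h values. */
#define LDLM_FL_NO_TIMEOUT      (1ULL << 15)
#define LDLM_FL_CANCEL_ON_BLOCK (1ULL << 23)
#define LDLM_FL_INHERIT_MASK    (LDLM_FL_CANCEL_ON_BLOCK)

/* The real helper translates wire bits to internal bits; identity here. */
static uint64_t ldlm_flags_from_wire(uint64_t wire)
{
	return wire;
}

int main(void)
{
	uint64_t reply_flags = LDLM_FL_CANCEL_ON_BLOCK | LDLM_FL_NO_TIMEOUT;
	uint64_t l_flags = 0;

	/* inherit only the whitelisted flags, then NO_TIMEOUT separately */
	l_flags |= ldlm_flags_from_wire(reply_flags & LDLM_FL_INHERIT_MASK);
	l_flags |= ldlm_flags_from_wire(reply_flags & LDLM_FL_NO_TIMEOUT);
	printf("l_flags = 0x%llx\n", (unsigned long long)l_flags);
	return 0;
}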
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references. */
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
(LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
- ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
- unlock_res_and_lock(lock);
+ ldlm_cancel_callback(lock);
+ rc = (ldlm_is_bl_ast(lock)) ?
+ LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ unlock_res_and_lock(lock);
if (local_only) {
CDEBUG(D_DLMTRACE, "not sending request (at caller's "
int unused, int added,
int count)
{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
- ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
- lock_res_and_lock(lock);
-
- /* don't check added & count since we want to process all locks
- * from unused list */
- switch (lock->l_resource->lr_type) {
- case LDLM_EXTENT:
- case LDLM_IBITS:
- if (cb && cb(lock))
- break;
- default:
- result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
- break;
- }
+ ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
+ ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
+
+ /* don't check added & count since we want to process all locks
+ * from the unused list.
+ * It's fine not to take the lock to access lock->l_resource here,
+ * since the lock has already been granted and so it won't change. */
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ case LDLM_IBITS:
+ if (cb && cb(lock))
+ break;
+ default:
+ result = LDLM_POLICY_SKIP_LOCK;
+ lock_res_and_lock(lock);
+ ldlm_set_skipped(lock);
+ unlock_res_and_lock(lock);
+ break;
+ }
- unlock_res_and_lock(lock);
- RETURN(result);
+ RETURN(result);
}
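For readers unfamiliar with the ldlm_is_*()/ldlm_set_*() helpers this patch switches to: they are thin wrappers over single-bit tests and sets on lock->l_flags, generated in lustre_dlm_flags.h. A simplified, self-contained sketch follows; the struct, bit value, and macro bodies are stand-ins, not the real definitions.

#include <stdio.h>

struct ldlm_lock {
	unsigned long long l_flags;   /* stand-in for the real 64-bit flag word */
};

#define LDLM_FL_SKIPPED        (1ULL << 38)   /* illustrative bit position */

#define LDLM_TEST_FLAG(_l, _b)  (((_l)->l_flags & (_b)) != 0)
#define LDLM_SET_FLAG(_l, _b)   ((_l)->l_flags |= (_b))
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))

#define ldlm_is_skipped(_l)    LDLM_TEST_FLAG((_l), LDLM_FL_SKIPPED)
#define ldlm_set_skipped(_l)   LDLM_SET_FLAG((_l), LDLM_FL_SKIPPED)
#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), LDLM_FL_SKIPPED)

int main(void)
{
	struct ldlm_lock lock = { 0 };

	ldlm_set_skipped(&lock);            /* was: lock.l_flags |= LDLM_FL_SKIPPED */
	printf("skipped=%d\n", ldlm_is_skipped(&lock));
	ldlm_clear_skipped(&lock);
	printf("skipped=%d\n", ldlm_is_skipped(&lock));
	return 0;
}

The rest of the diff replaces open-coded lock->l_flags tests with these helpers mechanically; assignments that set several flags at once remain open-coded as a single OR of the flag constants.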
/**
cfs_list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ LASSERT(!ldlm_is_bl_ast(lock));
- if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
- /* already processed */
- continue;
+ if (flags & LDLM_CANCEL_NO_WAIT &&
+ ldlm_is_skipped(lock))
+ /* already processed */
+ continue;
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again. */
- if (!(lock->l_flags & LDLM_FL_CANCELING))
+ if (!ldlm_is_canceling(lock))
break;
- ldlm_lock_remove_from_lru_nolock(lock);
- }
- if (&lock->l_lru == &ns->ns_unused_list)
- break;
+ ldlm_lock_remove_from_lru_nolock(lock);
+ }
+ if (&lock->l_lru == &ns->ns_unused_list)
+ break;
- LDLM_LOCK_GET(lock);
+ LDLM_LOCK_GET(lock);
spin_unlock(&ns->ns_lock);
- lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
+ lu_ref_add(&lock->l_reference, __FUNCTION__, current);
/* Pass the lock through the policy filter and see if it
* should stay in LRU.
* old locks, but additionally choose them by
* their weight. Big extent locks will stay in
* the cache. */
- result = pf(ns, lock, unused, added, count);
- if (result == LDLM_POLICY_KEEP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
+ result = pf(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
break;
}
if (result == LDLM_POLICY_SKIP_LOCK) {
lu_ref_del(&lock->l_reference,
- __func__, cfs_current());
+ __func__, current);
LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
- continue;
- }
+ continue;
+ }
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ lock_res_and_lock(lock);
+ /* Check flags again under the lock. */
+ if (ldlm_is_canceling(lock) ||
+ (ldlm_lock_remove_from_lru(lock) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
* by itself, or the lock is no longer unused. */
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
spin_lock(&ns->ns_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
- /* If we have chosen to cancel this lock voluntarily, we
- * better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
- * silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
- /* Setting the CBPENDING flag is a little misleading,
- * but prevents an important race; namely, once
- * CBPENDING is set, the lock can accumulate no more
- * readers/writers. Since readers and writers are
- * already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
- /* We can't re-add to l_lru as it confuses the
- * refcounting in ldlm_lock_remove_from_lru() if an AST
- * arrives after we drop lr_lock below. We use l_bl_ast
- * and can't use l_pending_chain as it is used both on
- * server and client nevertheless bug 5666 says it is
- * used only on server */
- LASSERT(cfs_list_empty(&lock->l_bl_ast));
- cfs_list_add(&lock->l_bl_ast, cancels);
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
+ continue;
+ }
+ LASSERT(!lock->l_readers && !lock->l_writers);
+
+ /* If we have chosen to cancel this lock voluntarily, we
+ * had better send a cancel notification to the server, so
+ * that it frees the appropriate state. This might lead to
+ * a race where, while we are cancelling here, the server
+ * is also silently cancelling this lock. */
+ ldlm_clear_cancel_on_block(lock);
+
+ /* Setting the CBPENDING flag is a little misleading,
+ * but prevents an important race; namely, once
+ * CBPENDING is set, the lock can accumulate no more
+ * readers/writers. Since readers and writers are
+ * already zero here, ldlm_lock_decref() won't see
+ * this flag and call l_blocking_ast */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+
+ /* We can't re-add to l_lru as it confuses the
+ * refcounting in ldlm_lock_remove_from_lru() if an AST
+ * arrives after we drop lr_lock below. We use l_bl_ast
+ * and can't use l_pending_chain, as it is used on both
+ * the server and the client, even though bug 5666 says
+ * it is used only on the server. */
+ LASSERT(cfs_list_empty(&lock->l_bl_ast));
+ cfs_list_add(&lock->l_bl_ast, cancels);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
spin_lock(&ns->ns_lock);
added++;
unused--;
* list.
*/
int ldlm_cancel_resource_local(struct ldlm_resource *res,
- cfs_list_t *cancels,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode, int lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque)
+ cfs_list_t *cancels,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode, __u64 lock_flags,
+ ldlm_cancel_flags_t cancel_flags, void *opaque)
{
struct ldlm_lock *lock;
int count = 0;
/* If somebody is already doing CANCEL, or blocking AST came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
- continue;
+ if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
+ continue;
if (lockmode_compat(lock->l_granted_mode, mode))
continue;
policy->l_inodebits.bits))
continue;
- /* See CBPENDING comment in ldlm_cancel_lru */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
- lock_flags;
+ /* See CBPENDING comment in ldlm_cancel_lru */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
+ lock_flags;
LASSERT(cfs_list_empty(&lock->l_bl_ast));
cfs_list_add(&lock->l_bl_ast, cancels);
}
static int replay_lock_interpret(const struct lu_env *env,
- struct ptlrpc_request *req,
- struct ldlm_async_args *aa, int rc)
+ struct ptlrpc_request *req,
+ struct ldlm_async_args *aa, int rc)
{
- struct ldlm_lock *lock;
- struct ldlm_reply *reply;
- struct obd_export *exp;
-
- ENTRY;
- cfs_atomic_dec(&req->rq_import->imp_replay_inflight);
- if (rc != ELDLM_OK)
- GOTO(out, rc);
+ struct ldlm_lock *lock;
+ struct ldlm_reply *reply;
+ struct obd_export *exp;
+ ENTRY;
+ atomic_dec(&req->rq_import->imp_replay_inflight);
+ if (rc != ELDLM_OK)
+ GOTO(out, rc);
reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
if (reply == NULL)
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
+ if (ldlm_is_canceling(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
RETURN(0);
}
/* If this is a reply-less callback lock, we cannot replay it, since the
 * server might have dropped it long ago, while the notification of that
 * event was lost on the network (and the server may have already granted
 * a conflicting lock). */
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
RETURN(0);
* also, we mark the request to be put on a dedicated
* queue to be processed after all request replays.
* bug 6063 */
- lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
+ lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
- LDLM_DEBUG(lock, "replaying lock:");
+ LDLM_DEBUG(lock, "replaying lock:");
- cfs_atomic_inc(&req->rq_import->imp_replay_inflight);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
- aa = ptlrpc_req_async_args(req);
- aa->lock_handle = body->lock_handle[0];
- req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
- ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->lock_handle = body->lock_handle[0];
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ RETURN(0);
}
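The replay path above brackets each queued replay request with imp_replay_inflight: atomic_inc() before the request is handed to ptlrpcd, atomic_dec() in replay_lock_interpret() when the reply (or an error) comes back, and ldlm_replay_locks() holds its own reference so the counter cannot reach zero before all requests are queued. A user-space sketch of that bookkeeping pattern follows; the names and the fake dispatch are illustrative, not the ptlrpc API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int replay_inflight;        /* plays the role of imp_replay_inflight */

/* Reply interpreter: runs when the (pretend) reply arrives. */
static int replay_interpret(int rc)
{
	atomic_fetch_sub(&replay_inflight, 1);
	return rc;
}

/* Queue one replay request: bump the counter before handing it off. */
static void replay_one(void)
{
	atomic_fetch_add(&replay_inflight, 1);
	/* ... build the request and set its interpreter here ... */
	replay_interpret(0);              /* pretend the reply came back at once */
}

int main(void)
{
	/* Hold a reference so the counter cannot drop to zero mid-loop. */
	atomic_fetch_add(&replay_inflight, 1);
	for (int i = 0; i < 3; i++)
		replay_one();
	atomic_fetch_sub(&replay_inflight, 1);

	printf("inflight=%d\n", atomic_load(&replay_inflight));
	return 0;
}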
/**
int ldlm_replay_locks(struct obd_import *imp)
{
- struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
- CFS_LIST_HEAD(list);
- struct ldlm_lock *lock, *next;
- int rc = 0;
+ struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
+ CFS_LIST_HEAD(list);
+ struct ldlm_lock *lock, *next;
+ int rc = 0;
- ENTRY;
+ ENTRY;
- LASSERT(cfs_atomic_read(&imp->imp_replay_inflight) == 0);
+ LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
- /* don't replay locks if import failed recovery */
- if (imp->imp_vbr_failed)
- RETURN(0);
+ /* don't replay locks if import failed recovery */
+ if (imp->imp_vbr_failed)
+ RETURN(0);
- /* ensure this doesn't fall to 0 before all have been queued */
- cfs_atomic_inc(&imp->imp_replay_inflight);
+ /* ensure this doesn't fall to 0 before all have been queued */
+ atomic_inc(&imp->imp_replay_inflight);
- if (ldlm_cancel_unused_locks_before_replay)
- ldlm_cancel_unused_locks_for_replay(ns);
+ if (ldlm_cancel_unused_locks_before_replay)
+ ldlm_cancel_unused_locks_for_replay(ns);
- ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
+ ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
- cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
- cfs_list_del_init(&lock->l_pending_chain);
- if (rc) {
- LDLM_LOCK_RELEASE(lock);
- continue; /* or try to do the rest? */
- }
- rc = replay_one_lock(imp, lock);
- LDLM_LOCK_RELEASE(lock);
- }
+ cfs_list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+ cfs_list_del_init(&lock->l_pending_chain);
+ if (rc) {
+ LDLM_LOCK_RELEASE(lock);
+ continue; /* or try to do the rest? */
+ }
+ rc = replay_one_lock(imp, lock);
+ LDLM_LOCK_RELEASE(lock);
+ }
- cfs_atomic_dec(&imp->imp_replay_inflight);
+ atomic_dec(&imp->imp_replay_inflight);
- RETURN(rc);
+ RETURN(rc);
}
EXPORT_SYMBOL(ldlm_replay_locks);