lu_ref_init(&lock->l_reference);
lu_ref_add(&lock->l_reference, "hash", lock);
- lock->l_callback_timeout = 0;
+ lock->l_callback_timestamp = 0;
lock->l_activity = 0;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
const struct ldlm_res_id *new_resid)
{
- struct ldlm_resource *oldres = lock->l_resource;
+ struct ldlm_resource *oldres;
struct ldlm_resource *newres;
int type;
ENTRY;
LASSERT(ns_is_client(ns));
- lock_res_and_lock(lock);
- if (memcmp(new_resid, &lock->l_resource->lr_name,
- sizeof(lock->l_resource->lr_name)) == 0) {
+ oldres = lock_res_and_lock(lock);
+ if (memcmp(new_resid, &oldres->lr_name,
+ sizeof(oldres->lr_name)) == 0) {
/* Nothing to do */
unlock_res_and_lock(lock);
RETURN(0);
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
if ((flags == 0) && !ldlm_is_destroyed(lock)) {
- lu_ref_add_atomic(&lock->l_reference, "handle", current);
+ lu_ref_add_atomic(&lock->l_reference, "handle", lock);
RETURN(lock);
}
LASSERT(lock->l_resource != NULL);
- lu_ref_add_atomic(&lock->l_reference, "handle", current);
+ lu_ref_add_atomic(&lock->l_reference, "handle", lock);
if (unlikely(ldlm_is_destroyed(lock))) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
*/
void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
{
- struct ldlm_namespace *ns;
- ENTRY;
+ struct ldlm_namespace *ns;
- lock_res_and_lock(lock);
+ ENTRY;
+
+ lock_res_and_lock(lock);
- ns = ldlm_lock_to_ns(lock);
+ ns = ldlm_lock_to_ns(lock);
- ldlm_lock_decref_internal_nolock(lock, mode);
+ ldlm_lock_decref_internal_nolock(lock, mode);
if ((ldlm_is_local(lock) || lock->l_req_mode == LCK_GROUP) &&
!lock->l_readers && !lock->l_writers) {
}
if (!lock->l_readers && !lock->l_writers && ldlm_is_cbpending(lock)) {
+ unsigned int mask = D_DLMTRACE;
+
/* If we received a blocked AST and this was the last reference,
* run the callback. */
if (ldlm_is_ns_srv(lock) && lock->l_export)
- CERROR("FL_CBPENDING set on non-local lock--just a "
- "warning\n");
+ mask |= D_WARNING;
+ LDLM_DEBUG_LIMIT(mask, lock,
+ "final decref done on %sCBPENDING lock",
+ mask & D_WARNING ? "non-local " : "");
- LDLM_DEBUG(lock, "final decref done on cbpending lock");
-
- LDLM_LOCK_GET(lock); /* dropped by bl thread */
- ldlm_lock_remove_from_lru(lock);
- unlock_res_and_lock(lock);
+ LDLM_LOCK_GET(lock); /* dropped by bl thread */
+ ldlm_lock_remove_from_lru(lock);
+ unlock_res_and_lock(lock);
if (ldlm_is_fail_loc(lock))
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
+ OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
if (ldlm_is_atomic_cb(lock) ||
ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
- ldlm_handle_bl_callback(ns, NULL, lock);
+ ldlm_handle_bl_callback(ns, NULL, lock);
} else if (ns_is_client(ns) &&
- !lock->l_readers && !lock->l_writers &&
+ !lock->l_readers && !lock->l_writers &&
!ldlm_is_no_lru(lock) &&
!ldlm_is_bl_ast(lock) &&
!ldlm_is_converting(lock)) {
- LDLM_DEBUG(lock, "add lock into lru list");
-
- /* If this is a client-side namespace and this was the last
- * reference, put it on the LRU. */
- ldlm_lock_add_to_lru(lock);
- unlock_res_and_lock(lock);
+ /* If this is a client-side namespace and this was the last
+ * reference, put it on the LRU.
+ */
+ ldlm_lock_add_to_lru(lock);
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "add lock into lru list");
if (ldlm_is_fail_loc(lock))
- OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
-
- /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
- * are not supported by the server, otherwise, it is done on
- * enqueue. */
- if (!exp_connect_cancelset(lock->l_conn_export) &&
- !ns_connect_lru_resize(ns))
- ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
- } else {
- LDLM_DEBUG(lock, "do not add lock into lru list");
- unlock_res_and_lock(lock);
- }
+ OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
- EXIT;
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
+ } else {
+ LDLM_DEBUG(lock, "do not add lock into lru list");
+ unlock_res_and_lock(lock);
+ }
+
+ EXIT;
}
/**
if (ldlm_is_cbpending(lock) &&
!(data->lmd_flags & LDLM_FL_CBPENDING))
return INTERVAL_ITER_CONT;
- if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+ if (!(data->lmd_match & LDLM_MATCH_UNREF) && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
return INTERVAL_ITER_CONT;
/* When we search for ast_data, we are not doing a traditional match,
* so we don't worry about IBITS or extent matching.
*/
- if (data->lmd_has_ast_data) {
+ if (data->lmd_match & (LDLM_MATCH_AST | LDLM_MATCH_AST_ANY)) {
if (!lock->l_ast_data)
return INTERVAL_ITER_CONT;
- goto matched;
+ if (data->lmd_match & LDLM_MATCH_AST_ANY)
+ goto matched;
}
match = lock->l_req_mode;
/* We match if we have existing lock with same or wider set
of bits. */
- if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+ if (!(data->lmd_match & LDLM_MATCH_UNREF) && LDLM_HAVE_MASK(lock, GONE))
return INTERVAL_ITER_CONT;
if (!equi(data->lmd_flags & LDLM_FL_LOCAL_ONLY, ldlm_is_local(lock)))
enum ldlm_type type,
union ldlm_policy_data *policy,
enum ldlm_mode mode,
- struct lustre_handle *lockh, int unref)
+ struct lustre_handle *lockh,
+ enum ldlm_match_flags match_flags)
{
struct ldlm_match_data data = {
.lmd_old = NULL,
.lmd_policy = policy,
.lmd_flags = flags,
.lmd_skip_flags = skip_flags,
- .lmd_unref = unref,
- .lmd_has_ast_data = false,
+ .lmd_match = match_flags,
};
struct ldlm_resource *res;
struct ldlm_lock *lock;
lock->l_req_mode = mode;
lock->l_ast_data = data;
- lock->l_pid = current_pid();
+ lock->l_pid = current->pid;
if (ns_is_server(ns))
ldlm_set_ns_srv(lock);
if (cbs) {
void *cookie, __u64 *flags)
{
struct ldlm_lock *lock = *lockp;
- struct ldlm_resource *res = lock->l_resource;
- int local = ns_is_client(ldlm_res_to_ns(res));
+ struct ldlm_resource *res;
+ int local = ns_is_client(ns);
enum ldlm_error rc = ELDLM_OK;
struct ldlm_interval *node = NULL;
#ifdef HAVE_SERVER_SUPPORT
RETURN(ELDLM_OK);
}
+#ifdef HAVE_SERVER_SUPPORT
/* For a replaying lock, it might be already in granted list. So
* unlinking the lock will cause the interval node to be freed, we
* have to allocate the interval node early otherwise we can't regrant
- * this lock in the future. - jay */
- if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
+ * this lock in the future. - jay
+ *
+ * The only time an ldlm_lock's ldlm_resource changes is when
+ * ldlm_lock_change_resource() is called, and that happens only in
+ * the Lustre client case.
+ */
+ if (!local && (*flags & LDLM_FL_REPLAY) &&
+ lock->l_resource->lr_type == LDLM_EXTENT)
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
-#ifdef HAVE_SERVER_SUPPORT
- reconstruct = !local && res->lr_type == LDLM_FLOCK &&
+ reconstruct = !local && lock->l_resource->lr_type == LDLM_FLOCK &&
!(*flags & LDLM_FL_TEST_LOCK);
if (reconstruct) {
rc = req_can_reconstruct(cookie, NULL);
}
}
#endif
-
- lock_res_and_lock(lock);
+ res = lock_res_and_lock(lock);
if (local && ldlm_is_granted(lock)) {
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
lock->l_flags, nid,
lock->l_remote_handle.cookie,
exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
- lock->l_pid, lock->l_callback_timeout,
+ lock->l_pid, lock->l_callback_timestamp,
lock->l_lvb_type);
va_end(args);
return;
lock->l_flags, nid,
lock->l_remote_handle.cookie,
exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
- lock->l_pid, lock->l_callback_timeout,
+ lock->l_pid, lock->l_callback_timestamp,
lock->l_lvb_type);
break;
lock->l_flags, nid,
lock->l_remote_handle.cookie,
exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
- lock->l_pid, lock->l_callback_timeout);
+ lock->l_pid, lock->l_callback_timestamp);
break;
case LDLM_IBITS:
lock->l_flags, nid,
lock->l_remote_handle.cookie,
exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
- lock->l_pid, lock->l_callback_timeout,
+ lock->l_pid, lock->l_callback_timestamp,
lock->l_lvb_type);
break;
lock->l_flags, nid,
lock->l_remote_handle.cookie,
exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
- lock->l_pid, lock->l_callback_timeout,
+ lock->l_pid, lock->l_callback_timestamp,
lock->l_lvb_type);
break;
}