* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* @{
*/
+#define _PAGEREF_MAGIC (-10000000)
+
/*****************************************************************************
*
* Type conversions.
if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
struct cl_object *obj = olck->ols_cl.cls_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ __u64 old_kms;
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid possible races. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
/* Update the kms. Need to loop over all granted locks;
* not a problem for the client */
attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- unlock_res_and_lock(dlmlock);
- cl_object_attr_lock(obj);
cl_object_attr_set(env, obj, attr, CAT_KMS);
cl_object_attr_unlock(obj);
- } else
- unlock_res_and_lock(dlmlock);
+ }
+ unlock_res_and_lock(dlmlock);
/* release a reference taken in osc_lock_upcall0(). */
LASSERT(olck->ols_has_ref);
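
The reworked hunk above closes a narrow race: old_kms was previously read before cl_object_attr_lock() was taken, so a concurrent KMS update could be lost when the stale value was fed to ldlm_extent_shift_kms(). A minimal sketch of the pattern, assuming nothing Lustre-specific (plain pthreads; attr_lock, kms and shift_kms are invented stand-ins):

#include <pthread.h>

static pthread_mutex_t attr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long kms;          /* known-minimum-size analogue */

static unsigned long long shift_kms(unsigned long long old)
{
        return old + 1;                 /* stand-in computation */
}

void update_kms_racy(void)
{
        unsigned long long old = kms;   /* BAD: read outside the lock */

        pthread_mutex_lock(&attr_lock);
        kms = shift_kms(old);           /* may clobber a concurrent update */
        pthread_mutex_unlock(&attr_lock);
}

void update_kms_safe(void)
{
        unsigned long long old;

        pthread_mutex_lock(&attr_lock);
        old = kms;                      /* read under the same lock */
        kms = shift_kms(old);
        pthread_mutex_unlock(&attr_lock);
}
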
{
struct osc_lock *ols = cl2osc_lock(slice);
- LASSERT(ols->ols_state == OLS_GRANTED ||
- ols->ols_state == OLS_UPCALL_RECEIVED);
LINVRNT(osc_lock_invariant(ols));
- if (ols->ols_glimpse) {
- LASSERT(ols->ols_hold == 0);
+ switch (ols->ols_state) {
+ case OLS_NEW:
+ LASSERT(!ols->ols_hold);
+ LASSERT(ols->ols_agl);
+ return 0;
+ case OLS_UPCALL_RECEIVED:
+ LASSERT(!ols->ols_hold);
+ ols->ols_state = OLS_NEW;
return 0;
+ case OLS_GRANTED:
+ LASSERT(!ols->ols_glimpse);
+ LASSERT(ols->ols_hold);
+ /*
+ * Move the lock into OLS_RELEASED state before calling
+ * osc_cancel_base() so that a possible synchronous cancellation
+ * (which always happens, e.g., for liblustre) sees that the lock
+ * is released.
+ */
+ ols->ols_state = OLS_RELEASED;
+ return osc_lock_unhold(ols);
+ default:
+ CERROR("Impossible state: %d\n", ols->ols_state);
+ LBUG();
}
- LASSERT(ols->ols_hold);
-
- /*
- * Move lock into OLS_RELEASED state before calling osc_cancel_base()
- * so that possible synchronous cancellation (that always happens
- * e.g., for liblustre) sees that lock is released.
- */
- ols->ols_state = OLS_RELEASED;
- return osc_lock_unhold(ols);
}
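
The unuse path above now dispatches on the lock state instead of asserting a fixed pair of states, so each reachable state carries its own invariants and anything unexpected trips LBUG(). A rough stand-alone sketch of that shape (invented names; the real states and invariants are richer):

#include <stdio.h>
#include <stdlib.h>

enum state { S_NEW, S_ENQUEUED, S_UPCALL_RECEIVED, S_GRANTED, S_RELEASED };

int lock_unuse(enum state *s)
{
        switch (*s) {
        case S_NEW:
        case S_UPCALL_RECEIVED:
                *s = S_NEW;             /* nothing held yet; just rewind */
                return 0;
        case S_GRANTED:
                *s = S_RELEASED;        /* mark released before cancelling */
                return 0;               /* ...then cancel with the server */
        default:
                fprintf(stderr, "impossible state: %d\n", *s);
                abort();                /* analogue of LBUG() */
        }
}
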
static void osc_lock_fini(const struct lu_env *env,
*/
osc_lock_unhold(ols);
LASSERT(ols->ols_lock == NULL);
+ LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
+ cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
} else {
/*
* In reality, the OST server expects ->lsm_object_id and
- * ->lsm_object_gr in rename.
+ * ->lsm_object_seq in rename.
*/
- osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
+ osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
resname);
}
}
ENTRY;
- if (!(olck->ols_flags & LDLM_FL_LVB_READY)) {
- EXIT;
- return;
- }
+ if (!(olck->ols_flags & LDLM_FL_LVB_READY))
+ RETURN_EXIT;
lvb = &olck->ols_lvb;
obj = olck->ols_cl.cls_obj;
LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
ENTRY;
- if (olck->ols_state != OLS_GRANTED) {
+ if (olck->ols_state < OLS_GRANTED) {
lock = olck->ols_cl.cls_lock;
ext = &dlmlock->l_policy_data.l_extent;
descr = &osc_env_info(env)->oti_descr;
} else if (olck->ols_state == OLS_CANCELLED) {
rc = -EIO;
} else {
- CERROR("Impossible state: %i\n", olck->ols_state);
+ CERROR("Impossible state: %d\n", olck->ols_state);
LBUG();
}
if (rc) {
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
cfs_spin_unlock(&osc_ast_guard);
+ ldlm_lock_fail_match_locked(dlmlock);
unlock_res_and_lock(dlmlock);
LDLM_LOCK_PUT(dlmlock);
}
} else {
- if (olck->ols_glimpse)
+ if (olck->ols_glimpse) {
olck->ols_glimpse = 0;
+ olck->ols_agl = 0;
+ }
osc_lock_upcall0(env, olck);
}
rc = 0;
}
- if (rc == 0)
- /* on error, lock was signaled by cl_lock_error() */
+ if (rc == 0) {
cl_lock_signal(env, lock);
- else
+ /* del user for lock upcall cookie */
+ cl_unuse_try(env, lock);
+ } else {
+ /* del user for lock upcall cookie */
+ cl_lock_user_del(env, lock);
cl_lock_error(env, lock, rc);
+ }
cl_lock_mutex_put(env, lock);
/* release cookie reference, acquired by osc_lock_enqueue() */
lu_ref_del(&lock->cll_reference, "upcall", lock);
cl_lock_put(env, lock);
+
cl_env_nested_put(&nest, env);
} else
/* should never happen, similar to osc_ldlm_blocking_ast(). */
ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
/* make it enqueue anyway for glimpse lock, because we actually
* don't need to cancel any conflicting locks. */
cfs_spin_unlock(&hdr->coh_lock_guard);
if (conflict) {
- CDEBUG(D_DLMTRACE, "lock %p is confliced with %p, will wait\n",
- lock, conflict);
- lu_ref_add(&conflict->cll_reference, "cancel-wait", lock);
- LASSERT(lock->cll_conflict == NULL);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
+ if (lock->cll_descr.cld_mode == CLM_GROUP) {
+ /* we want a group lock but a previous lock request
+ * conflicts; we do not wait but return 0 so the
+ * request is sent to the server
+ */
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
+ "with %p, no wait, send to server\n",
+ lock, conflict);
+ cl_lock_put(env, conflict);
+ rc = 0;
+ } else {
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
+ "will wait\n",
+ lock, conflict);
+ LASSERT(lock->cll_conflict == NULL);
+ lu_ref_add(&conflict->cll_reference, "cancel-wait",
+ lock);
+ lock->cll_conflict = conflict;
+ rc = CLO_WAIT;
+ }
}
RETURN(rc);
}
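
The new branch gives CLM_GROUP requests special treatment: rather than blocking locally behind a conflicting lock, the request is sent to the server, which orders group locks itself; everything else still waits via CLO_WAIT. Reduced to a hedged sketch with invented names:

enum mode { MODE_READ, MODE_WRITE, MODE_GROUP };
#define WAIT_LOCALLY 1

int conflict_policy(enum mode requested, int has_conflict)
{
        if (!has_conflict)
                return 0;               /* nothing in the way: enqueue */
        if (requested == MODE_GROUP)
                return 0;               /* enqueue anyway; the server
                                         * serializes group locks */
        return WAIT_LOCALLY;            /* CLO_WAIT analogue: park until
                                         * the conflicting lock is gone */
}
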
{
struct osc_lock *ols = cl2osc_lock(slice);
struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
int result;
ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(ols->ols_state == OLS_NEW);
+ LASSERTF(ols->ols_state == OLS_NEW,
+ "Impossible state: %d\n", ols->ols_state);
- osc_lock_build_res(env, obj, resname);
- osc_lock_build_policy(env, lock, policy);
ols->ols_flags = osc_enq2ldlm_flags(enqflags);
+ if (enqflags & CEF_AGL) {
+ ols->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ ols->ols_agl = 1;
+ }
if (ols->ols_flags & LDLM_FL_HAS_INTENT)
ols->ols_glimpse = 1;
- if (!(enqflags & CEF_MUST))
+ if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
/* try to convert this lock to a lockless lock */
osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
result = osc_lock_enqueue_wait(env, ols);
if (result == 0) {
if (!osc_lock_is_lockless(ols)) {
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+
if (ols->ols_locklessable)
ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
/* a reference for lock, passed as an upcall cookie */
cl_lock_get(lock);
lu_ref_add(&lock->cll_reference, "upcall", lock);
+ /* also add a user for the lock */
+ cl_lock_user_add(env, lock);
ols->ols_state = OLS_ENQUEUED;
/*
* ldlm_lock_match(LDLM_FL_LVB_READY) waits for
* LDLM_CP_CALLBACK.
*/
+ osc_lock_build_res(env, obj, resname);
+ osc_lock_build_policy(env, lock, policy);
result = osc_enqueue_base(osc_export(obj), resname,
&ols->ols_flags, policy,
&ols->ols_lvb,
obj->oo_oinfo->loi_kms_valid,
osc_lock_upcall,
ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1);
+ PTLRPCD_SET, 1, ols->ols_agl);
if (result != 0) {
+ cl_lock_user_del(env, lock);
lu_ref_del(&lock->cll_reference,
"upcall", lock);
cl_lock_put(env, lock);
+ if (unlikely(result == -ECANCELED)) {
+ ols->ols_state = OLS_NEW;
+ result = 0;
+ }
}
} else {
ols->ols_state = OLS_GRANTED;
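
Around the enqueue above, the patch pairs the existing upcall-cookie reference with a lock user (cl_lock_user_add()), and rolls both back when osc_enqueue_base() fails, because the upcall that would normally drop them will never run. A generic sketch of that discipline (struct obj and submit_async are invented; the stub stands in for the real asynchronous call):

#include <stdatomic.h>

struct obj {
        atomic_int refs;
};

/* stub standing in for the real asynchronous enqueue */
static int submit_async(struct obj *cookie)
{
        (void)cookie;
        return 0;
}

int enqueue(struct obj *o)
{
        int rc;

        atomic_fetch_add(&o->refs, 1);          /* ref owned by the upcall */
        rc = submit_async(o);
        if (rc != 0)                            /* upcall will never run... */
                atomic_fetch_sub(&o->refs, 1);  /* ...so drop its ref here */
        return rc;
}
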
struct cl_lock *lock = olck->ols_cl.cls_lock;
LINVRNT(osc_lock_invariant(olck));
- if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED)
- return 0;
+
+ if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
+ if (olck->ols_flags & LDLM_FL_LVB_READY) {
+ return 0;
+ } else if (olck->ols_agl) {
+ olck->ols_state = OLS_NEW;
+ } else {
+ LASSERT(lock->cll_error);
+ return lock->cll_error;
+ }
+ }
+
+ if (olck->ols_state == OLS_NEW) {
+ if (lock->cll_descr.cld_enq_flags & CEF_NO_REENQUEUE) {
+ return -ENAVAIL;
+ } else {
+ int rc;
+
+ LASSERT(olck->ols_agl);
+
+ rc = osc_lock_enqueue(env, slice, NULL,
+ CEF_ASYNC | CEF_MUST);
+ if (rc != 0)
+ return rc;
+ else
+ return CLO_REENQUEUED;
+ }
+ }
LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
lock->cll_error == 0, olck->ols_lock != NULL));
if (dlmlock != NULL) {
int do_cancel;
- discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
+ discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
result = osc_lock_flush(olck, discard);
osc_lock_unhold(olck);
lock, result);
}
olck->ols_state = OLS_CANCELLED;
+ olck->ols_flags &= ~LDLM_FL_LVB_READY;
osc_lock_detach(env, olck);
}
-void cl_lock_page_list_fixup(const struct lu_env *env,
- struct cl_io *io, struct cl_lock *lock,
- struct cl_page_list *queue);
-
#ifdef INVARIANT_CHECK
+static int check_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_lock *lock = cbdata;
+
+ if (lock->cll_descr.cld_mode == CLM_READ) {
+ struct cl_lock *tmp;
+ tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj,
+ page, lock, 1, 0);
+ if (tmp != NULL) {
+ cl_lock_put(env, tmp);
+ return CLP_GANG_OKAY;
+ }
+ }
+
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
+ CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
+ return CLP_GANG_ABORT;
+}
+
/**
* Returns true iff there are pages under \a olck not protected by other
* locks.
struct cl_lock_descr *descr;
struct cl_object *obj;
struct osc_object *oob;
- struct cl_page_list *plist;
- struct cl_page *page;
struct cl_env_nest nest;
struct cl_io *io;
struct lu_env *env;
int result;
env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- obj = olck->ols_cl.cls_obj;
- oob = cl2osc(obj);
- io = &oob->oo_debug_io;
- lock = olck->ols_cl.cls_lock;
- descr = &lock->cll_descr;
- plist = &osc_env_info(env)->oti_plist;
- cl_page_list_init(plist);
-
- cfs_mutex_lock(&oob->oo_debug_mutex);
-
- io->ci_obj = cl_object_top(obj);
- cl_io_init(env, io, CIT_MISC, io->ci_obj);
- cl_page_gang_lookup(env, obj, io,
- descr->cld_start, descr->cld_end, plist, 0,
- NULL);
- cl_lock_page_list_fixup(env, io, lock, plist);
- if (plist->pl_nr > 0) {
- CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
- cl_page_list_for_each(page, plist)
- CL_PAGE_DEBUG(D_ERROR, env, page, "\n");
- }
- result = plist->pl_nr > 0;
- cl_page_list_disown(env, io, plist);
- cl_page_list_fini(env, plist);
- cl_io_fini(env, io);
- cfs_mutex_unlock(&oob->oo_debug_mutex);
- cl_env_nested_put(&nest, env);
- } else
- result = 0;
- return result;
+ if (IS_ERR(env))
+ return 0;
+
+ obj = olck->ols_cl.cls_obj;
+ oob = cl2osc(obj);
+ io = &oob->oo_debug_io;
+ lock = olck->ols_cl.cls_lock;
+ descr = &lock->cll_descr;
+
+ cfs_mutex_lock(&oob->oo_debug_mutex);
+
+ io->ci_obj = cl_object_top(obj);
+ cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ do {
+ result = cl_page_gang_lookup(env, obj, io,
+ descr->cld_start, descr->cld_end,
+ check_cb, (void *)lock);
+ if (result == CLP_GANG_ABORT)
+ break;
+ if (result == CLP_GANG_RESCHED)
+ cfs_cond_resched();
+ } while (result != CLP_GANG_OKAY);
+ cl_io_fini(env, io);
+ cfs_mutex_unlock(&oob->oo_debug_mutex);
+ cl_env_nested_put(&nest, env);
+
+ return (result == CLP_GANG_ABORT);
}
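
osc_lock_has_pages() now drives cl_page_gang_lookup() through check_cb() and a retry loop: CLP_GANG_RESCHED means the scan yielded to avoid hogging the CPU and must be restarted after cfs_cond_resched(), while CLP_GANG_ABORT means an unprotected page was found. A stand-alone sketch of that loop shape (invented names; the stub replaces one bounded scan step):

#include <sched.h>

enum gang { GANG_OKAY, GANG_RESCHED, GANG_ABORT };

/* stub standing in for one bounded scan over the lock's page range */
static enum gang gang_lookup_step(void)
{
        return GANG_OKAY;
}

int range_has_unprotected_pages(void)
{
        enum gang res;

        do {
                res = gang_lookup_step();
                if (res == GANG_RESCHED)
                        sched_yield();          /* cfs_cond_resched() analogue */
        } while (res == GANG_RESCHED);

        return res == GANG_ABORT;               /* callback found such a page */
}
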
#else
static int osc_lock_has_pages(struct osc_lock *olck)
return 0;
if (need->cld_mode == CLM_PHANTOM) {
+ if (ols->ols_agl)
+ return !(ols->ols_state > OLS_RELEASED);
+
/*
* Note: the QUEUED lock can't be matched here, otherwise
* it might cause the deadlocks.
.clo_fits_into = osc_lock_fits_into,
};
-static int osc_lock_lockless_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- LBUG();
- return 0;
-}
-
static int osc_lock_lockless_unuse(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
static const struct cl_lock_operations osc_lock_lockless_ops = {
.clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_lockless_enqueue,
+ .clo_enqueue = osc_lock_enqueue,
.clo_wait = osc_lock_lockless_wait,
.clo_unuse = osc_lock_lockless_unuse,
.clo_state = osc_lock_lockless_state,
OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
if (clk != NULL) {
osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
+ cfs_atomic_set(&clk->ols_pageref, 0);
clk->ols_state = OLS_NEW;
cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
result = 0;
return result;
}
+int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+{
+ struct osc_lock *olock;
+ int rc = 0;
+
+ cfs_spin_lock(&osc_ast_guard);
+ olock = dlm->l_ast_data;
+ /*
+ * there is a very rare race with osc_page_addref_lock(), but it
+ * does not matter: in the worst case we fail to cancel a lock
+ * that we actually could, which is harmless.
+ */
+ if (olock != NULL &&
+ cfs_atomic_add_return(_PAGEREF_MAGIC,
+ &olock->ols_pageref) != _PAGEREF_MAGIC) {
+ cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
+ rc = 1;
+ }
+ cfs_spin_unlock(&osc_ast_guard);
+ return rc;
+}
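
osc_dlm_lock_pageref() is where the _PAGEREF_MAGIC sentinel defined at the top of the patch earns its keep: a single cfs_atomic_add_return() both plants the sentinel in the counter and reports whether any page references existed at that instant, with no separate read-then-write window. A hedged C11 re-creation of the trick (names invented):

#include <stdatomic.h>
#include <stdbool.h>

#define PAGEREF_MAGIC (-10000000)               /* mirrors _PAGEREF_MAGIC */

static atomic_int pageref;                      /* per-lock page ref count */

bool lock_has_page_users(void)
{
        /* atomic_fetch_add() returns the previous value, so one atomic
         * operation both plants the sentinel and samples the old count */
        if (atomic_fetch_add(&pageref, PAGEREF_MAGIC) != 0) {
                /* pages were referencing the lock: back the sentinel out */
                atomic_fetch_sub(&pageref, PAGEREF_MAGIC);
                return true;
        }
        /* sentinel stays in on success; a destructor can later assert the
         * counter is either 0 or exactly PAGEREF_MAGIC, as the patch does */
        return false;
}

The backed-out branch is also why the race noted in the comment is benign: the worst outcome is a spurious "busy" answer, never a missed page reference.
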
/** @} osc */