* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* @{
*/
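+/*
+ * Large negative bias atomically applied to osc_lock::ols_pageref in
+ * osc_dlm_lock_pageref() below to test for outstanding page
+ * references; see the comment in that function.
+ */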
+#define _PAGEREF_MAGIC (-10000000)
+
/*****************************************************************************
*
* Type conversions.
if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
struct cl_object *obj = olck->ols_cl.cls_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ __u64 old_kms;
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid possible races. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
/* Update the kms. Need to loop all granted locks.
* Not a problem for the client */
attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- unlock_res_and_lock(dlmlock);
- cl_object_attr_lock(obj);
cl_object_attr_set(env, obj, attr, CAT_KMS);
cl_object_attr_unlock(obj);
- } else
- unlock_res_and_lock(dlmlock);
+ }
+ unlock_res_and_lock(dlmlock);
/* release a reference taken in osc_lock_upcall0(). */
LASSERT(olck->ols_has_ref);
*/
osc_lock_unhold(ols);
LASSERT(ols->ols_lock == NULL);
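+ /* at this point ols_pageref is either 0 (no page ever referenced
+ * this lock) or the bias left behind by osc_dlm_lock_pageref() */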
+ LASSERT(cfs_atomic_read(&ols->ols_pageref) == 0 ||
+ cfs_atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
}
} else {
/*
* In reality, where ost server expects ->lsm_object_id and
- * ->lsm_object_gr in rename.
+ * ->lsm_object_seq in rename.
*/
- osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_gr,
+ osc_build_res_name(obj->oo_oinfo->loi_id, obj->oo_oinfo->loi_seq,
resname);
}
}
LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
ENTRY;
- if (olck->ols_state != OLS_GRANTED) {
+ if (olck->ols_state < OLS_GRANTED) {
lock = olck->ols_cl.cls_lock;
ext = &dlmlock->l_policy_data.l_extent;
descr = &osc_env_info(env)->oti_descr;
} else if (olck->ols_state == OLS_CANCELLED) {
rc = -EIO;
} else {
- CERROR("Impossible state: %i\n", olck->ols_state);
+ CERROR("Impossible state: %d\n", olck->ols_state);
LBUG();
}
if (rc) {
io->ci_lockreq == CILR_NEVER);
ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
- ols->ols_locklessable = (io->ci_type != CIT_TRUNC) &&
+ ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
(io->ci_lockreq == CILR_MAYBE) &&
(ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
if (io->ci_lockreq == CILR_NEVER ||
/* lockless IO */
(ols->ols_locklessable && osc_object_is_contended(oob)) ||
/* lockless truncate */
- (io->ci_type == CIT_TRUNC &&
+ (cl_io_is_trunc(io) &&
(ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
osd->od_lockless_truncate)) {
ols->ols_locklessable = 1;
struct cl_lock *lock = olck->ols_cl.cls_lock;
struct cl_lock_descr *descr = &lock->cll_descr;
struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
- struct cl_lock *scan = lock;
+ struct cl_lock *scan;
struct cl_lock *conflict= NULL;
int lockless = osc_lock_is_lockless(olck);
int rc = 0;
return 0;
cfs_spin_lock(&hdr->coh_lock_guard);
- cfs_list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
+ cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
struct cl_lock_descr *cld = &scan->cll_descr;
const struct osc_lock *scan_ols;
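+ /* scan only the locks ahead of this one in the object's
+ * lock list; stop when we reach the lock itself */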
+ if (scan == lock)
+ break;
+
if (scan->cll_state < CLS_QUEUING ||
scan->cll_state == CLS_FREEING ||
cld->cld_start > descr->cld_end ||
cfs_spin_unlock(&hdr->coh_lock_guard);
if (conflict) {
- CDEBUG(D_DLMTRACE, "lock %p is confliced with %p, will wait\n",
- lock, conflict);
- lu_ref_add(&conflict->cll_reference, "cancel-wait", lock);
- LASSERT(lock->cll_conflict == NULL);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
- }
- RETURN(rc);
-}
-
-/**
- * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario:
- *
- * - Thread0: obtains PR:[0, 10]. Lock is busy.
- *
- * - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to
- * PR:[0, 10], but cancellation of busy lock is postponed.
- *
- * - Thread0: enqueue PR:[30, 40]. Lock is locally matched to
- * PW:[5, 50], and thread0 waits for the lock completion never
- * releasing PR:[0, 10]---deadlock.
- *
- * The second PR lock can be glimpse (it is to deal with that situation that
- * ll_glimpse_size() has second argument, preventing local match of
- * not-yet-granted locks, see bug 10295). Similar situation is possible in the
- * case of memory mapped user level buffer.
- *
- * To prevent this we can detect a situation when current "thread" or "io"
- * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to
- * the ols->ols_flags, or prevent local match with PW locks.
- */
-static int osc_deadlock_is_possible(const struct lu_env *env,
- struct cl_lock *lock)
-{
- struct cl_object *obj;
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct osc_io *oio;
-
- int result;
-
- ENTRY;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- oio = osc_env_io(env);
- obj = lock->cll_descr.cld_obj;
- head = cl_object_header(obj);
-
- result = 0;
- cfs_spin_lock(&head->coh_lock_guard);
- cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != lock) {
- struct osc_lock *oscan;
-
- oscan = osc_lock_at(scan);
- LASSERT(oscan != NULL);
- if (oscan->ols_owner == oio) {
- result = 1;
- break;
- }
+ if (lock->cll_descr.cld_mode == CLM_GROUP) {
+ /* We want a group lock, but a previous lock request
+ * conflicts; do not wait, return 0 so the request is
+ * sent to the server.
+ */
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
+ "with %p, no wait, send to server\n",
+ lock, conflict);
+ cl_lock_put(env, conflict);
+ rc = 0;
+ } else {
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
+ "will wait\n",
+ lock, conflict);
+ LASSERT(lock->cll_conflict == NULL);
+ lu_ref_add(&conflict->cll_reference, "cancel-wait",
+ lock);
+ lock->cll_conflict = conflict;
+ rc = CLO_WAIT;
}
}
- cfs_spin_unlock(&head->coh_lock_guard);
- RETURN(result);
+ RETURN(rc);
}
/**
* Implementation of cl_lock_operations::clo_enqueue() method for osc
* layer. This initiates ldlm enqueue:
*
- * - checks for possible dead-lock conditions (osc_deadlock_is_possible());
- *
* - cancels conflicting locks early (osc_lock_enqueue_wait());
*
* - calls osc_enqueue_base() to do actual enqueue.
{
struct osc_lock *ols = cl2osc_lock(slice);
struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
int result;
ENTRY;
LASSERT(lock->cll_state == CLS_QUEUING);
LASSERT(ols->ols_state == OLS_NEW);
- osc_lock_build_res(env, obj, resname);
- osc_lock_build_policy(env, lock, policy);
ols->ols_flags = osc_enq2ldlm_flags(enqflags);
- if (osc_deadlock_is_possible(env, lock))
- ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
if (ols->ols_flags & LDLM_FL_HAS_INTENT)
ols->ols_glimpse = 1;
- if (!(enqflags & CEF_MUST))
+ if (!osc_lock_is_lockless(ols) && !(enqflags & CEF_MUST))
/* try to convert this lock to a lockless lock */
osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
result = osc_lock_enqueue_wait(env, ols);
if (result == 0) {
if (!osc_lock_is_lockless(ols)) {
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+
if (ols->ols_locklessable)
ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
* ldlm_lock_match(LDLM_FL_LVB_READY) waits for
* LDLM_CP_CALLBACK.
*/
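+ /* build the resource name and policy only when a dlm
+ * lock is actually going to be enqueued */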
+ osc_lock_build_res(env, obj, resname);
+ osc_lock_build_policy(env, lock, policy);
result = osc_enqueue_base(osc_export(obj), resname,
&ols->ols_flags, policy,
&ols->ols_lvb,
}
} else {
ols->ols_state = OLS_GRANTED;
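+ /* record the enqueuing io as this lock's owner */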
+ ols->ols_owner = osc_env_io(env);
}
}
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
io->ci_obj = cl_object_top(obj);
cl_io_init(env, io, CIT_MISC, io->ci_obj);
cl_page_gang_lookup(env, obj, io,
- descr->cld_start, descr->cld_end, plist, 0);
+ descr->cld_start, descr->cld_end, plist, 0,
+ NULL);
cl_lock_page_list_fixup(env, io, lock, plist);
if (plist->pl_nr > 0) {
CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
enum cl_lock_state state)
{
struct osc_lock *lock = cl2osc_lock(slice);
- struct osc_io *oio = osc_env_io(env);
/*
* XXX multiple io contexts can use the lock at the same time.
*/
LINVRNT(osc_lock_invariant(lock));
if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
+ struct osc_io *oio = osc_env_io(env);
+
LASSERT(lock->ols_owner == NULL);
lock->ols_owner = oio;
} else if (state != CLS_HELD)
* will not release sublock1. Bang!
*/
if (ols->ols_state < OLS_GRANTED ||
- ols->ols_state > OLS_RELEASED)
+ ols->ols_state > OLS_RELEASED)
return 0;
} else if (need->cld_enq_flags & CEF_MUST) {
- /*
+ /*
* If the lock hasn't ever enqueued, it can't be matched
* because enqueue process brings in many information
* which can be used to determine things such as lockless,
* CEF_MUST, etc.
*/
- if (ols->ols_state < OLS_GRANTED ||
- ols->ols_state > OLS_RELEASED)
- return 0;
if (ols->ols_state < OLS_UPCALL_RECEIVED &&
- ols->ols_locklessable)
+ ols->ols_locklessable)
return 0;
}
return 1;
.clo_fits_into = osc_lock_fits_into,
};
-static int osc_lock_lockless_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- LBUG();
- return 0;
-}
-
static int osc_lock_lockless_unuse(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
enum cl_lock_state state)
{
struct osc_lock *lock = cl2osc_lock(slice);
- struct osc_io *oio = osc_env_io(env);
LINVRNT(osc_lock_invariant(lock));
if (state == CLS_HELD) {
- LASSERT(lock->ols_owner == NULL);
+ struct osc_io *oio = osc_env_io(env);
+
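+ /* a lockless lock may be re-used by the io that owns it, in
+ * which case ols_owner is already set to this io */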
+ LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
lock->ols_owner = oio;
/* set the io to be lockless if this lock is for io's
* host object */
if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
oio->oi_lockless = 1;
- } else
- lock->ols_owner = NULL;
+ }
}
static int osc_lock_lockless_fits_into(const struct lu_env *env,
const struct cl_lock_slice *slice,
const struct cl_lock_descr *need,
const struct cl_io *io)
{
- return 0;
+ struct osc_lock *lock = cl2osc_lock(slice);
+
+ if (!(need->cld_enq_flags & CEF_NEVER))
+ return 0;
+
+ /* lockless lock should only be used by its owning io. b22147 */
+ return (lock->ols_owner == osc_env_io(env));
}
static const struct cl_lock_operations osc_lock_lockless_ops = {
.clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_lockless_enqueue,
+ .clo_enqueue = osc_lock_enqueue,
.clo_wait = osc_lock_lockless_wait,
.clo_unuse = osc_lock_lockless_unuse,
.clo_state = osc_lock_lockless_state,
OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
if (clk != NULL) {
osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
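+ /* no page has taken a reference on this lock yet */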
+ cfs_atomic_set(&clk->ols_pageref, 0);
clk->ols_state = OLS_NEW;
cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
result = 0;
return result;
}
+int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+{
+ struct osc_lock *olock;
+ int rc = 0;
+
+ cfs_spin_lock(&osc_ast_guard);
+ olock = dlm->l_ast_data;
+ /*
+ * There is a very rare race with osc_page_addref_lock(), but it
+ * does not matter: in the worst case we fail to cancel a lock
+ * that we actually could, which does no harm.
+ */
+ if (olock != NULL &&
+ cfs_atomic_add_return(_PAGEREF_MAGIC,
+ &olock->ols_pageref) != _PAGEREF_MAGIC) {
+ cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
+ rc = 1;
+ }
+ cfs_spin_unlock(&osc_ast_guard);
+ return rc;
+}
/** @} osc */