__u64 cld_gid;
/** Lock mode. */
enum cl_lock_mode cld_mode;
+ /**
+ * Flags to enqueue the lock with: a combination of bit-flags from
+ * enum cl_enq_flags.
+ */
+ __u32 cld_enq_flags;
};
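
With the flags moved into cl_lock_descr, a caller describes how to enqueue in the same structure that describes what to lock. A minimal sketch of the new calling convention (the helper and variable names here are placeholders, not part of the patch):

	static struct cl_lock *sketch_lock_request(const struct lu_env *env,
						   struct cl_io *io,
						   struct cl_object *obj,
						   pgoff_t start, pgoff_t end)
	{
		struct cl_lock_descr descr;

		/* Describe the extent to lock, as before... */
		descr.cld_obj   = obj;
		descr.cld_mode  = CLM_READ;
		descr.cld_start = start;
		descr.cld_end   = end;
		/* ...and carry the enqueue flags in the descriptor itself,
		 * instead of passing them as a separate argument. */
		descr.cld_enq_flags = CEF_MUST;

		return cl_lock_request(env, io, &descr, "sketch", cfs_current());
	}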
#define DDESCR "%s(%d):[%lu, %lu]"
struct list_head cill_linkage;
struct cl_lock_descr cill_descr;
struct cl_lock *cill_lock;
- /**
- * flags to enqueue lock for this IO. A combination of bit-flags from
- * enum cl_enq_flags.
- */
- __u32 cill_enq_flags;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
struct cl_io_lock_link *link);
const char *scope, const void *source);
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
const struct cl_lock_descr *need,
- __u32 enqflags,
const char *scope, const void *source);
struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, struct cl_lock *except,
int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link);
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr, int enqflags);
+ struct cl_lock_descr *descr);
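
Callers of cl_io_lock_alloc_add() follow the same convention; a hedged before/after sketch (CEF_MUST stands in for whatever flags the caller needs):

	/* before: result = cl_io_lock_alloc_add(env, io, descr, CEF_MUST); */
	descr->cld_enq_flags = CEF_MUST;
	result = cl_io_lock_alloc_add(env, io, descr);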
int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
*descr = whole_file;
descr->cld_obj = clob;
descr->cld_mode = CLM_PHANTOM;
+ descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
cio->cui_glimpse = 1;
/*
 * CEF_ASYNC is used because glimpse sub-locks cannot
 * deadlock (because they never conflict with other
 * locks) and, hence, can be enqueued out-of-order.
 *
 * CEF_MUST protects glimpse lock from conversion into
 * a lockless mode.
 */
- lock = cl_lock_request(env, io, descr,
- CEF_ASYNC|CEF_MUST,
- "glimpse", cfs_current());
+ lock = cl_lock_request(env, io, descr, "glimpse",
+ cfs_current());
cio->cui_glimpse = 0;
if (!IS_ERR(lock)) {
result = cl_wait(env, lock);
descr->cld_obj = obj;
descr->cld_start = start;
descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
- cio->cui_link.cill_enq_flags = enqflags;
cl_io_lock_add(env, io, &cio->cui_link);
RETURN(0);
}
descr->cld_mode = CLM_GROUP;
enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
- lock = cl_lock_request(env, io, descr, enqflags,
- GROUPLOCK_SCOPE, cfs_current());
+ descr->cld_enq_flags = enqflags;
+
+ lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current());
if (IS_ERR(lock)) {
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
policy.l_extent.start);
descr->cld_end = cl_index(descr->cld_obj,
policy.l_extent.end);
- result = cl_io_lock_alloc_add(env, io, descr, flags);
+ descr->cld_enq_flags = flags;
+ result = cl_io_lock_alloc_add(env, io, descr);
if (result < 0)
RETURN(result);
descr->cld_end = cl_index(descr->cld_obj, end);
descr->cld_mode = parent->cll_descr.cld_mode;
descr->cld_gid = parent->cll_descr.cld_gid;
+ descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
/* XXX has no effect */
lck->lls_sub[nr].sub_got = *descr;
lck->lls_sub[nr].sub_stripe = i;
result = PTR_ERR(sublock);
break;
}
+ cl_lock_get_trust(sublock);
cl_lock_mutex_get(env, sublock);
cl_lock_mutex_get(env, parent);
/*
"lov-parent", parent);
}
cl_lock_mutex_put(env, sublock);
+ cl_lock_put(env, sublock);
}
}
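
The cl_lock_get_trust()/cl_lock_put() pairs added above pin the sub-lock for the whole section in which its mutex is taken and released; without the extra reference, a concurrent deletion could free the sub-lock while this thread still holds (or is about to drop) its mutex. The shape of the idiom, as a sketch:

	cl_lock_get_trust(sublock);      /* pin: sub-lock cannot be freed */
	cl_lock_mutex_get(env, sublock);
	/* ... manipulate parent/sub-lock state; mutexes may be
	 * dropped and re-taken in between ... */
	cl_lock_mutex_put(env, sublock);
	cl_lock_put(env, sublock);       /* unpin: the last put may free it */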
/*
cl_lock_mutex_get(env, parent);
if (!IS_ERR(sublock)) {
+ cl_lock_get_trust(sublock);
if (parent->cll_state == CLS_QUEUING &&
- lck->lls_sub[idx].sub_lock == NULL)
+ lck->lls_sub[idx].sub_lock == NULL) {
lov_sublock_adopt(env, lck, sublock, idx, link);
- else {
+ } else {
OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
/* other thread allocated sub-lock, or enqueue is no
* longer going on */
cl_lock_mutex_get(env, parent);
}
cl_lock_mutex_put(env, sublock);
+ cl_lock_put(env, sublock);
result = CLO_REPEAT;
} else
result = PTR_ERR(sublock);
RETURN(result);
}
+
+static void lov_lock_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ int i;
+ int result;
+
+ ENTRY;
+
+ for (result = 0, i = 0; i < lck->lls_nr; ++i) {
+ int rc;
+ struct lovsub_lock *sub;
+ struct cl_lock *sublock;
+ struct lov_lock_sub *lls;
+ struct lov_sublock_env *subenv;
+
+ /* top-lock state cannot change concurrently, because a single
+ * thread (the one that released the last hold) carries unlocking
+ * to completion. */
+ lls = &lck->lls_sub[i];
+ sub = lls->sub_lock;
+ if (sub == NULL)
+ continue;
+
+ sublock = sub->lss_cl.cls_lock;
+ rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+ if (rc == 0) {
+ if (lls->sub_flags & LSF_HELD) {
+ if (sublock->cll_state == CLS_HELD) {
+ rc = cl_unuse_try(subenv->lse_env,
+ sublock);
+ lov_sublock_release(env, lck, i, 0, 0);
+ } else {
+ lov_sublock_release(env, lck, i, 1, 0);
+ }
+ }
+ lov_sublock_unlock(env, sub, closure, subenv);
+ }
+ result = lov_subresult(result, rc);
+ if (result < 0)
+ break;
+ }
+
+ cl_lock_closure_fini(closure);
+
+ return;
+}
+
static int lov_lock_wait(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
.clo_wait = lov_lock_wait,
.clo_use = lov_lock_use,
.clo_unuse = lov_lock_unuse,
+ .clo_cancel = lov_lock_cancel,
.clo_fits_into = lov_lock_fits_into,
.clo_delete = lov_lock_delete,
.clo_print = lov_lock_print
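
Wiring .clo_cancel into lov_lock_ops means a cancellation of the top-lock now descends into the LOV layer: the generic cl_lock code walks the lock's layer slices and invokes each layer's clo_cancel, which for a striped file lands in the lov_lock_cancel() added above. Roughly (a paraphrase of the generic dispatch, not code from this patch):

	struct cl_lock_slice *slice;

	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_cancel != NULL)
			slice->cls_ops->clo_cancel(env, slice);
	}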
break;
case CLS_HELD:
default:
- CERROR("Impossible state: %i\n", parent->cll_state);
- LBUG();
+ LASSERTF(parent->cll_error != 0, "cll state %d is wrong!\n",
+ parent->cll_state);
+ break;
}
RETURN(result);
ENTRY;
- lock = cl_lock_request(env, io, &link->cill_descr, link->cill_enq_flags,
- "io", io);
+ lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
link->cill_lock = lock;
list_move(&link->cill_linkage, &set->cls_curr);
- if (!(link->cill_enq_flags & CEF_ASYNC)) {
+ if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
result = cl_wait(env, lock);
if (result == 0)
list_move(&link->cill_linkage, &set->cls_done);
* Allocates new lock link, and uses it to add a lock to a lockset.
*/
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr, int enqflags)
+ struct cl_lock_descr *descr)
{
struct cl_io_lock_link *link;
int result;
OBD_ALLOC_PTR(link);
if (link != NULL) {
link->cill_descr = *descr;
- link->cill_enq_flags = enqflags;
link->cill_fini = cl_free_io_lock_link;
result = cl_io_lock_add(env, io, link);
if (result) /* lock match */
need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
* not PHANTOM */
need->cld_start = need->cld_end = page->cp_index;
+ need->cld_enq_flags = 0;
spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
*/
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
const struct cl_lock_descr *need,
- __u32 enqflags,
const char *scope, const void *source)
{
struct cl_lock *lock;
int rc;
int iter;
int warn;
+ __u32 enqflags = need->cld_enq_flags;
ENTRY;
fid = lu_object_fid(&io->ci_obj->co_lu);
descr->cld_start = cl_index(obj, start);
descr->cld_end = cl_index(obj, end);
descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
+ descr->cld_enq_flags = CEF_ASYNC | enqflags;
io->ci_obj = obj;
- lck = cl_lock_request(env, io, descr, CEF_ASYNC | enqflags,
- "ec enqueue", eco);
+ lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
if (lck) {
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
struct echo_lock *el;
{
struct osc_lock *ols = cl2osc_lock(slice);
- /* If the lock hasn't ever enqueued, it can't be matched because
- * enqueue process brings in many information which can be used to
- * determine things such as lockless, CEF_MUST, etc.
- */
- if (ols->ols_state < OLS_ENQUEUED)
- return 0;
-
- /* Don't match this lock if the lock is able to become lockless lock.
- * This is because the new lock might be covering a mmap region and
- * so that it must have a cached at the local side. */
- if (ols->ols_state < OLS_UPCALL_RECEIVED && ols->ols_locklessable)
- return 0;
-
- /* If the lock is going to be canceled, no reason to match it as well */
- if (ols->ols_state > OLS_RELEASED)
+ if (need->cld_enq_flags & CEF_NEVER)
return 0;
- /* go for it. */
+ if (need->cld_mode == CLM_PHANTOM) {
+ /*
+ * Note: a QUEUED lock can't be matched here, otherwise it
+ * might cause deadlocks. Consider the following read sequence:
+ * P1: enqueues a read lock, creating sublock1.
+ * P2: enqueues a write lock, creating sublock2 (which
+ *     conflicts with sublock1).
+ * P1: the read lock is granted.
+ * P1: enqueues a glimpse lock (while still holding sublock1
+ *     for read); it matches sublock2 and waits for sublock2
+ *     to be granted. But sublock2 can never be granted,
+ *     because P1 will not release sublock1. Bang!
+ */
+ if (ols->ols_state < OLS_GRANTED ||
+ ols->ols_state > OLS_RELEASED)
+ return 0;
+ } else if (need->cld_enq_flags & CEF_MUST) {
+ /*
+ * If the lock has never been enqueued, it can't be matched,
+ * because the enqueue process brings in much information
+ * that is needed to determine things such as lockless mode,
+ * CEF_MUST, etc.
+ */
+ if (ols->ols_state < OLS_GRANTED ||
+ ols->ols_state > OLS_RELEASED)
+ return 0;
+ if (ols->ols_state < OLS_UPCALL_RECEIVED &&
+ ols->ols_locklessable)
+ return 0;
+ }
return 1;
}
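
Taken together, the new matching policy in osc_lock_fits_into() can be summarized as follows (a sketch condensing the checks above):

	/*
	 * need->cld_enq_flags & CEF_NEVER  -> never match;
	 * need->cld_mode == CLM_PHANTOM    -> match only locks whose state
	 *                                     is in [OLS_GRANTED, OLS_RELEASED];
	 * need->cld_enq_flags & CEF_MUST   -> same state window, and never a
	 *                                     locklessable lock still below
	 *                                     OLS_UPCALL_RECEIVED;
	 * otherwise                        -> match.
	 */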