From: wangdi Date: Thu, 5 Nov 2009 21:45:51 +0000 (+0000) Subject: Branch:HEAD X-Git-Tag: GIT_EPOCH_B_HD_KDMU~118 X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=7877f9ebf7cde785319f92e4a1fd033def11f23a Branch:HEAD b=19906 1. Fix osc_lock_fits_into to make sure only the granted locks can be matched by the enqueue sublocks. 2. Add lov_lock_cancel to unuse all sublocks once use (for glimpse) failed. 3. some other small fixes. i=Jay,Ericm --- diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 20b14d2..f67c30d 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -1301,6 +1301,11 @@ struct cl_lock_descr { __u64 cld_gid; /** Lock mode. */ enum cl_lock_mode cld_mode; + /** + * flags to enqueue lock. A combination of bit-flags from + * enum cl_enq_flags. + */ + __u32 cld_enq_flags; }; #define DDESCR "%s(%d):[%lu, %lu]" @@ -2156,11 +2161,6 @@ struct cl_io_lock_link { struct list_head cill_linkage; struct cl_lock_descr cill_descr; struct cl_lock *cill_lock; - /** - * flags to enqueue lock for this IO. A combination of bit-flags from - * enum cl_enq_flags. 
- */ - __u32 cill_enq_flags; /** optional destructor */ void (*cill_fini)(const struct lu_env *env, struct cl_io_lock_link *link); @@ -2763,7 +2763,6 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, const char *scope, const void *source); struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, const struct cl_lock_descr *need, - __u32 enqflags, const char *scope, const void *source); struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj, struct cl_page *page, struct cl_lock *except, @@ -2901,7 +2900,7 @@ void cl_io_end (const struct lu_env *env, struct cl_io *io); int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, struct cl_io_lock_link *link); int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr, int enqflags); + struct cl_lock_descr *descr); int cl_io_read_page (const struct lu_env *env, struct cl_io *io, struct cl_page *page); int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, diff --git a/lustre/lclient/glimpse.c b/lustre/lclient/glimpse.c index ed81f15..1527635 100644 --- a/lustre/lclient/glimpse.c +++ b/lustre/lclient/glimpse.c @@ -118,6 +118,7 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, *descr = whole_file; descr->cld_obj = clob; descr->cld_mode = CLM_PHANTOM; + descr->cld_enq_flags = CEF_ASYNC | CEF_MUST; cio->cui_glimpse = 1; /* * CEF_ASYNC is used because glimpse sub-locks cannot @@ -127,9 +128,8 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, * CEF_MUST protects glimpse lock from conversion into * a lockless mode. 
*/ - lock = cl_lock_request(env, io, descr, - CEF_ASYNC|CEF_MUST, - "glimpse", cfs_current()); + lock = cl_lock_request(env, io, descr, "glimpse", + cfs_current()); cio->cui_glimpse = 0; if (!IS_ERR(lock)) { result = cl_wait(env, lock); diff --git a/lustre/lclient/lcommon_cl.c b/lustre/lclient/lcommon_cl.c index dc2be2f..db5080e 100644 --- a/lustre/lclient/lcommon_cl.c +++ b/lustre/lclient/lcommon_cl.c @@ -738,8 +738,8 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, descr->cld_obj = obj; descr->cld_start = start; descr->cld_end = end; + descr->cld_enq_flags = enqflags; - cio->cui_link.cill_enq_flags = enqflags; cl_io_lock_add(env, io, &cio->cui_link); RETURN(0); } diff --git a/lustre/lclient/lcommon_misc.c b/lustre/lclient/lcommon_misc.c index 24e896d..ee4fef3 100644 --- a/lustre/lclient/lcommon_misc.c +++ b/lustre/lclient/lcommon_misc.c @@ -154,8 +154,9 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock, descr->cld_mode = CLM_GROUP; enqflags = CEF_MUST | (nonblock ? 
CEF_NONBLOCK : 0); - lock = cl_lock_request(env, io, descr, enqflags, - GROUPLOCK_SCOPE, cfs_current()); + descr->cld_enq_flags = enqflags; + + lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, cfs_current()); if (IS_ERR(lock)) { cl_io_fini(env, io); cl_env_put(env, &refcheck); diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index a602672..5107795 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -178,7 +178,8 @@ static int vvp_mmap_locks(const struct lu_env *env, policy.l_extent.start); descr->cld_end = cl_index(descr->cld_obj, policy.l_extent.end); - result = cl_io_lock_alloc_add(env, io, descr, flags); + descr->cld_enq_flags = flags; + result = cl_io_lock_alloc_add(env, io, descr); if (result < 0) RETURN(result); diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c index bc6ab44..cac7865 100644 --- a/lustre/lov/lov_lock.c +++ b/lustre/lov/lov_lock.c @@ -345,6 +345,7 @@ static int lov_lock_sub_init(const struct lu_env *env, descr->cld_end = cl_index(descr->cld_obj, end); descr->cld_mode = parent->cll_descr.cld_mode; descr->cld_gid = parent->cll_descr.cld_gid; + descr->cld_enq_flags = parent->cll_descr.cld_enq_flags; /* XXX has no effect */ lck->lls_sub[nr].sub_got = *descr; lck->lls_sub[nr].sub_stripe = i; @@ -366,6 +367,7 @@ static int lov_lock_sub_init(const struct lu_env *env, result = PTR_ERR(sublock); break; } + cl_lock_get_trust(sublock); cl_lock_mutex_get(env, sublock); cl_lock_mutex_get(env, parent); /* @@ -383,6 +385,7 @@ static int lov_lock_sub_init(const struct lu_env *env, "lov-parent", parent); } cl_lock_mutex_put(env, sublock); + cl_lock_put(env, sublock); } } /* @@ -536,10 +539,11 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent, cl_lock_mutex_get(env, parent); if (!IS_ERR(sublock)) { + cl_lock_get_trust(sublock); if (parent->cll_state == CLS_QUEUING && - lck->lls_sub[idx].sub_lock == NULL) + lck->lls_sub[idx].sub_lock == NULL) { lov_sublock_adopt(env, lck, sublock, idx, link); - 
else { + } else { OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem); /* other thread allocated sub-lock, or enqueue is no * longer going on */ @@ -548,6 +552,7 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent, cl_lock_mutex_get(env, parent); } cl_lock_mutex_put(env, sublock); + cl_lock_put(env, sublock); result = CLO_REPEAT; } else result = PTR_ERR(sublock); @@ -678,6 +683,56 @@ static int lov_lock_unuse(const struct lu_env *env, RETURN(result); } + +static void lov_lock_cancel(const struct lu_env *env, + const struct cl_lock_slice *slice) +{ + struct lov_lock *lck = cl2lov_lock(slice); + struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock); + int i; + int result; + + ENTRY; + + for (result = 0, i = 0; i < lck->lls_nr; ++i) { + int rc; + struct lovsub_lock *sub; + struct cl_lock *sublock; + struct lov_lock_sub *lls; + struct lov_sublock_env *subenv; + + /* top-lock state cannot change concurrently, because single + * thread (one that released the last hold) carries unlocking + * to the completion. 
*/ + lls = &lck->lls_sub[i]; + sub = lls->sub_lock; + if (sub == NULL) + continue; + + sublock = sub->lss_cl.cls_lock; + rc = lov_sublock_lock(env, lck, lls, closure, &subenv); + if (rc == 0) { + if (lls->sub_flags & LSF_HELD) { + if (sublock->cll_state == CLS_HELD) { + rc = cl_unuse_try(subenv->lse_env, + sublock); + lov_sublock_release(env, lck, i, 0, 0); + } else { + lov_sublock_release(env, lck, i, 1, 0); + } + } + lov_sublock_unlock(env, sub, closure, subenv); + } + result = lov_subresult(result, rc); + if (result < 0) + break; + } + + cl_lock_closure_fini(closure); + + return; +} + static int lov_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice) { @@ -1049,6 +1104,7 @@ static const struct cl_lock_operations lov_lock_ops = { .clo_wait = lov_lock_wait, .clo_use = lov_lock_use, .clo_unuse = lov_lock_unuse, + .clo_cancel = lov_lock_cancel, .clo_fits_into = lov_lock_fits_into, .clo_delete = lov_lock_delete, .clo_print = lov_lock_print diff --git a/lustre/lov/lovsub_lock.c b/lustre/lov/lovsub_lock.c index 66b6989..baceb9b 100644 --- a/lustre/lov/lovsub_lock.c +++ b/lustre/lov/lovsub_lock.c @@ -412,8 +412,9 @@ static int lovsub_lock_delete_one(const struct lu_env *env, break; case CLS_HELD: default: - CERROR("Impossible state: %i\n", parent->cll_state); - LBUG(); + LASSERTF(parent->cll_error != 0, "cll state %d is wrong!\n", + parent->cll_state); + break; } RETURN(result); diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c index fffe551..e386396 100644 --- a/lustre/obdclass/cl_io.c +++ b/lustre/obdclass/cl_io.c @@ -323,12 +323,11 @@ static int cl_lockset_lock_one(const struct lu_env *env, ENTRY; - lock = cl_lock_request(env, io, &link->cill_descr, link->cill_enq_flags, - "io", io); + lock = cl_lock_request(env, io, &link->cill_descr, "io", io); if (!IS_ERR(lock)) { link->cill_lock = lock; list_move(&link->cill_linkage, &set->cls_curr); - if (!(link->cill_enq_flags & CEF_ASYNC)) { + if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) { 
result = cl_wait(env, lock); if (result == 0) list_move(&link->cill_linkage, &set->cls_done); @@ -573,7 +572,7 @@ static void cl_free_io_lock_link(const struct lu_env *env, * Allocates new lock link, and uses it to add a lock to a lockset. */ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr, int enqflags) + struct cl_lock_descr *descr) { struct cl_io_lock_link *link; int result; @@ -582,7 +581,6 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, OBD_ALLOC_PTR(link); if (link != NULL) { link->cill_descr = *descr; - link->cill_enq_flags = enqflags; link->cill_fini = cl_free_io_lock_link; result = cl_io_lock_add(env, io, link); if (result) /* lock match */ diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c index 8db4297..f9a1925 100644 --- a/lustre/obdclass/cl_lock.c +++ b/lustre/obdclass/cl_lock.c @@ -1787,6 +1787,7 @@ struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj, need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but * not PHANTOM */ need->cld_start = need->cld_end = page->cp_index; + need->cld_enq_flags = 0; spin_lock(&head->coh_lock_guard); /* It is fine to match any group lock since there could be only one @@ -2071,7 +2072,6 @@ EXPORT_SYMBOL(cl_lock_hold); */ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, const struct cl_lock_descr *need, - __u32 enqflags, const char *scope, const void *source) { struct cl_lock *lock; @@ -2079,6 +2079,7 @@ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io, int rc; int iter; int warn; + __u32 enqflags = need->cld_enq_flags; ENTRY; fid = lu_object_fid(&io->ci_obj->co_lu); diff --git a/lustre/obdecho/echo_client.c b/lustre/obdecho/echo_client.c index 504196a..3f54f2e 100644 --- a/lustre/obdecho/echo_client.c +++ b/lustre/obdecho/echo_client.c @@ -995,10 +995,10 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco, 
descr->cld_start = cl_index(obj, start); descr->cld_end = cl_index(obj, end); descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ; + descr->cld_enq_flags = CEF_ASYNC | enqflags; io->ci_obj = obj; - lck = cl_lock_request(env, io, descr, CEF_ASYNC | enqflags, - "ec enqueue", eco); + lck = cl_lock_request(env, io, descr, "ec enqueue", eco); if (lck) { struct echo_client_obd *ec = eco->eo_dev->ed_ec; struct echo_lock *el; diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c index 6ca2014..1b5f411 100644 --- a/lustre/osc/osc_lock.c +++ b/lustre/osc/osc_lock.c @@ -1576,24 +1576,40 @@ static int osc_lock_fits_into(const struct lu_env *env, { struct osc_lock *ols = cl2osc_lock(slice); - /* If the lock hasn't ever enqueued, it can't be matched because - * enqueue process brings in many information which can be used to - * determine things such as lockless, CEF_MUST, etc. - */ - if (ols->ols_state < OLS_ENQUEUED) - return 0; - - /* Don't match this lock if the lock is able to become lockless lock. - * This is because the new lock might be covering a mmap region and - * so that it must have a cached at the local side. */ - if (ols->ols_state < OLS_UPCALL_RECEIVED && ols->ols_locklessable) - return 0; - - /* If the lock is going to be canceled, no reason to match it as well */ - if (ols->ols_state > OLS_RELEASED) + if (need->cld_enq_flags & CEF_NEVER) return 0; - /* go for it. */ + if (need->cld_mode == CLM_PHANTOM) { + /* + * Note: the QUEUED lock can't be matched here, otherwise + * it might cause the deadlocks. + * In read_process, + * P1: enqueued read lock, create sublock1 + * P2: enqueued write lock, create sublock2(conflicted + * with sublock1). + * P1: Grant read lock. + * P1: enqueued glimpse lock(with holding sublock1_read), + * matched with sublock2, waiting sublock2 to be granted. + * But sublock2 can not be granted, because P1 + * will not release sublock1. Bang! 
+ */ + if (ols->ols_state < OLS_GRANTED || + ols->ols_state > OLS_RELEASED) + return 0; + } else if (need->cld_enq_flags & CEF_MUST) { + /* + * If the lock hasn't ever enqueued, it can't be matched + * because enqueue process brings in many information + * which can be used to determine things such as lockless, + * CEF_MUST, etc. + */ + if (ols->ols_state < OLS_GRANTED || + ols->ols_state > OLS_RELEASED) + return 0; + if (ols->ols_state < OLS_UPCALL_RECEIVED && + ols->ols_locklessable) + return 0; + } return 1; }