From 51459371a2e146d01f9d2ef27fdf75d77639266f Mon Sep 17 00:00:00 2001
From: jxiong
Date: Wed, 25 Nov 2009 02:10:39 +0000
Subject: [PATCH] b=19906 r=wangdi,ericm

Revise osc_lock_cancel_wait.

In clio, we tend to cancel conflicting locks at the same client before
enqueuing a new lock. The old implementation waited at the osc layer
with the parent lock mutex held, which is not safe, so the wait has
been moved to the lov layer.
---
 lustre/include/cl_object.h   |   6 +-
 lustre/lov/lov_lock.c        |  77 +++++++++++++++--
 lustre/lov/lovsub_lock.c     |  54 ++----------
 lustre/obdclass/cl_lock.c    |  19 -----
 lustre/osc/osc_cl_internal.h |   1 -
 lustre/osc/osc_lock.c        | 194 +++++++++++++------------------------
 6 files changed, 141 insertions(+), 210 deletions(-)

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 563edd1..9996eb3 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -1570,6 +1570,10 @@ struct cl_lock {
          */
         struct list_head      cll_inclosure;
         /**
+         * Conflict lock at queuing time.
+         */
+        struct cl_lock       *cll_conflict;
+        /**
          * A list of references to this lock, for debugging.
          */
         struct lu_ref         cll_reference;
@@ -2783,8 +2787,6 @@
 void cl_lock_release   (const struct lu_env *env, struct cl_lock *lock,
                         const char *scope, const void *source);
 void cl_lock_user_add  (const struct lu_env *env, struct cl_lock *lock);
 int  cl_lock_user_del  (const struct lu_env *env, struct cl_lock *lock);
-int  cl_lock_compatible(const struct cl_lock *lock1,
-                        const struct cl_lock *lock2);
 
 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                      struct cl_lock *lock);
diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c
index fae9437..25e3d1b 100644
--- a/lustre/lov/lov_lock.c
+++ b/lustre/lov/lov_lock.c
@@ -492,6 +492,52 @@ static void lov_lock_fini(const struct lu_env *env,
 }
 
 /**
+ * Cancel the conflicting lock found at enqueue time, and wait until it is
+ * destroyed.
+ *
+ * \retval CLO_REPEAT the conflict was cancelled and the enqueue has to be
+ *                    repeated
+ * \retval -ve        waiting for the conflict failed, e.g., with -EINTR
+ */
+static int lov_lock_enqueue_wait(const struct lu_env *env,
+                                 struct lov_lock *lck,
+                                 struct cl_lock *sublock)
+{
+        struct cl_lock *lock     = lck->lls_cl.cls_lock;
+        struct cl_lock *conflict = sublock->cll_conflict;
+        int result = CLO_REPEAT;
+        ENTRY;
+
+        LASSERT(cl_lock_is_mutexed(lock));
+        LASSERT(cl_lock_is_mutexed(sublock));
+        LASSERT(sublock->cll_state == CLS_QUEUING);
+        LASSERT(conflict != NULL);
+
+        sublock->cll_conflict = NULL;
+        cl_lock_mutex_put(env, lock);
+        cl_lock_mutex_put(env, sublock);
+
+        LASSERT(cl_lock_nr_mutexed(env) == 0);
+
+        cl_lock_mutex_get(env, conflict);
+        cl_lock_cancel(env, conflict);
+        cl_lock_delete(env, conflict);
+        while (conflict->cll_state != CLS_FREEING) {
+                int rc;
+
+                rc = cl_lock_state_wait(env, conflict);
+                if (rc == 0)
+                        continue;
+
+                result = lov_subresult(result, rc);
+                break;
+        }
+        cl_lock_mutex_put(env, conflict);
+        lu_ref_del(&conflict->cll_reference, "cancel-wait", sublock);
+        cl_lock_put(env, conflict);
+
+        cl_lock_mutex_get(env, lock);
+        RETURN(result);
+}
+
+/**
  * Tries to advance a state machine of a given sub-lock toward enqueuing of
  * the top-lock.
  *
@@ -617,13 +663,30 @@ static int lov_lock_enqueue(const struct lu_env *env,
                                             subenv->lse_io, enqflags,
                                             i == lck->lls_nr - 1);
                         minstate = min(minstate, sublock->cll_state);
-                        /*
-                         * Don't hold a sub-lock in CLS_CACHED state, see
-                         * description for lov_lock::lls_sub.
-                         */
-                        if (sublock->cll_state > CLS_HELD)
-                                rc = lov_sublock_release(env, lck, i, 1, rc);
-                        lov_sublock_unlock(env, sub, closure, subenv);
+                        if (rc == CLO_WAIT) {
+                                switch (sublock->cll_state) {
+                                case CLS_QUEUING:
+                                        /* take recursive mutex, the lock is
+                                         * released in lov_lock_enqueue_wait()
+                                         */
+                                        cl_lock_mutex_get(env, sublock);
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                        rc = lov_lock_enqueue_wait(env, lck,
+                                                                   sublock);
+                                        break;
+                                case CLS_CACHED:
+                                        rc = lov_sublock_release(env, lck, i,
+                                                                 1, rc);
+                                        /* fall through */
+                                default:
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                        break;
+                                }
+                        } else {
+                                LASSERT(sublock->cll_conflict == NULL);
+                                lov_sublock_unlock(env, sub, closure, subenv);
+                        }
                 }
                 result = lov_subresult(result, rc);
                 if (result != 0)
diff --git a/lustre/lov/lovsub_lock.c b/lustre/lov/lovsub_lock.c
index 0d75bbe..663547a 100644
--- a/lustre/lov/lovsub_lock.c
+++ b/lustre/lov/lovsub_lock.c
@@ -88,40 +88,6 @@ static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
         EXIT;
 }
 
-static int lovsub_lock_state_one(const struct lu_env *env,
-                                 const struct lovsub_lock *lovsub,
-                                 struct lov_lock *lov)
-{
-        struct cl_lock *parent;
-        struct cl_lock *child;
-        int             restart = 0;
-
-        ENTRY;
-        parent = lov->lls_cl.cls_lock;
-        child  = lovsub->lss_cl.cls_lock;
-
-        if (lovsub->lss_active != parent) {
-                lovsub_parent_lock(env, lov);
-                if (child->cll_error != 0 && parent->cll_error == 0) {
-                        /*
-                         * This is a deadlock case:
-                         * cl_lock_error(for the parent lock)
-                         *   -> cl_lock_delete
-                         *     -> lov_lock_delete
-                         *       -> cl_lock_enclosure
-                         *         -> cl_lock_mutex_try(for the child lock)
-                         */
-                        cl_lock_mutex_put(env, child);
-                        cl_lock_error(env, parent, child->cll_error);
-                        restart = 1;
-                } else {
-                        cl_lock_signal(env, parent);
-                }
-                lovsub_parent_unlock(env, lov);
-        }
-        RETURN(restart);
-}
-
 /**
  * Implements cl_lock_operations::clo_state() method for lovsub layer, which
  * method is called whenever sub-lock state changes. Propagates state change
@@ -133,22 +99,20 @@ static void lovsub_lock_state(const struct lu_env *env,
 {
         struct lovsub_lock   *sub = cl2lovsub_lock(slice);
         struct lov_lock_link *scan;
-        int                   restart = 0;
 
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
         ENTRY;
 
-        do {
-                restart = 0;
-                list_for_each_entry(scan, &sub->lss_parents, lll_list) {
-                        restart = lovsub_lock_state_one(env, sub,
-                                                        scan->lll_super);
-                        if (restart) {
-                                cl_lock_mutex_get(env, slice->cls_lock);
-                                break;
-                        }
+        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+                struct lov_lock *lov    = scan->lll_super;
+                struct cl_lock  *parent = lov->lls_cl.cls_lock;
+
+                if (sub->lss_active != parent) {
+                        lovsub_parent_lock(env, lov);
+                        cl_lock_signal(env, parent);
+                        lovsub_parent_unlock(env, lov);
                 }
-        } while(restart);
+        }
         EXIT;
 }
diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c
index 50d44f5..190a941 100644
--- a/lustre/obdclass/cl_lock.c
+++ b/lustre/obdclass/cl_lock.c
@@ -2200,25 +2200,6 @@ int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
 }
 EXPORT_SYMBOL(cl_lock_user_del);
 
-/**
- * Check if two lock's mode are compatible.
- *
- * This returns true iff en-queuing \a lock2 won't cause cancellation of \a
- * lock1 even when these locks overlap.
- */
-int cl_lock_compatible(const struct cl_lock *lock1, const struct cl_lock *lock2)
-{
-        enum cl_lock_mode mode1;
-        enum cl_lock_mode mode2;
-
-        ENTRY;
-        mode1 = lock1->cll_descr.cld_mode;
-        mode2 = lock2->cll_descr.cld_mode;
-        RETURN(mode2 == CLM_PHANTOM ||
-               (mode1 == CLM_READ && mode2 == CLM_READ));
-}
-EXPORT_SYMBOL(cl_lock_compatible);
-
 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
 {
         static const char *names[] = {
diff --git a/lustre/osc/osc_cl_internal.h b/lustre/osc/osc_cl_internal.h
index a96bd84..7e0dfe5 100644
--- a/lustre/osc/osc_cl_internal.h
+++ b/lustre/osc/osc_cl_internal.h
@@ -97,7 +97,6 @@ struct osc_thread_info {
         struct cl_lock_descr    oti_descr;
         struct cl_attr          oti_attr;
         struct lustre_handle    oti_handle;
-        struct cl_lock_closure  oti_closure;
         struct cl_page_list     oti_plist;
 };
 
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 67baec8..8cb79b1 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -950,86 +950,6 @@ static void osc_lock_build_einfo(const struct lu_env *env,
         einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
 }
 
-static int osc_lock_delete0(struct cl_lock *conflict)
-{
-        struct cl_env_nest nest;
-        struct lu_env     *env;
-        int                rc = 0;
-
-        env = cl_env_nested_get(&nest);
-        if (!IS_ERR(env)) {
-                cl_lock_delete(env, conflict);
-                cl_env_nested_put(&nest, env);
-        } else
-                rc = PTR_ERR(env);
-        return rc;
-}
-/**
- * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
- * is called as a part of enqueuing to cancel conflicting locks early.
- *
- * \retval 0: success, \a conflict was cancelled and destroyed.
- *
- * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
- *                     released in the process. Repeat enqueing.
- *
- * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
- *                       either \a lock is non-blocking, or current thread
- *                       holds other locks, that prevent it from waiting
- *                       for cancel to complete.
- *
- * \retval -ve: other error, including -EINTR.
- *
- */
-static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
-                                struct cl_lock *conflict, int canwait)
-{
-        int rc;
-
-        LASSERT(cl_lock_is_mutexed(lock));
-        LASSERT(cl_lock_is_mutexed(conflict));
-
-        rc = 0;
-        if (conflict->cll_state != CLS_FREEING) {
-                cl_lock_cancel(env, conflict);
-                rc = osc_lock_delete0(conflict);
-                if (rc)
-                        return rc;
-                if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
-                        rc = -EWOULDBLOCK;
-                        if (cl_lock_nr_mutexed(env) > 2)
-                                /*
-                                 * If mutices of locks other than @lock and
-                                 * @scan are held by the current thread, it
-                                 * cannot wait on @scan state change in a
-                                 * dead-lock safe matter, so simply skip early
-                                 * cancellation in this case.
-                                 *
-                                 * This means that early cancellation doesn't
-                                 * work when there is even slight mutex
-                                 * contention, as top-lock's mutex is usually
-                                 * held at this time.
-                                 */
-                                ;
-                        else if (canwait) {
-                                /* Waiting for @scan to be destroyed */
-                                cl_lock_mutex_put(env, lock);
-                                do {
-                                        rc = cl_lock_state_wait(env, conflict);
-                                } while (!rc &&
-                                         conflict->cll_state < CLS_FREEING);
-                                /* mutex was released, repeat enqueue. */
-                                rc = rc ?: CLO_REPEAT;
-                                cl_lock_mutex_get(env, lock);
-                        }
-                }
-                LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
-                CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
-                       conflict, rc ? "not":"", rc);
-        }
-        return rc;
-}
-
 /**
  * Determine if the lock should be converted into a lockless lock.
 *
@@ -1085,6 +1005,28 @@ static void osc_lock_to_lockless(const struct lu_env *env,
         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
 }
 
+static int osc_lock_compatible(const struct osc_lock *qing,
+                               const struct osc_lock *qed)
+{
+        enum cl_lock_mode qing_mode;
+        enum cl_lock_mode qed_mode;
+
+        qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
+        if (qed->ols_glimpse &&
+            (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
+                return 1;
+
+        qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
+        return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+}
+
+#ifndef list_for_each_entry_continue
+#define list_for_each_entry_continue(pos, head, member)                 \
+        for (pos = list_entry(pos->member.next, typeof(*pos), member);  \
+             prefetch(pos->member.next), &pos->member != (head);        \
+             pos = list_entry(pos->member.next, typeof(*pos), member))
+#endif
+
 /**
  * Cancel all conflicting locks and wait for them to be destroyed.
  *
@@ -1102,36 +1044,29 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
         struct cl_lock_descr    *descr   = &lock->cll_descr;
         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
-        struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
-        struct cl_lock          *scan;
-        struct cl_lock          *temp;
+        struct cl_lock          *scan     = lock;
+        struct cl_lock          *conflict = NULL;
         int lockless = osc_lock_is_lockless(olck);
         int rc       = 0;
-        int canwait;
-        int stop;
         ENTRY;
 
         LASSERT(cl_lock_is_mutexed(lock));
         LASSERT(lock->cll_state == CLS_QUEUING);
 
-        /*
-         * XXX This function could be sped up if we had asynchronous
-         * cancellation.
-         */
+        /* a glimpse lock does not need any conflicting lock to be
+         * cancelled, so let it enqueue right away. */
+        if (olck->ols_glimpse)
+                return 0;
 
-        canwait =
-                !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
-                cl_lock_nr_mutexed(env) == 1;
-        cl_lock_closure_init(env, closure, lock, canwait);
         spin_lock(&hdr->coh_lock_guard);
-        list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
-                if (scan == lock)
-                        continue;
+        list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
+                struct cl_lock_descr  *cld = &scan->cll_descr;
+                const struct osc_lock *scan_ols;
 
                 if (scan->cll_state < CLS_QUEUING ||
                     scan->cll_state == CLS_FREEING ||
-                    scan->cll_descr.cld_start > descr->cld_end ||
-                    scan->cll_descr.cld_end < descr->cld_start)
+                    cld->cld_start > descr->cld_end ||
+                    cld->cld_end < descr->cld_start)
                         continue;
 
                 /* overlapped and living locks. */
@@ -1143,52 +1078,39 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
                         continue;
                 }
 
-                /* A tricky case for lockless pages:
-                 * We need to cancel the compatible locks if we're enqueuing
+                scan_ols = osc_lock_at(scan);
+
+                /* We need to cancel the compatible locks if we're enqueuing
                  * a lockless lock, for example:
                  * imagine that client has PR lock on [0, 1000], and thread T0
                  * is doing lockless IO in [500, 1500] region. Concurrent
                  * thread T1 can see lockless data in [500, 1000], which is
-                 * wrong, because these data are possibly stale.
-                 */
-                if (!lockless && cl_lock_compatible(scan, lock))
+                 * wrong, because these data are possibly stale. */
+                if (!lockless && osc_lock_compatible(olck, scan_ols))
                         continue;
 
                 /* Now @scan is conflicting with @lock, this means current
                  * thread have to sleep for @scan being destroyed.
                  */
-                cl_lock_get_trust(scan);
-                if (&temp->cll_linkage != &hdr->coh_locks)
-                        cl_lock_get_trust(temp);
-                spin_unlock(&hdr->coh_lock_guard);
-                lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
-
-                LASSERT(list_empty(&closure->clc_list));
-                rc = cl_lock_closure_build(env, scan, closure);
-                if (rc == 0) {
-                        rc = osc_lock_cancel_wait(env, lock, scan, canwait);
-                        cl_lock_disclosure(env, closure);
-                        if (rc == -EWOULDBLOCK)
-                                rc = 0;
+                if (scan_ols->ols_owner == osc_env_io(env)) {
+                        CERROR("DEADLOCK POSSIBLE!\n");
+                        CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
+                        CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
+                        libcfs_debug_dumpstack(NULL);
                 }
-                if (rc == CLO_REPEAT && !canwait)
-                        /* cannot wait... no early cancellation. */
-                        rc = 0;
-
-                lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
-                cl_lock_put(env, scan);
-                spin_lock(&hdr->coh_lock_guard);
-                /*
-                 * Lock list could have been modified, while spin-lock was
-                 * released. Check that it is safe to continue.
-                 */
-                stop = list_empty(&temp->cll_linkage);
-                if (&temp->cll_linkage != &hdr->coh_locks)
-                        cl_lock_put(env, temp);
-                if (stop || rc != 0)
-                        break;
+                cl_lock_get_trust(scan);
+                conflict = scan;
+                break;
         }
         spin_unlock(&hdr->coh_lock_guard);
-        cl_lock_closure_fini(closure);
+
+        if (conflict) {
+                CDEBUG(D_DLMTRACE, "lock %p is conflicting with %p, will wait\n",
+                       lock, conflict);
+                lu_ref_add(&conflict->cll_reference, "cancel-wait", lock);
+                LASSERT(lock->cll_conflict == NULL);
+                lock->cll_conflict = conflict;
+                rc = CLO_WAIT;
+        }
         RETURN(rc);
 }
@@ -1290,12 +1212,12 @@ static int osc_lock_enqueue(const struct lu_env *env,
                         ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
                 if (ols->ols_flags & LDLM_FL_HAS_INTENT)
                         ols->ols_glimpse = 1;
+                if (!(enqflags & CEF_MUST))
+                        /* try to convert this lock to a lockless lock */
+                        osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
 
                 result = osc_lock_enqueue_wait(env, ols);
                 if (result == 0) {
-                        if (!(enqflags & CEF_MUST))
-                                /* try to convert this lock to a lockless lock */
-                                osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
                         if (!osc_lock_is_lockless(ols)) {
                                 if (ols->ols_locklessable)
                                         ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
-- 
1.8.3.1
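
Note (illustrative, not part of the patch): the sketch below is a minimal,
self-contained C model of the CLO_WAIT / CLO_REPEAT handshake this patch
introduces. It is not Lustre code; struct lock, bottom_enqueue() and
top_enqueue_wait() are invented names standing in for the osc and lov
layers, and the real conflict is found by scanning coh_locks rather than
being passed in. The point it demonstrates: the bottom layer only records
the conflict (cll_conflict) and returns CLO_WAIT without blocking, while
the top layer, holding no lock mutexes, cancels the conflict, waits for it
to die, and returns CLO_REPEAT so the enqueue is restarted.

        #include <stdio.h>

        enum lock_state { QUEUING, ENQUEUED, HELD, FREEING };
        enum { CLO_WAIT = 1, CLO_REPEAT = 2 };

        struct lock {
                enum lock_state  state;
                struct lock     *conflict;   /* plays the role of cll_conflict */
        };

        /* Bottom layer (osc in the patch): detect a conflicting lock,
         * record it, and ask the caller to resolve it instead of
         * blocking with mutexes held. */
        static int bottom_enqueue(struct lock *lock, struct lock *other)
        {
                if (other != NULL && other->state != FREEING) {
                        lock->conflict = other;
                        return CLO_WAIT;     /* caller must cancel and wait */
                }
                lock->state = ENQUEUED;
                return 0;
        }

        /* Top layer (lov in the patch): no lock mutexes are held here,
         * so it is safe to cancel the conflict and wait for FREEING;
         * cancel + state wait are collapsed into one assignment. */
        static int top_enqueue_wait(struct lock *lock)
        {
                struct lock *conflict = lock->conflict;

                lock->conflict = NULL;
                conflict->state = FREEING;
                return CLO_REPEAT;           /* enqueue must be restarted */
        }

        int main(void)
        {
                struct lock other = { HELD, NULL };
                struct lock lock  = { QUEUING, NULL };
                int rc;

                do {
                        rc = bottom_enqueue(&lock, &other);
                        if (rc == CLO_WAIT)
                                rc = top_enqueue_wait(&lock);
                } while (rc == CLO_REPEAT);

                printf("enqueued, rc=%d\n", rc);  /* prints: enqueued, rc=0 */
                return 0;
        }

Splitting the work this way keeps the blocking wait at the one place that
can guarantee no other cl_lock mutexes are held, which is exactly what the
LASSERT(cl_lock_nr_mutexed(env) == 0) in lov_lock_enqueue_wait() enforces.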