diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c
index 3e8b004..de487f6 100644
--- a/lustre/obdclass/cl_lock.c
+++ b/lustre/obdclass/cl_lock.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -77,9 +77,7 @@ static int cl_lock_invariant_trusted(const struct lu_env *env,
                                      const struct cl_lock *lock)
 {
-        return
-                cl_is_lock(lock) &&
-                ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
+        return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
                 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                 lock->cll_holds >= lock->cll_users &&
                 lock->cll_holds >= 0 &&
@@ -132,8 +130,8 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
                            const char *func, const int line)
 {
         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
-        CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
-                      "(%p/%d/%i) at %s():%d\n",
+        CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
+                      "(%p/%d/%d) at %s():%d\n",
                prefix, lock, cfs_atomic_read(&lock->cll_ref),
                lock->cll_guarder, lock->cll_depth,
                lock->cll_state, lock->cll_error, lock->cll_holds,
@@ -158,9 +156,13 @@ static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                     struct cl_lock *lock, __u32 enqflags)
 {
         cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
+#ifdef HAVE_LOCK_MAP_ACQUIRE
+        lock_map_acquire(&lock->dep_map);
+#else /* HAVE_LOCK_MAP_ACQUIRE */
         lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
                      /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
                      /* check: */ 2, RETIP);
+#endif /* HAVE_LOCK_MAP_ACQUIRE */
 }
 
 static void cl_lock_lockdep_release(const struct lu_env *env,
@@ -257,7 +259,6 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
 {
         struct cl_object *obj = lock->cll_descr.cld_obj;
 
-        LASSERT(cl_is_lock(lock));
         LINVRNT(!cl_lock_is_mutexed(lock));
 
         ENTRY;
@@ -348,7 +349,6 @@ void cl_lock_get_trust(struct cl_lock *lock)
 {
         struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
 
-        LASSERT(cl_is_lock(lock));
         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
         if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
@@ -509,13 +509,12 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
         cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                 int matched;
 
-                LASSERT(cl_is_lock(lock));
                 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                           lock->cll_state < CLS_FREEING &&
                           lock->cll_error == 0 &&
                           !(lock->cll_flags & CLF_CANCELLED) &&
                           cl_lock_fits_into(env, lock, need, io);
-                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
+                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
                        PDESCR(&lock->cll_descr), lock->cll_state,
                        PDESCR(need), matched);
                 if (matched) {
@@ -1221,6 +1220,51 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
 }
 EXPORT_SYMBOL(cl_enqueue_try);
 
+/**
+ * Cancel the conflicting lock found during previous enqueue.
+ *
+ * \retval 0 conflicting lock has been canceled.
+ * \retval -ve error code.
+ */
+int cl_lock_enqueue_wait(const struct lu_env *env,
+                         struct cl_lock *lock,
+                         int keep_mutex)
+{
+        struct cl_lock *conflict;
+        int rc = 0;
+        ENTRY;
+
+        LASSERT(cl_lock_is_mutexed(lock));
+        LASSERT(lock->cll_state == CLS_QUEUING);
+        LASSERT(lock->cll_conflict != NULL);
+
+        conflict = lock->cll_conflict;
+        lock->cll_conflict = NULL;
+
+        cl_lock_mutex_put(env, lock);
+        LASSERT(cl_lock_nr_mutexed(env) == 0);
+
+        cl_lock_mutex_get(env, conflict);
+        cl_lock_cancel(env, conflict);
+        cl_lock_delete(env, conflict);
+
+        while (conflict->cll_state != CLS_FREEING) {
+                rc = cl_lock_state_wait(env, conflict);
+                if (rc != 0)
+                        break;
+        }
+        cl_lock_mutex_put(env, conflict);
+        lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
+        cl_lock_put(env, conflict);
+
+        if (keep_mutex)
+                cl_lock_mutex_get(env, lock);
+
+        LASSERT(rc <= 0);
+        RETURN(rc);
+}
+EXPORT_SYMBOL(cl_lock_enqueue_wait);
+
 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
                              struct cl_io *io, __u32 enqflags)
 {
@@ -1236,7 +1280,10 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
         do {
                 result = cl_enqueue_try(env, lock, io, enqflags);
                 if (result == CLO_WAIT) {
-                        result = cl_lock_state_wait(env, lock);
+                        if (lock->cll_conflict != NULL)
+                                result = cl_lock_enqueue_wait(env, lock, 1);
+                        else
+                                result = cl_lock_state_wait(env, lock);
                         if (result == 0)
                                 continue;
                 }
@@ -1460,7 +1507,6 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
         LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
                  "Wrong state %d \n", lock->cll_state);
         LASSERT(lock->cll_holds > 0);
-        cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
 
         do {
                 result = cl_wait_try(env, lock);
@@ -1476,6 +1522,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
                 cl_lock_error(env, lock, result);
                 cl_lock_lockdep_release(env, lock);
         }
+        cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
         cl_lock_mutex_put(env, lock);
         LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
         RETURN(result);
@@ -1868,7 +1915,7 @@ void cl_lock_page_list_fixup(const struct lu_env *env,
                                       page->cp_index < temp->cp_index));
 
                 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
-                                        page, lock, 0, 0);
+                                        page, lock, 1, 0);
                 if (found == NULL)
                         continue;
 
@@ -1923,6 +1970,7 @@ int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
         struct cl_2queue *queue = &info->clt_queue;
         struct cl_lock_descr *descr = &lock->cll_descr;
         long page_count;
+        int nonblock = 1, resched;
         int result;
 
         LINVRNT(cl_lock_invariant(env, lock));
@@ -1930,13 +1978,14 @@ int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
 
         io->ci_obj = cl_object_top(descr->cld_obj);
         result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
-        if (result == 0) {
-                int nonblock = 1;
+        if (result != 0)
+                GOTO(out, result);
 
-restart:
+        do {
                 cl_2queue_init(queue);
                 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
-                                    descr->cld_end, &queue->c2_qin, nonblock);
+                                    descr->cld_end, &queue->c2_qin, nonblock,
+                                    &resched);
                 page_count = queue->c2_qin.pl_nr;
                 if (page_count > 0) {
                         result = cl_page_list_unmap(env, io, &queue->c2_qin);
@@ -1959,11 +2008,10 @@ restart:
                 }
                 cl_2queue_fini(env, queue);
 
-                if (nonblock) {
-                        nonblock = 0;
-                        goto restart;
-                }
-        }
+                if (resched)
+                        cfs_cond_resched();
+        } while (resched || nonblock--);
+out:
         cl_io_fini(env, io);
         RETURN(result);
 }
@@ -2016,20 +2064,6 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
 }
 EXPORT_SYMBOL(cl_locks_prune);
 
-/**
- * Returns true if \a addr is an address of an allocated cl_lock. Used in
- * assertions. This check is optimistically imprecise, i.e., it occasionally
- * returns true for the incorrect addresses, but if it returns false, then the
- * address is guaranteed to be incorrect. (Should be named cl_lockp().)
- *
- * \see cl_is_page()
- */
-int cl_is_lock(const void *addr)
-{
-        return cfs_mem_is_in_cache(addr, cl_lock_kmem);
-}
-EXPORT_SYMBOL(cl_is_lock);
-
 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
                                           const struct cl_io *io,
                                           const struct cl_lock_descr *need,
@@ -2088,14 +2122,10 @@ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                 const char *scope, const void *source)
 {
         struct cl_lock       *lock;
-        const struct lu_fid  *fid;
         int                   rc;
-        int                   iter;
         __u32                 enqflags = need->cld_enq_flags;
         ENTRY;
 
-        fid = lu_object_fid(&io->ci_obj->co_lu);
-        iter = 0;
         do {
                 lock = cl_lock_hold_mutex(env, io, need, scope, source);
                 if (!IS_ERR(lock)) {
@@ -2117,7 +2147,6 @@ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                 lock = ERR_PTR(rc);
                 } else
                         rc = PTR_ERR(lock);
-                iter++;
         } while (rc == 0);
         RETURN(lock);
 }
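
A note on the locking discipline in the new cl_lock_enqueue_wait(): the function drops the enqueuing lock's own mutex before taking the conflicting lock's mutex (hence the LASSERT(cl_lock_nr_mutexed(env) == 0) right after cl_lock_mutex_put()), so a thread never holds two cl_lock mutexes at once on this path. What follows is a minimal standalone sketch of that mutex-handoff pattern, using plain pthreads rather than the cl_lock/libcfs primitives; the toy_lock/toy_enqueue_wait/releaser names and the cancelled/freeing flags (standing in for cl_lock_cancel()/cl_lock_delete() and the CLS_FREEING state) are all hypothetical, not Lustre API.

#include <pthread.h>
#include <stdbool.h>

struct toy_lock {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;       /* broadcast on every state change */
        bool            cancelled;  /* teardown requested (cancel + delete) */
        bool            freeing;    /* teardown finished (CLS_FREEING analogue) */
};

/* Caller holds lock->mutex on entry; holds it again on exit iff keep_mutex. */
static int toy_enqueue_wait(struct toy_lock *lock, struct toy_lock *conflict,
                            bool keep_mutex)
{
        /* Drop our own mutex before touching the conflict: holding two
         * lock mutexes at once could deadlock (ABBA) against a thread
         * enqueuing in the opposite order. */
        pthread_mutex_unlock(&lock->mutex);

        pthread_mutex_lock(&conflict->mutex);
        conflict->cancelled = true;             /* cancel + delete analogue */
        pthread_cond_broadcast(&conflict->cond);
        while (!conflict->freeing)              /* wait for "CLS_FREEING" */
                pthread_cond_wait(&conflict->cond, &conflict->mutex);
        pthread_mutex_unlock(&conflict->mutex);

        if (keep_mutex)                         /* resume enqueue with mutex held */
                pthread_mutex_lock(&lock->mutex);
        return 0;
}

/* Stands in for whatever thread drops the last hold on the conflict. */
static void *releaser(void *arg)
{
        struct toy_lock *conflict = arg;

        pthread_mutex_lock(&conflict->mutex);
        while (!conflict->cancelled)
                pthread_cond_wait(&conflict->cond, &conflict->mutex);
        conflict->freeing = true;               /* last user has gone away */
        pthread_cond_broadcast(&conflict->cond);
        pthread_mutex_unlock(&conflict->mutex);
        return NULL;
}

int main(void)
{
        struct toy_lock lock = { PTHREAD_MUTEX_INITIALIZER,
                                 PTHREAD_COND_INITIALIZER, false, false };
        struct toy_lock conflict = { PTHREAD_MUTEX_INITIALIZER,
                                     PTHREAD_COND_INITIALIZER, false, false };
        pthread_t tid;

        pthread_create(&tid, NULL, releaser, &conflict);
        pthread_mutex_lock(&lock.mutex);        /* enqueue path holds its own mutex */
        toy_enqueue_wait(&lock, &conflict, true);
        pthread_mutex_unlock(&lock.mutex);
        pthread_join(tid, NULL);
        return 0;
}

The handoff is what lets two threads whose locks conflict with each other run this path concurrently: each cancels the other's lock while holding only that one mutex, instead of deadlocking while each waits for the mutex the other already holds.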