X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fosc%2Fosc_lock.c;h=0d4e3830641a750bfcec9f1ecacc8b0d519e8177;hb=7d2a94e40959a63d97ca4adc9a5d7a33511faa55;hp=f611bf6db40ea15cbe0c8ac07de1a76eff3840b0;hpb=5f19eae46092014fdf7bd8a643de14aec8fb63c1;p=fs%2Flustre-release.git

diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index f611bf6..0d4e383 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -38,8 +38,6 @@
  * Author: Nikita Danilov
  */
 
-/** \addtogroup osc osc @{ */
-
 #define DEBUG_SUBSYSTEM S_OSC
 
 #ifdef __KERNEL__
@@ -52,6 +50,10 @@
 
 #include "osc_cl_internal.h"
 
+/** \addtogroup osc
+ * @{
+ */
+
 /*****************************************************************************
  *
  * Type conversions.
@@ -62,6 +64,7 @@ static const struct cl_lock_operations osc_lock_ops;
 static const struct cl_lock_operations osc_lock_lockless_ops;
 static void osc_lock_to_lockless(const struct lu_env *env,
                                  struct osc_lock *ols, int force);
+static int osc_lock_has_pages(struct osc_lock *olck);
 
 int osc_lock_is_lockless(const struct osc_lock *olck)
 {
@@ -132,10 +135,10 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 {
         struct ldlm_lock *dlmlock;
 
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         dlmlock = olck->ols_lock;
         if (dlmlock == NULL) {
-                spin_unlock(&osc_ast_guard);
+                cfs_spin_unlock(&osc_ast_guard);
                 return;
         }
 
@@ -144,7 +147,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
          * call to osc_lock_detach() */
         dlmlock->l_ast_data = NULL;
         olck->ols_handle.cookie = 0ULL;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         lock_res_and_lock(dlmlock);
         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
@@ -164,15 +167,28 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
         unlock_res_and_lock(dlmlock);
 
         /* release a reference taken in osc_lock_upcall0(). */
+        LASSERT(olck->ols_has_ref);
         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
         LDLM_LOCK_RELEASE(dlmlock);
+        olck->ols_has_ref = 0;
+}
+
+static int osc_lock_unhold(struct osc_lock *ols)
+{
+        int result = 0;
+
+        if (ols->ols_hold) {
+                ols->ols_hold = 0;
+                result = osc_cancel_base(&ols->ols_handle,
+                                         ols->ols_einfo.ei_mode);
+        }
+        return result;
 }
 
 static int osc_lock_unuse(const struct lu_env *env,
                           const struct cl_lock_slice *slice)
 {
         struct osc_lock *ols = cl2osc_lock(slice);
-        int result;
 
         LASSERT(ols->ols_state == OLS_GRANTED ||
                 ols->ols_state == OLS_UPCALL_RECEIVED);
@@ -190,10 +206,7 @@ static int osc_lock_unuse(const struct lu_env *env,
          * e.g., for liblustre) sees that lock is released. */
         ols->ols_state = OLS_RELEASED;
-        ols->ols_hold = 0;
-        result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
-        ols->ols_has_ref = 0;
-        return result;
+        return osc_lock_unhold(ols);
 }
 
 static void osc_lock_fini(const struct lu_env *env,
@@ -208,8 +221,7 @@ static void osc_lock_fini(const struct lu_env *env,
          * to the lock), before reply from a server was received. In this case
          * lock is destroyed immediately after upcall.
*/ - if (ols->ols_hold) - osc_lock_unuse(env, slice); + osc_lock_unhold(ols); LASSERT(ols->ols_lock == NULL); OBD_SLAB_FREE_PTR(ols, osc_lock_kmem); @@ -242,6 +254,7 @@ static void osc_lock_build_policy(const struct lu_env *env, const struct cl_lock_descr *d = &lock->cll_descr; osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end); + policy->l_extent.gid = d->cld_gid; } static int osc_enq2ldlm_flags(__u32 enqflags) @@ -263,14 +276,14 @@ static int osc_enq2ldlm_flags(__u32 enqflags) * Global spin-lock protecting consistency of ldlm_lock::l_ast_data * pointers. Initialized in osc_init(). */ -spinlock_t osc_ast_guard; +cfs_spinlock_t osc_ast_guard; static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) { struct osc_lock *olck; lock_res_and_lock(dlm_lock); - spin_lock(&osc_ast_guard); + cfs_spin_lock(&osc_ast_guard); olck = dlm_lock->l_ast_data; if (olck != NULL) { struct cl_lock *lock = olck->ols_cl.cls_lock; @@ -290,7 +303,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) } else olck = NULL; } - spin_unlock(&osc_ast_guard); + cfs_spin_unlock(&osc_ast_guard); unlock_res_and_lock(dlm_lock); return olck; } @@ -405,6 +418,7 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode); descr->cld_start = cl_index(descr->cld_obj, ext->start); descr->cld_end = cl_index(descr->cld_obj, ext->end); + descr->cld_gid = ext->gid; /* * tell upper layers the extent of the lock that was actually * granted @@ -437,11 +451,11 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) LASSERT(dlmlock != NULL); lock_res_and_lock(dlmlock); - spin_lock(&osc_ast_guard); + cfs_spin_lock(&osc_ast_guard); LASSERT(dlmlock->l_ast_data == olck); LASSERT(olck->ols_lock == NULL); olck->ols_lock = dlmlock; - spin_unlock(&osc_ast_guard); + cfs_spin_unlock(&osc_ast_guard); /* * Lock might be not yet granted. In this case, completion ast @@ -457,11 +471,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) * this. */ ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode); - olck->ols_hold = olck->ols_has_ref = 1; + olck->ols_hold = 1; /* lock reference taken by ldlm_handle2lock_long() is owned by * osc_lock and released in osc_lock_detach() */ lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); + olck->ols_has_ref = 1; } /** @@ -500,11 +515,11 @@ static int osc_lock_upcall(void *cookie, int errcode) dlmlock = ldlm_handle2lock(&olck->ols_handle); if (dlmlock != NULL) { lock_res_and_lock(dlmlock); - spin_lock(&osc_ast_guard); + cfs_spin_lock(&osc_ast_guard); LASSERT(olck->ols_lock == NULL); dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; - spin_unlock(&osc_ast_guard); + cfs_spin_unlock(&osc_ast_guard); unlock_res_and_lock(dlmlock); LDLM_LOCK_PUT(dlmlock); } @@ -564,12 +579,11 @@ static void osc_lock_blocking(const struct lu_env *env, CLASSERT(OLS_BLOCKED < OLS_CANCELLED); LASSERT(!osc_lock_is_lockless(olck)); - if (olck->ols_hold) - /* - * Lock might be still addref-ed here, if e.g., blocking ast - * is sent for a failed lock. - */ - osc_lock_unuse(env, &olck->ols_cl); + /* + * Lock might be still addref-ed here, if e.g., blocking ast + * is sent for a failed lock. 
+ */ + osc_lock_unhold(olck); if (blocking && olck->ols_state < OLS_BLOCKED) /* @@ -766,7 +780,7 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, LASSERT(dlmlock->l_lvb_data != NULL); lock_res_and_lock(dlmlock); olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) + if (olck->ols_lock == NULL) { /* * upcall (osc_lock_upcall()) hasn't yet been * called. Do nothing now, upcall will bind @@ -776,12 +790,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, * and ldlm_lock are always bound when * osc_lock is in OLS_GRANTED state. */ - ; - else if (dlmlock->l_granted_mode != LCK_MINMODE) + } else if (dlmlock->l_granted_mode == + dlmlock->l_req_mode) { osc_lock_granted(env, olck, dlmlock, dlmrc); + } unlock_res_and_lock(dlmlock); - if (dlmrc != 0) + + if (dlmrc != 0) { + CL_LOCK_DEBUG(D_ERROR, env, lock, + "dlmlock returned %d\n", dlmrc); cl_lock_error(env, lock, dlmrc); + } cl_lock_mutex_put(env, lock); osc_ast_data_put(env, olck); result = 0; @@ -871,7 +890,7 @@ static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock) unsigned long weight; ENTRY; - might_sleep(); + cfs_might_sleep(); /* * osc_ldlm_weigh_ast has a complex context since it might be called * because of lock canceling, or from user's input. We have to make @@ -931,86 +950,6 @@ static void osc_lock_build_einfo(const struct lu_env *env, einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */ } -static int osc_lock_delete0(struct cl_lock *conflict) -{ - struct cl_env_nest nest; - struct lu_env *env; - int rc = 0; - - env = cl_env_nested_get(&nest); - if (!IS_ERR(env)) { - cl_lock_delete(env, conflict); - cl_env_nested_put(&nest, env); - } else - rc = PTR_ERR(env); - return rc; -} -/** - * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This - * is called as a part of enqueuing to cancel conflicting locks early. - * - * \retval 0: success, \a conflict was cancelled and destroyed. - * - * \retval CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was - * released in the process. Repeat enqueing. - * - * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and - * either \a lock is non-blocking, or current thread - * holds other locks, that prevent it from waiting - * for cancel to complete. - * - * \retval -ve: other error, including -EINTR. - * - */ -static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock *conflict, int canwait) -{ - int rc; - - LASSERT(cl_lock_is_mutexed(lock)); - LASSERT(cl_lock_is_mutexed(conflict)); - - rc = 0; - if (conflict->cll_state != CLS_FREEING) { - cl_lock_cancel(env, conflict); - rc = osc_lock_delete0(conflict); - if (rc) - return rc; - if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) { - rc = -EWOULDBLOCK; - if (cl_lock_nr_mutexed(env) > 2) - /* - * If mutices of locks other than @lock and - * @scan are held by the current thread, it - * cannot wait on @scan state change in a - * dead-lock safe matter, so simply skip early - * cancellation in this case. - * - * This means that early cancellation doesn't - * work when there is even slight mutex - * contention, as top-lock's mutex is usually - * held at this time. - */ - ; - else if (canwait) { - /* Waiting for @scan to be destroyed */ - cl_lock_mutex_put(env, lock); - do { - rc = cl_lock_state_wait(env, conflict); - } while (!rc && - conflict->cll_state < CLS_FREEING); - /* mutex was released, repeat enqueue. 
*/ - rc = rc ?: CLO_REPEAT; - cl_lock_mutex_get(env, lock); - } - } - LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING)); - CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n", - conflict, rc ? "not":"", rc); - } - return rc; -} - /** * Determine if the lock should be converted into a lockless lock. * @@ -1049,14 +988,14 @@ static void osc_lock_to_lockless(const struct lu_env *env, io->ci_lockreq == CILR_NEVER); ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data; - ols->ols_locklessable = (io->ci_type != CIT_TRUNC) && + ols->ols_locklessable = (io->ci_type != CIT_SETATTR) && (io->ci_lockreq == CILR_MAYBE) && (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK); if (io->ci_lockreq == CILR_NEVER || /* lockless IO */ (ols->ols_locklessable && osc_object_is_contended(oob)) || /* lockless truncate */ - (io->ci_type == CIT_TRUNC && + (cl_io_is_trunc(io) && (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) && osd->od_lockless_truncate)) { ols->ols_locklessable = 1; @@ -1066,6 +1005,21 @@ static void osc_lock_to_lockless(const struct lu_env *env, LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); } +static int osc_lock_compatible(const struct osc_lock *qing, + const struct osc_lock *qed) +{ + enum cl_lock_mode qing_mode; + enum cl_lock_mode qed_mode; + + qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode; + if (qed->ols_glimpse && + (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ)) + return 1; + + qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode; + return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ)); +} + /** * Cancel all conflicting locks and wait for them to be destroyed. * @@ -1083,151 +1037,80 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock = olck->ols_cl.cls_lock; struct cl_lock_descr *descr = &lock->cll_descr; struct cl_object_header *hdr = cl_object_header(descr->cld_obj); - struct cl_lock_closure *closure = &osc_env_info(env)->oti_closure; - struct cl_lock *scan; - struct cl_lock *temp; + struct cl_lock *scan = lock; + struct cl_lock *conflict= NULL; int lockless = osc_lock_is_lockless(olck); int rc = 0; - int canwait; - int stop; ENTRY; LASSERT(cl_lock_is_mutexed(lock)); LASSERT(lock->cll_state == CLS_QUEUING); - /* - * XXX This function could be sped up if we had asynchronous - * cancellation. - */ + /* make it enqueue anyway for glimpse lock, because we actually + * don't need to cancel any conflicting locks. */ + if (olck->ols_glimpse) + return 0; - canwait = - !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) && - cl_lock_nr_mutexed(env) == 1; - cl_lock_closure_init(env, closure, lock, canwait); - spin_lock(&hdr->coh_lock_guard); - list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) { - if (scan == lock) - continue; + cfs_spin_lock(&hdr->coh_lock_guard); + cfs_list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) { + struct cl_lock_descr *cld = &scan->cll_descr; + const struct osc_lock *scan_ols; if (scan->cll_state < CLS_QUEUING || scan->cll_state == CLS_FREEING || - scan->cll_descr.cld_start > descr->cld_end || - scan->cll_descr.cld_end < descr->cld_start) + cld->cld_start > descr->cld_end || + cld->cld_end < descr->cld_start) continue; /* overlapped and living locks. */ - /* A tricky case for lockless pages: - * We need to cancel the compatible locks if we're enqueuing + + /* We're not supposed to give up group lock. 
*/ + if (scan->cll_descr.cld_mode == CLM_GROUP) { + LASSERT(descr->cld_mode != CLM_GROUP || + descr->cld_gid != scan->cll_descr.cld_gid); + continue; + } + + scan_ols = osc_lock_at(scan); + + /* We need to cancel the compatible locks if we're enqueuing * a lockless lock, for example: * imagine that client has PR lock on [0, 1000], and thread T0 * is doing lockless IO in [500, 1500] region. Concurrent * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. - */ - if (!lockless && cl_lock_compatible(scan, lock)) + * wrong, because these data are possibly stale. */ + if (!lockless && osc_lock_compatible(olck, scan_ols)) continue; /* Now @scan is conflicting with @lock, this means current * thread have to sleep for @scan being destroyed. */ - cl_lock_get_trust(scan); - if (&temp->cll_linkage != &hdr->coh_locks) - cl_lock_get_trust(temp); - spin_unlock(&hdr->coh_lock_guard); - lu_ref_add(&scan->cll_reference, "cancel-wait", lock); - - LASSERT(list_empty(&closure->clc_list)); - rc = cl_lock_closure_build(env, scan, closure); - if (rc == 0) { - rc = osc_lock_cancel_wait(env, lock, scan, canwait); - cl_lock_disclosure(env, closure); - if (rc == -EWOULDBLOCK) - rc = 0; + if (scan_ols->ols_owner == osc_env_io(env)) { + CERROR("DEADLOCK POSSIBLE!\n"); + CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n"); + CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n"); + libcfs_debug_dumpstack(NULL); } - if (rc == CLO_REPEAT && !canwait) - /* cannot wait... no early cancellation. */ - rc = 0; - - lu_ref_del(&scan->cll_reference, "cancel-wait", lock); - cl_lock_put(env, scan); - spin_lock(&hdr->coh_lock_guard); - /* - * Lock list could have been modified, while spin-lock was - * released. Check that it is safe to continue. - */ - stop = list_empty(&temp->cll_linkage); - if (&temp->cll_linkage != &hdr->coh_locks) - cl_lock_put(env, temp); - if (stop || rc != 0) - break; + cl_lock_get_trust(scan); + conflict = scan; + break; } - spin_unlock(&hdr->coh_lock_guard); - cl_lock_closure_fini(closure); - RETURN(rc); -} - -/** - * Deadlock avoidance for osc_lock_enqueue(). Consider following scenario: - * - * - Thread0: obtains PR:[0, 10]. Lock is busy. - * - * - Thread1: enqueues PW:[5, 50]. Blocking ast is sent to - * PR:[0, 10], but cancellation of busy lock is postponed. - * - * - Thread0: enqueue PR:[30, 40]. Lock is locally matched to - * PW:[5, 50], and thread0 waits for the lock completion never - * releasing PR:[0, 10]---deadlock. - * - * The second PR lock can be glimpse (it is to deal with that situation that - * ll_glimpse_size() has second argument, preventing local match of - * not-yet-granted locks, see bug 10295). Similar situation is possible in the - * case of memory mapped user level buffer. - * - * To prevent this we can detect a situation when current "thread" or "io" - * already holds a lock on this object and either add LDLM_FL_BLOCK_GRANTED to - * the ols->ols_flags, or prevent local match with PW locks. 
- */ -static int osc_deadlock_is_possible(const struct lu_env *env, - struct cl_lock *lock) -{ - struct cl_object *obj; - struct cl_object_header *head; - struct cl_lock *scan; - struct osc_io *oio; - - int result; - - ENTRY; - - LASSERT(cl_lock_is_mutexed(lock)); - - oio = osc_env_io(env); - obj = lock->cll_descr.cld_obj; - head = cl_object_header(obj); - - result = 0; - spin_lock(&head->coh_lock_guard); - list_for_each_entry(scan, &head->coh_locks, cll_linkage) { - if (scan != lock) { - struct osc_lock *oscan; - - oscan = osc_lock_at(scan); - LASSERT(oscan != NULL); - if (oscan->ols_owner == oio) { - result = 1; - break; - } - } + cfs_spin_unlock(&hdr->coh_lock_guard); + + if (conflict) { + CDEBUG(D_DLMTRACE, "lock %p is confliced with %p, will wait\n", + lock, conflict); + lu_ref_add(&conflict->cll_reference, "cancel-wait", lock); + LASSERT(lock->cll_conflict == NULL); + lock->cll_conflict = conflict; + rc = CLO_WAIT; } - spin_unlock(&head->coh_lock_guard); - RETURN(result); + RETURN(rc); } /** * Implementation of cl_lock_operations::clo_enqueue() method for osc * layer. This initiates ldlm enqueue: * - * - checks for possible dead-lock conditions (osc_deadlock_is_possible()); - * * - cancels conflicting locks early (osc_lock_enqueue_wait()); * * - calls osc_enqueue_base() to do actual enqueue. @@ -1240,7 +1123,7 @@ static int osc_deadlock_is_possible(const struct lu_env *env, */ static int osc_lock_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice, - struct cl_io *_, __u32 enqflags) + struct cl_io *unused, __u32 enqflags) { struct osc_lock *ols = cl2osc_lock(slice); struct cl_lock *lock = ols->ols_cl.cls_lock; @@ -1259,16 +1142,14 @@ static int osc_lock_enqueue(const struct lu_env *env, osc_lock_build_res(env, obj, resname); osc_lock_build_policy(env, lock, policy); ols->ols_flags = osc_enq2ldlm_flags(enqflags); - if (osc_deadlock_is_possible(env, lock)) - ols->ols_flags |= LDLM_FL_BLOCK_GRANTED; if (ols->ols_flags & LDLM_FL_HAS_INTENT) ols->ols_glimpse = 1; + if (!(enqflags & CEF_MUST)) + /* try to convert this lock to a lockless lock */ + osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER)); result = osc_lock_enqueue_wait(env, ols); if (result == 0) { - if (!(enqflags & CEF_MUST)) - /* try to convert this lock to a lockless lock */ - osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER)); if (!osc_lock_is_lockless(ols)) { if (ols->ols_locklessable) ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; @@ -1297,6 +1178,7 @@ static int osc_lock_enqueue(const struct lu_env *env, } } else { ols->ols_state = OLS_GRANTED; + ols->ols_owner = osc_env_io(env); } } LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols))); @@ -1330,13 +1212,14 @@ static int osc_lock_use(const struct lu_env *env, int rc; LASSERT(!olck->ols_hold); + /* * Atomically check for LDLM_FL_CBPENDING and addref a lock if this * flag is not set. This protects us from a concurrent blocking ast. */ rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode); if (rc == 0) { - olck->ols_hold = olck->ols_has_ref = 1; + olck->ols_hold = 1; olck->ols_state = OLS_GRANTED; } else { struct cl_lock *lock; @@ -1348,9 +1231,8 @@ static int osc_lock_use(const struct lu_env *env, * cl_lock mutex. 
*/ lock = slice->cls_lock; - LASSERT(lock->cll_state == CLS_CACHED); + LASSERT(lock->cll_state == CLS_INTRANSIT); LASSERT(lock->cll_users > 0); - LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING); /* set a flag for osc_dlm_blocking_ast0() to signal the * lock.*/ olck->ols_ast_wait = 1; @@ -1372,8 +1254,10 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) cl_env_nested_put(&nest, env); } else result = PTR_ERR(env); - if (result == 0) + if (result == 0) { ols->ols_flush = 1; + LINVRNT(!osc_lock_has_pages(ols)); + } return result; } @@ -1408,8 +1292,7 @@ static void osc_lock_cancel(const struct lu_env *env, discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA; result = osc_lock_flush(olck, discard); - if (olck->ols_hold) - osc_lock_unuse(env, slice); + osc_lock_unhold(olck); lock_res_and_lock(dlmlock); /* Now that we're the only user of dlm read/write reference, @@ -1463,12 +1346,13 @@ static int osc_lock_has_pages(struct osc_lock *olck) plist = &osc_env_info(env)->oti_plist; cl_page_list_init(plist); - mutex_lock(&oob->oo_debug_mutex); + cfs_mutex_lock(&oob->oo_debug_mutex); io->ci_obj = cl_object_top(obj); cl_io_init(env, io, CIT_MISC, io->ci_obj); cl_page_gang_lookup(env, obj, io, - descr->cld_start, descr->cld_end, plist); + descr->cld_start, descr->cld_end, plist, 0, + NULL); cl_lock_page_list_fixup(env, io, lock, plist); if (plist->pl_nr > 0) { CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n"); @@ -1479,14 +1363,17 @@ static int osc_lock_has_pages(struct osc_lock *olck) cl_page_list_disown(env, io, plist); cl_page_list_fini(env, plist); cl_io_fini(env, io); - mutex_unlock(&oob->oo_debug_mutex); + cfs_mutex_unlock(&oob->oo_debug_mutex); cl_env_nested_put(&nest, env); } else result = 0; return result; } #else -# define osc_lock_has_pages(olck) (0) +static int osc_lock_has_pages(struct osc_lock *olck) +{ + return 0; +} #endif /* INVARIANT_CHECK */ static void osc_lock_delete(const struct lu_env *env, @@ -1495,11 +1382,16 @@ static void osc_lock_delete(const struct lu_env *env, struct osc_lock *olck; olck = cl2osc_lock(slice); + if (olck->ols_glimpse) { + LASSERT(!olck->ols_hold); + LASSERT(!olck->ols_lock); + return; + } + LINVRNT(osc_lock_invariant(olck)); LINVRNT(!osc_lock_has_pages(olck)); - if (olck->ols_hold) - osc_lock_unuse(env, slice); + osc_lock_unhold(olck); osc_lock_detach(env, olck); } @@ -1518,13 +1410,14 @@ static void osc_lock_state(const struct lu_env *env, enum cl_lock_state state) { struct osc_lock *lock = cl2osc_lock(slice); - struct osc_io *oio = osc_env_io(env); /* * XXX multiple io contexts can use the lock at the same time. */ LINVRNT(osc_lock_invariant(lock)); if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) { + struct osc_io *oio = osc_env_io(env); + LASSERT(lock->ols_owner == NULL); lock->ols_owner = oio; } else if (state != CLS_HELD) @@ -1539,13 +1432,54 @@ static int osc_lock_print(const struct lu_env *env, void *cookie, /* * XXX print ldlm lock and einfo properly. 
*/ - (*p)(env, cookie, "%p %08x "LPU64" %d %p ", + (*p)(env, cookie, "%p %08x "LPX64" %d %p ", lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie, lock->ols_state, lock->ols_owner); osc_lvb_print(env, cookie, p, &lock->ols_lvb); return 0; } +static int osc_lock_fits_into(const struct lu_env *env, + const struct cl_lock_slice *slice, + const struct cl_lock_descr *need, + const struct cl_io *io) +{ + struct osc_lock *ols = cl2osc_lock(slice); + + if (need->cld_enq_flags & CEF_NEVER) + return 0; + + if (need->cld_mode == CLM_PHANTOM) { + /* + * Note: the QUEUED lock can't be matched here, otherwise + * it might cause the deadlocks. + * In read_process, + * P1: enqueued read lock, create sublock1 + * P2: enqueued write lock, create sublock2(conflicted + * with sublock1). + * P1: Grant read lock. + * P1: enqueued glimpse lock(with holding sublock1_read), + * matched with sublock2, waiting sublock2 to be granted. + * But sublock2 can not be granted, because P1 + * will not release sublock1. Bang! + */ + if (ols->ols_state < OLS_GRANTED || + ols->ols_state > OLS_RELEASED) + return 0; + } else if (need->cld_enq_flags & CEF_MUST) { + /* + * If the lock hasn't ever enqueued, it can't be matched + * because enqueue process brings in many information + * which can be used to determine things such as lockless, + * CEF_MUST, etc. + */ + if (ols->ols_state < OLS_UPCALL_RECEIVED && + ols->ols_locklessable) + return 0; + } + return 1; +} + static const struct cl_lock_operations osc_lock_ops = { .clo_fini = osc_lock_fini, .clo_enqueue = osc_lock_enqueue, @@ -1556,12 +1490,13 @@ static const struct cl_lock_operations osc_lock_ops = { .clo_state = osc_lock_state, .clo_cancel = osc_lock_cancel, .clo_weigh = osc_lock_weigh, - .clo_print = osc_lock_print + .clo_print = osc_lock_print, + .clo_fits_into = osc_lock_fits_into, }; static int osc_lock_lockless_enqueue(const struct lu_env *env, const struct cl_lock_slice *slice, - struct cl_io *_, __u32 enqflags) + struct cl_io *unused, __u32 enqflags) { LBUG(); return 0; @@ -1611,11 +1546,12 @@ static void osc_lock_lockless_state(const struct lu_env *env, enum cl_lock_state state) { struct osc_lock *lock = cl2osc_lock(slice); - struct osc_io *oio = osc_env_io(env); LINVRNT(osc_lock_invariant(lock)); if (state == CLS_HELD) { - LASSERT(lock->ols_owner == NULL); + struct osc_io *oio = osc_env_io(env); + + LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio)); lock->ols_owner = oio; /* set the io to be lockless if this lock is for io's @@ -1631,7 +1567,15 @@ static int osc_lock_lockless_fits_into(const struct lu_env *env, const struct cl_lock_descr *need, const struct cl_io *io) { - return 0; + struct osc_lock *lock = cl2osc_lock(slice); + + if (!(need->cld_enq_flags & CEF_NEVER)) + return 0; + + /* To solve the problem of stacking echo client upon osc directly. + * see bug 22147 for details. + */ + return (lock->ols_owner == osc_env_io(env)); } static const struct cl_lock_operations osc_lock_lockless_ops = { @@ -1647,7 +1591,7 @@ static const struct cl_lock_operations osc_lock_lockless_ops = { int osc_lock_init(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *_) + const struct cl_io *unused) { struct osc_lock *clk; int result;
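
For readers following the enqueue changes above: the sketch below is an illustrative, standalone model (not part of the patch and not Lustre code) of the compatibility rule that the new osc_lock_compatible() helper applies inside osc_lock_enqueue_wait(). The mini_* names are hypothetical stand-ins for cl_lock_descr::cld_mode, osc_lock::ols_state and osc_lock::ols_glimpse; the only assumption carried over is the state ordering NEW < ENQUEUED < UPCALL_RECEIVED < GRANTED, which the patch's ">= OLS_UPCALL_RECEIVED" test relies on.

#include <assert.h>

enum mini_mode  { MINI_READ, MINI_WRITE };
enum mini_state { MINI_NEW, MINI_ENQUEUED, MINI_UPCALL_RECEIVED, MINI_GRANTED };

struct mini_lock {
        enum mini_mode  mode;    /* stands in for cll_descr.cld_mode    */
        enum mini_state state;   /* stands in for osc_lock::ols_state   */
        int             glimpse; /* stands in for osc_lock::ols_glimpse */
};

/* Mirrors the rule in the new osc_lock_compatible(): a queued glimpse
 * lock ("qed") is compatible once its upcall has been received, or
 * whenever the enqueuing lock ("qing") only wants to read; otherwise
 * only read/read pairs are compatible. */
static int mini_lock_compatible(const struct mini_lock *qing,
                                const struct mini_lock *qed)
{
        if (qed->glimpse &&
            (qed->state >= MINI_UPCALL_RECEIVED || qing->mode == MINI_READ))
                return 1;
        return qing->mode == MINI_READ && qed->mode == MINI_READ;
}

int main(void)
{
        struct mini_lock rd = { MINI_READ,  MINI_GRANTED,         0 };
        struct mini_lock wr = { MINI_WRITE, MINI_ENQUEUED,        0 };
        struct mini_lock gl = { MINI_WRITE, MINI_UPCALL_RECEIVED, 1 };

        assert(mini_lock_compatible(&rd, &rd));  /* read vs read: no conflict */
        assert(!mini_lock_compatible(&wr, &rd)); /* write vs read: conflict   */
        assert(mini_lock_compatible(&wr, &gl));  /* glimpse past upcall: ok   */
        return 0;
}

The effect visible in the patch: a glimpse lock stops blocking other enqueues as soon as its upcall arrives, and read enqueues never wait on glimpse locks at all, which is why osc_lock_enqueue_wait() now returns 0 immediately for glimpse enqueues instead of scanning the object's lock list for conflicts.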