b=21718 refer to osc_io only if the lock was held.
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 247c044..b96b962 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -38,8 +38,6 @@
  *   Author: Nikita Danilov <nikita.danilov@sun.com>
  */
 
-/** \addtogroup osc osc @{ */
-
 #define DEBUG_SUBSYSTEM S_OSC
 
 #ifdef __KERNEL__
 
 #include "osc_cl_internal.h"
 
+/** \addtogroup osc 
+ *  @{ 
+ */
+
 /*****************************************************************************
  *
  * Type conversions.
@@ -62,6 +64,7 @@ static const struct cl_lock_operations osc_lock_ops;
 static const struct cl_lock_operations osc_lock_lockless_ops;
 static void osc_lock_to_lockless(const struct lu_env *env,
                                  struct osc_lock *ols, int force);
+static int osc_lock_has_pages(struct osc_lock *olck);
 
 int osc_lock_is_lockless(const struct osc_lock *olck)
 {
@@ -132,10 +135,10 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 {
         struct ldlm_lock *dlmlock;
 
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         dlmlock = olck->ols_lock;
         if (dlmlock == NULL) {
-                spin_unlock(&osc_ast_guard);
+                cfs_spin_unlock(&osc_ast_guard);
                 return;
         }
 
@@ -144,7 +147,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
          * call to osc_lock_detach() */
         dlmlock->l_ast_data = NULL;
         olck->ols_handle.cookie = 0ULL;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         lock_res_and_lock(dlmlock);
         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
@@ -164,15 +167,28 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
                 unlock_res_and_lock(dlmlock);
 
         /* release a reference taken in osc_lock_upcall0(). */
+        LASSERT(olck->ols_has_ref);
         lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
         LDLM_LOCK_RELEASE(dlmlock);
+        olck->ols_has_ref = 0;
+}
+
+static int osc_lock_unhold(struct osc_lock *ols)
+{
+        int result = 0;
+
+        if (ols->ols_hold) {
+                ols->ols_hold = 0;
+                result = osc_cancel_base(&ols->ols_handle,
+                                         ols->ols_einfo.ei_mode);
+        }
+        return result;
 }
 
 static int osc_lock_unuse(const struct lu_env *env,
                           const struct cl_lock_slice *slice)
 {
         struct osc_lock *ols = cl2osc_lock(slice);
-        int result;
 
         LASSERT(ols->ols_state == OLS_GRANTED ||
                 ols->ols_state == OLS_UPCALL_RECEIVED);
@@ -190,10 +206,7 @@ static int osc_lock_unuse(const struct lu_env *env,
          * e.g., for liblustre) sees that lock is released.
          */
         ols->ols_state = OLS_RELEASED;
-        ols->ols_hold = 0;
-        result = osc_cancel_base(&ols->ols_handle, ols->ols_einfo.ei_mode);
-        ols->ols_has_ref = 0;
-        return result;
+        return osc_lock_unhold(ols);
 }
 
 static void osc_lock_fini(const struct lu_env *env,
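
[Editor's note] The new osc_lock_unhold() helper above separates dropping the LDLM hold (ols_hold, released via osc_cancel_base()) from dropping the lu_ref reference tracked by ols_has_ref, which is now released only in osc_lock_detach(). Because the helper is idempotent, both osc_lock_unuse() and osc_lock_fini() can call it unconditionally. A minimal userspace sketch of the same idea, using hypothetical names rather than the Lustre APIs:

#include <assert.h>
#include <stdio.h>

struct my_lock {
        int hold;     /* like ols_hold: pins the server-side lock */
        int has_ref;  /* like ols_has_ref: tracks the lu_ref reference */
};

static int my_cancel(struct my_lock *lk)
{
        (void)lk;     /* the real code issues osc_cancel_base() here */
        printf("cancel issued\n");
        return 0;
}

/* Mirrors osc_lock_unhold(): idempotent, cancels only while a hold exists. */
static int my_unhold(struct my_lock *lk)
{
        int result = 0;

        if (lk->hold) {
                lk->hold = 0;
                result = my_cancel(lk);
        }
        return result;
}

/* Mirrors the tail of osc_lock_detach(): drop the reference exactly once. */
static void my_detach(struct my_lock *lk)
{
        assert(lk->has_ref);
        lk->has_ref = 0;
}

int main(void)
{
        struct my_lock lk = { .hold = 1, .has_ref = 1 };

        my_unhold(&lk);   /* drops the hold and issues one cancel */
        my_unhold(&lk);   /* harmless no-op: cancel and fini may both run */
        my_detach(&lk);   /* tracking reference released exactly once */
        return 0;
}
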
@@ -208,10 +221,8 @@ static void osc_lock_fini(const struct lu_env *env,
          * to the lock), before reply from a server was received. In this case
          * lock is destroyed immediately after upcall.
          */
-        if (ols->ols_hold)
-                osc_lock_unuse(env, slice);
-        if (ols->ols_lock != NULL)
-                osc_lock_detach(env, ols);
+        osc_lock_unhold(ols);
+        LASSERT(ols->ols_lock == NULL);
 
         OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
 }
@@ -243,6 +254,7 @@ static void osc_lock_build_policy(const struct lu_env *env,
         const struct cl_lock_descr *d = &lock->cll_descr;
 
         osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
+        policy->l_extent.gid = d->cld_gid;
 }
 
 static int osc_enq2ldlm_flags(__u32 enqflags)
@@ -264,14 +276,14 @@ static int osc_enq2ldlm_flags(__u32 enqflags)
  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
  * pointers. Initialized in osc_init().
  */
-spinlock_t osc_ast_guard;
+cfs_spinlock_t osc_ast_guard;
 
 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
 {
         struct osc_lock *olck;
 
         lock_res_and_lock(dlm_lock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         olck = dlm_lock->l_ast_data;
         if (olck != NULL) {
                 struct cl_lock *lock = olck->ols_cl.cls_lock;
@@ -291,7 +303,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
                 } else
                         olck = NULL;
         }
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
         unlock_res_and_lock(dlm_lock);
         return olck;
 }
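
[Editor's note] The hunk above keeps the pattern in which the single global spin-lock osc_ast_guard makes "read ldlm_lock::l_ast_data and pin the result" atomic with respect to "clear l_ast_data in osc_lock_detach()"; only the spin-lock API is renamed to the cfs_ wrappers. A self-contained pthread sketch of that handoff, with hypothetical names standing in for the Lustre types:

#include <pthread.h>
#include <stdio.h>

struct osc_side { int pinned; };                  /* stands in for osc_lock */
struct dlm_side { struct osc_side *ast_data; };   /* like ldlm_lock::l_ast_data */

static pthread_spinlock_t ast_guard;              /* like osc_ast_guard */

/* Like osc_ast_data_get(): return the peer, pinned, or NULL if detached. */
static struct osc_side *ast_data_get(struct dlm_side *d)
{
        struct osc_side *o;

        pthread_spin_lock(&ast_guard);
        o = d->ast_data;
        if (o != NULL)
                o->pinned++;      /* stands in for cl_lock_get_trust() */
        pthread_spin_unlock(&ast_guard);
        return o;
}

/* Like the start of osc_lock_detach(): later ASTs observe NULL. */
static void detach(struct dlm_side *d)
{
        pthread_spin_lock(&ast_guard);
        d->ast_data = NULL;
        pthread_spin_unlock(&ast_guard);
}

int main(void)
{
        struct osc_side o = { 0 };
        struct dlm_side d = { &o };

        pthread_spin_init(&ast_guard, PTHREAD_PROCESS_PRIVATE);
        printf("before detach: %p\n", (void *)ast_data_get(&d));
        detach(&d);
        printf("after detach:  %p\n", (void *)ast_data_get(&d));
        pthread_spin_destroy(&ast_guard);
        return 0;
}
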
@@ -406,11 +418,11 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
                 descr->cld_mode  = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
                 descr->cld_start = cl_index(descr->cld_obj, ext->start);
                 descr->cld_end   = cl_index(descr->cld_obj, ext->end);
+                descr->cld_gid   = ext->gid;
                 /*
                  * tell upper layers the extent of the lock that was actually
                  * granted
                  */
-                LINVRNT(osc_lock_invariant(olck));
                 olck->ols_state = OLS_GRANTED;
                 osc_lock_lvb_update(env, olck, rc);
 
@@ -422,6 +434,7 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
                 unlock_res_and_lock(dlmlock);
                 cl_lock_modify(env, lock, descr);
                 cl_lock_signal(env, lock);
+                LINVRNT(osc_lock_invariant(olck));
                 lock_res_and_lock(dlmlock);
         }
         EXIT;
@@ -438,11 +451,11 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
         LASSERT(dlmlock != NULL);
 
         lock_res_and_lock(dlmlock);
-        spin_lock(&osc_ast_guard);
+        cfs_spin_lock(&osc_ast_guard);
         LASSERT(dlmlock->l_ast_data == olck);
         LASSERT(olck->ols_lock == NULL);
         olck->ols_lock = dlmlock;
-        spin_unlock(&osc_ast_guard);
+        cfs_spin_unlock(&osc_ast_guard);
 
         /*
          * Lock might be not yet granted. In this case, completion ast
@@ -458,11 +471,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
          * this.
          */
         ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
-        olck->ols_hold = olck->ols_has_ref = 1;
+        olck->ols_hold = 1;
 
         /* lock reference taken by ldlm_handle2lock_long() is owned by
          * osc_lock and released in osc_lock_detach() */
         lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
+        olck->ols_has_ref = 1;
 }
 
 /**
@@ -472,18 +486,14 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
  */
 static int osc_lock_upcall(void *cookie, int errcode)
 {
-        struct osc_lock      *olck  = cookie;
-        struct cl_lock_slice *slice = &olck->ols_cl;
-        struct cl_lock       *lock  = slice->cls_lock;
-        struct lu_env        *env;
-
-        int refcheck;
+        struct osc_lock         *olck  = cookie;
+        struct cl_lock_slice    *slice = &olck->ols_cl;
+        struct cl_lock          *lock  = slice->cls_lock;
+        struct lu_env           *env;
+        struct cl_env_nest       nest;
 
         ENTRY;
-        /*
-         * XXX environment should be created in ptlrpcd.
-         */
-        env = cl_env_get(&refcheck);
+        env = cl_env_nested_get(&nest);
         if (!IS_ERR(env)) {
                 int rc;
 
@@ -505,11 +515,11 @@ static int osc_lock_upcall(void *cookie, int errcode)
                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
                         if (dlmlock != NULL) {
                                 lock_res_and_lock(dlmlock);
-                                spin_lock(&osc_ast_guard);
+                                cfs_spin_lock(&osc_ast_guard);
                                 LASSERT(olck->ols_lock == NULL);
                                 dlmlock->l_ast_data = NULL;
                                 olck->ols_handle.cookie = 0ULL;
-                                spin_unlock(&osc_ast_guard);
+                                cfs_spin_unlock(&osc_ast_guard);
                                 unlock_res_and_lock(dlmlock);
                                 LDLM_LOCK_PUT(dlmlock);
                         }
@@ -549,7 +559,7 @@ static int osc_lock_upcall(void *cookie, int errcode)
                 /* release cookie reference, acquired by osc_lock_enqueue() */
                 lu_ref_del(&lock->cll_reference, "upcall", lock);
                 cl_lock_put(env, lock);
-                cl_env_put(env, &refcheck);
+                cl_env_nested_put(&nest, env);
         } else
                 /* should never happen, similar to osc_ldlm_blocking_ast(). */
                 LBUG();
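
[Editor's note] The upcall now uses the cl_env_nested_get()/cl_env_nested_put() pair instead of a bare cl_env_get(), because it may run in a thread (e.g. ptlrpcd) that already owns a cl environment; the same substitution is applied to the blocking, completion, glimpse, and weigh callbacks below. Conceptually the nested variant saves the caller's environment and restores it on put, as the old cl_env_reenter()/cl_env_reexit() calls did explicitly. A toy model of that save/restore behaviour, with plain C stand-ins rather than the real API:

#include <stdio.h>

struct env  { const char *name; };
struct nest { struct env *saved; };     /* like struct cl_env_nest */

static struct env *cur_env;             /* the thread's current environment */

static struct env *env_nested_get(struct nest *n, struct env *fresh)
{
        n->saved = cur_env;             /* park the outer context */
        cur_env  = fresh;
        return fresh;
}

static void env_nested_put(struct nest *n, struct env *e)
{
        (void)e;
        cur_env = n->saved;             /* outer context is intact again */
}

int main(void)
{
        struct env outer = { "outer" }, inner = { "inner" };
        struct nest nest;

        cur_env = &outer;
        env_nested_get(&nest, &inner);
        printf("inside upcall: %s\n", cur_env->name);   /* inner */
        env_nested_put(&nest, &inner);
        printf("after upcall:  %s\n", cur_env->name);   /* outer restored */
        return 0;
}
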
@@ -569,12 +579,11 @@ static void osc_lock_blocking(const struct lu_env *env,
         CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
         LASSERT(!osc_lock_is_lockless(olck));
 
-        if (olck->ols_hold)
-                /*
-                 * Lock might be still addref-ed here, if e.g., blocking ast
-                 * is sent for a failed lock.
-                 */
-                osc_lock_unuse(env, &olck->ols_cl);
+        /*
+         * Lock might be still addref-ed here, if e.g., blocking ast
+         * is sent for a failed lock.
+         */
+        osc_lock_unhold(olck);
 
         if (blocking && olck->ols_state < OLS_BLOCKED)
                 /*
@@ -724,9 +733,10 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
          * new environment has to be created to not corrupt outer context.
          */
         env = cl_env_nested_get(&nest);
-        if (!IS_ERR(env))
+        if (!IS_ERR(env)) {
                 result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
-        else {
+                cl_env_nested_put(&nest, env);
+        } else {
                 result = PTR_ERR(env);
                 /*
                  * XXX This should never happen, as cl_lock is
@@ -741,26 +751,23 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
                 else
                         CERROR("BAST failed: %d\n", result);
         }
-        cl_env_nested_put(&nest, env);
         return result;
 }
 
 static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                                    int flags, void *data)
 {
-        struct lu_env   *env;
-        void            *env_cookie;
-        struct osc_lock *olck;
-        struct cl_lock  *lock;
-        int refcheck;
+        struct cl_env_nest nest;
+        struct lu_env     *env;
+        struct osc_lock   *olck;
+        struct cl_lock    *lock;
         int result;
         int dlmrc;
 
         /* first, do dlm part of the work */
         dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
         /* then, notify cl_lock */
-        env_cookie = cl_env_reenter();
-        env = cl_env_get(&refcheck);
+        env = cl_env_nested_get(&nest);
         if (!IS_ERR(env)) {
                 olck = osc_ast_data_get(dlmlock);
                 if (olck != NULL) {
@@ -773,7 +780,7 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                         LASSERT(dlmlock->l_lvb_data != NULL);
                         lock_res_and_lock(dlmlock);
                         olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
-                        if (olck->ols_lock == NULL)
+                        if (olck->ols_lock == NULL) {
                                 /*
                                  * upcall (osc_lock_upcall()) hasn't yet been
                                  * called. Do nothing now, upcall will bind
@@ -783,21 +790,25 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
                                  * and ldlm_lock are always bound when
                                  * osc_lock is in OLS_GRANTED state.
                                  */
-                                ;
-                        else if (dlmlock->l_granted_mode != LCK_MINMODE)
+                        } else if (dlmlock->l_granted_mode ==
+                                   dlmlock->l_req_mode) {
                                 osc_lock_granted(env, olck, dlmlock, dlmrc);
-                        if (dlmrc != 0)
-                                cl_lock_error(env, lock, dlmrc);
+                        }
                         unlock_res_and_lock(dlmlock);
+
+                        if (dlmrc != 0) {
+                                CL_LOCK_DEBUG(D_ERROR, env, lock,
+                                              "dlmlock returned %d\n", dlmrc);
+                                cl_lock_error(env, lock, dlmrc);
+                        }
                         cl_lock_mutex_put(env, lock);
                         osc_ast_data_put(env, olck);
                         result = 0;
                 } else
                         result = -ELDLM_NO_LOCK_DATA;
-                cl_env_put(env, &refcheck);
+                cl_env_nested_put(&nest, env);
         } else
                 result = PTR_ERR(env);
-        cl_env_reexit(env_cookie);
         return dlmrc ?: result;
 }
 
@@ -807,15 +818,15 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
         struct osc_lock        *olck;
         struct cl_lock         *lock;
         struct cl_object       *obj;
+        struct cl_env_nest      nest;
         struct lu_env          *env;
         struct ost_lvb         *lvb;
         struct req_capsule     *cap;
         int                     result;
-        int                     refcheck;
 
         LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
 
-        env = cl_env_get(&refcheck);
+        env = cl_env_nested_get(&nest);
         if (!IS_ERR(env)) {
                 /*
                  * osc_ast_data_get() has to go after environment is
@@ -848,7 +859,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
                         lustre_pack_reply(req, 1, NULL, NULL);
                         result = -ELDLM_NO_LOCK_DATA;
                 }
-                cl_env_put(env, &refcheck);
+                cl_env_nested_put(&nest, env);
         } else
                 result = PTR_ERR(env);
         req->rq_status = result;
@@ -872,16 +883,14 @@ static unsigned long osc_lock_weigh(const struct lu_env *env,
  */
 static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 {
+        struct cl_env_nest       nest;
         struct lu_env           *env;
-        int                      refcheck;
-        void                    *cookie;
         struct osc_lock         *lock;
         struct cl_lock          *cll;
         unsigned long            weight;
         ENTRY;
 
-        might_sleep();
-        cookie = cl_env_reenter();
+        cfs_might_sleep();
         /*
          * osc_ldlm_weigh_ast has a complex context since it might be called
          * because of lock canceling, or from user's input. We have to make
@@ -889,12 +898,10 @@ static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
          * the upper context because cl_lock_put don't modify environment
          * variables. But in case of ..
          */
-        env = cl_env_get(&refcheck);
-        if (IS_ERR(env)) {
+        env = cl_env_nested_get(&nest);
+        if (IS_ERR(env))
                 /* Mostly due to lack of memory: prefer to eliminate this lock */
-                cl_env_reexit(cookie);
                 RETURN(0);
-        }
 
         LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
         lock = osc_ast_data_get(dlmlock);
@@ -914,8 +921,7 @@ static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
         EXIT;
 
 out:
-        cl_env_put(env, &refcheck);
-        cl_env_reexit(cookie);
+        cl_env_nested_put(&nest, env);
         return weight;
 }
 
@@ -945,70 +951,6 @@ static void osc_lock_build_einfo(const struct lu_env *env,
 }
 
 /**
- * Cancels \a conflict lock and waits until it reached CLS_FREEING state. This
- * is called as a part of enqueuing to cancel conflicting locks early.
- *
- * \retval            0: success, \a conflict was cancelled and destroyed.
- *
- * \retval   CLO_REPEAT: \a conflict was cancelled, but \a lock mutex was
- *                       released in the process. Repeat enqueing.
- *
- * \retval -EWOULDBLOCK: \a conflict cannot be cancelled immediately, and
- *                       either \a lock is non-blocking, or current thread
- *                       holds other locks, that prevent it from waiting
- *                       for cancel to complete.
- *
- * \retval          -ve: other error, including -EINTR.
- *
- */
-static int osc_lock_cancel_wait(const struct lu_env *env, struct cl_lock *lock,
-                                struct cl_lock *conflict, int canwait)
-{
-        int rc;
-
-        LASSERT(cl_lock_is_mutexed(lock));
-        LASSERT(cl_lock_is_mutexed(conflict));
-
-        rc = 0;
-        if (conflict->cll_state != CLS_FREEING) {
-                cl_lock_cancel(env, conflict);
-                cl_lock_delete(env, conflict);
-                if (conflict->cll_flags & (CLF_CANCELPEND|CLF_DOOMED)) {
-                        rc = -EWOULDBLOCK;
-                        if (cl_lock_nr_mutexed(env) > 2)
-                                /*
-                                 * If mutices of locks other than @lock and
-                                 * @scan are held by the current thread, it
-                                 * cannot wait on @scan state change in a
-                                 * dead-lock safe matter, so simply skip early
-                                 * cancellation in this case.
-                                 *
-                                 * This means that early cancellation doesn't
-                                 * work when there is even slight mutex
-                                 * contention, as top-lock's mutex is usually
-                                 * held at this time.
-                                 */
-                                ;
-                        else if (canwait) {
-                                /* Waiting for @scan to be destroyed */
-                                cl_lock_mutex_put(env, lock);
-                                do {
-                                        rc = cl_lock_state_wait(env, conflict);
-                                } while (!rc &&
-                                         conflict->cll_state < CLS_FREEING);
-                                /* mutex was released, repeat enqueue. */
-                                rc = rc ?: CLO_REPEAT;
-                                cl_lock_mutex_get(env, lock);
-                        }
-                }
-                LASSERT(ergo(!rc, conflict->cll_state == CLS_FREEING));
-                CDEBUG(D_INFO, "lock %p was %s freed now, rc (%d)\n",
-                       conflict, rc ? "not":"", rc);
-        }
-        return rc;
-}
-
-/**
  * Determine if the lock should be converted into a lockless lock.
  *
  * Steps to check:
@@ -1063,6 +1005,21 @@ static void osc_lock_to_lockless(const struct lu_env *env,
         LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
 }
 
+static int osc_lock_compatible(const struct osc_lock *qing,
+                               const struct osc_lock *qed)
+{
+        enum cl_lock_mode qing_mode;
+        enum cl_lock_mode qed_mode;
+
+        qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
+        if (qed->ols_glimpse &&
+            (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
+                return 1;
+
+        qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
+        return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+}
+
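
[Editor's note] osc_lock_compatible() above encodes when an already-queued lock (qed) may be left alone by an enqueuing one (qing): a glimpse lock is compatible once its upcall has been received, or whenever the enqueuing lock is a read; otherwise only read against read is compatible. A stand-alone replica of that decision table in plain C, so it can be printed and checked by eye:

#include <stdio.h>

enum mode { READ, WRITE };

static int compatible(enum mode qing, int qed_glimpse, int qed_upcalled,
                      enum mode qed)
{
        /* same shape as osc_lock_compatible() in the hunk above */
        if (qed_glimpse && (qed_upcalled || qing == READ))
                return 1;
        return qing == READ && qed == READ;
}

int main(void)
{
        printf("read  vs queued read      : %d\n", compatible(READ,  0, 0, READ));
        printf("write vs queued read      : %d\n", compatible(WRITE, 0, 0, READ));
        printf("write vs upcalled glimpse : %d\n", compatible(WRITE, 1, 1, READ));
        printf("write vs queued glimpse   : %d\n", compatible(WRITE, 1, 0, READ));
        return 0;
}
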
 /**
  * Cancel all conflicting locks and wait for them to be destroyed.
  *
@@ -1080,85 +1037,73 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
         struct cl_lock          *lock    = olck->ols_cl.cls_lock;
         struct cl_lock_descr    *descr   = &lock->cll_descr;
         struct cl_object_header *hdr     = cl_object_header(descr->cld_obj);
-        struct cl_lock_closure  *closure = &osc_env_info(env)->oti_closure;
-        struct cl_lock          *scan;
-        struct cl_lock          *temp;
+        struct cl_lock          *scan    = lock;
+        struct cl_lock          *conflict = NULL;
         int lockless                     = osc_lock_is_lockless(olck);
         int rc                           = 0;
-        int canwait;
-        int stop;
         ENTRY;
 
         LASSERT(cl_lock_is_mutexed(lock));
         LASSERT(lock->cll_state == CLS_QUEUING);
 
-        /*
-         * XXX This function could be sped up if we had asynchronous
-         * cancellation.
-         */
+        /* For a glimpse lock, let the enqueue proceed anyway, because we
+         * don't actually need to cancel any conflicting locks. */
+        if (olck->ols_glimpse)
+                return 0;
 
-        canwait =
-                !(olck->ols_flags & LDLM_FL_BLOCK_NOWAIT) &&
-                cl_lock_nr_mutexed(env) == 1;
-        cl_lock_closure_init(env, closure, lock, canwait);
-        spin_lock(&hdr->coh_lock_guard);
-        list_for_each_entry_safe(scan, temp, &hdr->coh_locks, cll_linkage) {
-                if (scan == lock)
-                        continue;
+        cfs_spin_lock(&hdr->coh_lock_guard);
+        cfs_list_for_each_entry_continue(scan, &hdr->coh_locks, cll_linkage) {
+                struct cl_lock_descr *cld = &scan->cll_descr;
+                const struct osc_lock *scan_ols;
 
                 if (scan->cll_state < CLS_QUEUING ||
                     scan->cll_state == CLS_FREEING ||
-                    scan->cll_descr.cld_start > descr->cld_end ||
-                    scan->cll_descr.cld_end < descr->cld_start)
+                    cld->cld_start > descr->cld_end ||
+                    cld->cld_end < descr->cld_start)
                         continue;
 
                 /* overlapped and living locks. */
-                /* A tricky case for lockless pages:
-                 * We need to cancel the compatible locks if we're enqueuing
+
+                /* We're not supposed to give up group lock. */
+                if (scan->cll_descr.cld_mode == CLM_GROUP) {
+                        LASSERT(descr->cld_mode != CLM_GROUP ||
+                                descr->cld_gid != scan->cll_descr.cld_gid);
+                        continue;
+                }
+
+                scan_ols = osc_lock_at(scan);
+
+                /* We need to cancel the compatible locks if we're enqueuing
                  * a lockless lock, for example:
                  * imagine that client has PR lock on [0, 1000], and thread T0
                  * is doing lockless IO in [500, 1500] region. Concurrent
                  * thread T1 can see lockless data in [500, 1000], which is
-                 * wrong, because these data are possibly stale.
-                 */
-                if (!lockless && cl_lock_compatible(scan, lock))
+                 * wrong, because these data are possibly stale. */
+                if (!lockless && osc_lock_compatible(olck, scan_ols))
                         continue;
 
                /* Now @scan is conflicting with @lock, which means the
                 * current thread has to sleep until @scan is destroyed. */
-                cl_lock_get_trust(scan);
-                if (&temp->cll_linkage != &hdr->coh_locks)
-                        cl_lock_get_trust(temp);
-                spin_unlock(&hdr->coh_lock_guard);
-                lu_ref_add(&scan->cll_reference, "cancel-wait", lock);
-
-                LASSERT(list_empty(&closure->clc_list));
-                rc = cl_lock_closure_build(env, scan, closure);
-                if (rc == 0) {
-                        rc = osc_lock_cancel_wait(env, lock, scan, canwait);
-                        cl_lock_disclosure(env, closure);
-                        if (rc == -EWOULDBLOCK)
-                                rc = 0;
+                if (scan_ols->ols_owner == osc_env_io(env)) {
+                        CERROR("DEADLOCK POSSIBLE!\n");
+                        CL_LOCK_DEBUG(D_ERROR, env, scan, "queued.\n");
+                        CL_LOCK_DEBUG(D_ERROR, env, lock, "queuing.\n");
+                        libcfs_debug_dumpstack(NULL);
                 }
-                if (rc == CLO_REPEAT && !canwait)
-                        /* cannot wait... no early cancellation. */
-                        rc = 0;
-
-                lu_ref_del(&scan->cll_reference, "cancel-wait", lock);
-                cl_lock_put(env, scan);
-                spin_lock(&hdr->coh_lock_guard);
-                /*
-                 * Lock list could have been modified, while spin-lock was
-                 * released. Check that it is safe to continue.
-                 */
-                stop = list_empty(&temp->cll_linkage);
-                if (&temp->cll_linkage != &hdr->coh_locks)
-                        cl_lock_put(env, temp);
-                if (stop || rc != 0)
-                        break;
+                cl_lock_get_trust(scan);
+                conflict = scan;
+                break;
+        }
+        cfs_spin_unlock(&hdr->coh_lock_guard);
+
+        if (conflict) {
+                CDEBUG(D_DLMTRACE, "lock %p conflicts with %p, will wait\n",
+                       lock, conflict);
+                lu_ref_add(&conflict->cll_reference, "cancel-wait", lock);
+                LASSERT(lock->cll_conflict == NULL);
+                lock->cll_conflict = conflict;
+                rc = CLO_WAIT;
         }
-        spin_unlock(&hdr->coh_lock_guard);
-        cl_lock_closure_fini(closure);
         RETURN(rc);
 }
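
[Editor's note] The rewritten osc_lock_enqueue_wait() no longer builds a lock closure and sleeps synchronously for each conflict; it takes a trusted reference on the first conflicting lock, stores it in lock->cll_conflict, and returns CLO_WAIT so the cl_lock machinery re-drives the enqueue once the conflict is gone (glimpse locks skip the scan entirely). A compact sketch of that "record and return WAIT" shape, with hypothetical types:

#include <stddef.h>
#include <stdio.h>

enum { OK = 0, WAIT = 1 };

struct lk {
        int start, end;           /* extent, like cld_start/cld_end */
        int alive;                /* stands in for the QUEUING..FREEING checks */
        struct lk *conflict;      /* like cll_conflict */
};

static int enqueue_wait(struct lk *lock, struct lk **others, size_t n)
{
        size_t i;

        for (i = 0; i < n; i++) {
                struct lk *scan = others[i];

                if (!scan->alive ||
                    scan->start > lock->end || scan->end < lock->start)
                        continue;
                /* first live, overlapping lock: remember it and bail out */
                lock->conflict = scan;
                return WAIT;      /* caller retries once the conflict dies */
        }
        return OK;
}

int main(void)
{
        struct lk a = { 0, 10, 1, NULL };
        struct lk b = { 5, 20, 1, NULL };
        struct lk *others[] = { &b };

        printf("rc=%d, conflict=%p\n",
               enqueue_wait(&a, others, 1), (void *)a.conflict);
        return 0;
}
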
 
@@ -1202,8 +1147,8 @@ static int osc_deadlock_is_possible(const struct lu_env *env,
         head = cl_object_header(obj);
 
         result = 0;
-        spin_lock(&head->coh_lock_guard);
-        list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+        cfs_spin_lock(&head->coh_lock_guard);
+        cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                 if (scan != lock) {
                         struct osc_lock *oscan;
 
@@ -1215,7 +1160,7 @@ static int osc_deadlock_is_possible(const struct lu_env *env,
                         }
                 }
         }
-        spin_unlock(&head->coh_lock_guard);
+        cfs_spin_unlock(&head->coh_lock_guard);
         RETURN(result);
 }
 
@@ -1237,7 +1182,7 @@ static int osc_deadlock_is_possible(const struct lu_env *env,
  */
 static int osc_lock_enqueue(const struct lu_env *env,
                             const struct cl_lock_slice *slice,
-                            struct cl_io *_, __u32 enqflags)
+                            struct cl_io *unused, __u32 enqflags)
 {
         struct osc_lock          *ols     = cl2osc_lock(slice);
         struct cl_lock           *lock    = ols->ols_cl.cls_lock;
@@ -1260,12 +1205,12 @@ static int osc_lock_enqueue(const struct lu_env *env,
                 ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
         if (ols->ols_flags & LDLM_FL_HAS_INTENT)
                 ols->ols_glimpse = 1;
+        if (!(enqflags & CEF_MUST))
+                /* try to convert this lock to a lockless lock */
+                osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
 
         result = osc_lock_enqueue_wait(env, ols);
         if (result == 0) {
-                if (!(enqflags & CEF_MUST))
-                        /* try to convert this lock to a lockless lock */
-                        osc_lock_to_lockless(env, ols, (enqflags & CEF_NEVER));
                 if (!osc_lock_is_lockless(ols)) {
                         if (ols->ols_locklessable)
                                 ols->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
@@ -1327,13 +1272,14 @@ static int osc_lock_use(const struct lu_env *env,
         int rc;
 
         LASSERT(!olck->ols_hold);
+
         /*
          * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
          * flag is not set. This protects us from a concurrent blocking ast.
          */
         rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
         if (rc == 0) {
-                olck->ols_hold = olck->ols_has_ref = 1;
+                olck->ols_hold = 1;
                 olck->ols_state = OLS_GRANTED;
         } else {
                 struct cl_lock *lock;
@@ -1345,9 +1291,8 @@ static int osc_lock_use(const struct lu_env *env,
                  * cl_lock mutex.
                  */
                 lock = slice->cls_lock;
-                LASSERT(lock->cll_state == CLS_CACHED);
+                LASSERT(lock->cll_state == CLS_INTRANSIT);
                 LASSERT(lock->cll_users > 0);
-                LASSERT(olck->ols_lock->l_flags & LDLM_FL_CBPENDING);
                 /* set a flag for osc_dlm_blocking_ast0() to signal the
                  * lock.*/
                 olck->ols_ast_wait = 1;
@@ -1369,8 +1314,10 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
                 cl_env_nested_put(&nest, env);
         } else
                 result = PTR_ERR(env);
-        if (result == 0)
+        if (result == 0) {
                 ols->ols_flush = 1;
+                LINVRNT(!osc_lock_has_pages(ols));
+        }
         return result;
 }
 
@@ -1394,19 +1341,30 @@ static void osc_lock_cancel(const struct lu_env *env,
         struct cl_lock   *lock    = slice->cls_lock;
         struct osc_lock  *olck    = cl2osc_lock(slice);
         struct ldlm_lock *dlmlock = olck->ols_lock;
-        int               result;
+        int               result  = 0;
         int               discard;
 
         LASSERT(cl_lock_is_mutexed(lock));
         LINVRNT(osc_lock_invariant(olck));
 
         if (dlmlock != NULL) {
+                int do_cancel;
+
                 discard = dlmlock->l_flags & LDLM_FL_DISCARD_DATA;
                 result = osc_lock_flush(olck, discard);
-                if (olck->ols_hold)
-                        osc_lock_unuse(env, slice);
-                LASSERT(dlmlock->l_readers == 0 && dlmlock->l_writers == 0);
-                result = ldlm_cli_cancel(&olck->ols_handle);
+                osc_lock_unhold(olck);
+
+                lock_res_and_lock(dlmlock);
+                /* Now that we're the only user of the dlm read/write
+                 * reference, ->l_readers and ->l_writers should normally
+                 * both be zero.  However, there is a corner case; see
+                 * bug 18829 for details. */
+                do_cancel = (dlmlock->l_readers == 0 &&
+                             dlmlock->l_writers == 0);
+                dlmlock->l_flags |= LDLM_FL_CBPENDING;
+                unlock_res_and_lock(dlmlock);
+                if (do_cancel)
+                        result = ldlm_cli_cancel(&olck->ols_handle);
                 if (result < 0)
                         CL_LOCK_DEBUG(D_ERROR, env, lock,
                                       "lock %p cancel failure with error(%d)\n",
@@ -1448,12 +1406,12 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 plist = &osc_env_info(env)->oti_plist;
                 cl_page_list_init(plist);
 
-                mutex_lock(&oob->oo_debug_mutex);
+                cfs_mutex_lock(&oob->oo_debug_mutex);
 
                 io->ci_obj = cl_object_top(obj);
                 cl_io_init(env, io, CIT_MISC, io->ci_obj);
                 cl_page_gang_lookup(env, obj, io,
-                                    descr->cld_start, descr->cld_end, plist);
+                                    descr->cld_start, descr->cld_end, plist, 0);
                 cl_lock_page_list_fixup(env, io, lock, plist);
                 if (plist->pl_nr > 0) {
                         CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");
@@ -1464,14 +1422,17 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                 cl_page_list_disown(env, io, plist);
                 cl_page_list_fini(env, plist);
                 cl_io_fini(env, io);
-                mutex_unlock(&oob->oo_debug_mutex);
+                cfs_mutex_unlock(&oob->oo_debug_mutex);
                 cl_env_nested_put(&nest, env);
         } else
                 result = 0;
         return result;
 }
 #else
-# define osc_lock_has_pages(olck) (0)
+static int osc_lock_has_pages(struct osc_lock *olck)
+{
+        return 0;
+}
 #endif /* INVARIANT_CHECK */
 
 static void osc_lock_delete(const struct lu_env *env,
@@ -1480,11 +1441,16 @@ static void osc_lock_delete(const struct lu_env *env,
         struct osc_lock *olck;
 
         olck = cl2osc_lock(slice);
+        if (olck->ols_glimpse) {
+                LASSERT(!olck->ols_hold);
+                LASSERT(!olck->ols_lock);
+                return;
+        }
+
         LINVRNT(osc_lock_invariant(olck));
         LINVRNT(!osc_lock_has_pages(olck));
 
-        if (olck->ols_hold)
-                osc_lock_unuse(env, slice);
+        osc_lock_unhold(olck);
         osc_lock_detach(env, olck);
 }
 
@@ -1503,13 +1469,14 @@ static void osc_lock_state(const struct lu_env *env,
                            enum cl_lock_state state)
 {
         struct osc_lock *lock = cl2osc_lock(slice);
-        struct osc_io   *oio  = osc_env_io(env);
 
         /*
          * XXX multiple io contexts can use the lock at the same time.
          */
         LINVRNT(osc_lock_invariant(lock));
         if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
+                struct osc_io *oio = osc_env_io(env);
+
                 LASSERT(lock->ols_owner == NULL);
                 lock->ols_owner = oio;
         } else if (state != CLS_HELD)
@@ -1524,13 +1491,57 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
         /*
          * XXX print ldlm lock and einfo properly.
          */
-        (*p)(env, cookie, "%p %08x "LPU64" %d %p ",
+        (*p)(env, cookie, "%p %08x "LPX64" %d %p ",
              lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
              lock->ols_state, lock->ols_owner);
         osc_lvb_print(env, cookie, p, &lock->ols_lvb);
         return 0;
 }
 
+static int osc_lock_fits_into(const struct lu_env *env,
+                              const struct cl_lock_slice *slice,
+                              const struct cl_lock_descr *need,
+                              const struct cl_io *io)
+{
+        struct osc_lock *ols = cl2osc_lock(slice);
+
+        if (need->cld_enq_flags & CEF_NEVER)
+                return 0;
+
+        if (need->cld_mode == CLM_PHANTOM) {
+                /*
+                 * Note: a QUEUED lock can't be matched here, otherwise
+                 * it might cause deadlocks, e.g. in a read process:
+                 * P1: enqueues a read lock, creating sublock1.
+                 * P2: enqueues a write lock, creating sublock2 (conflicting
+                 *     with sublock1).
+                 * P1: read lock is granted.
+                 * P1: enqueues a glimpse lock (while holding sublock1 for
+                 *     read), matches sublock2, and waits for sublock2 to be
+                 *     granted.  But sublock2 can never be granted, because
+                 *     P1 will not release sublock1.  Bang!
+                 */
+                if (ols->ols_state < OLS_GRANTED ||
+                        ols->ols_state > OLS_RELEASED)
+                        return 0;
+        } else if (need->cld_enq_flags & CEF_MUST) {
+                /*
+                 * If the lock has never been enqueued, it can't be matched,
+                 * because the enqueue process brings in information that
+                 * determines properties such as locklessness, CEF_MUST, etc.
+                 */
+                if (ols->ols_state < OLS_GRANTED ||
+                        ols->ols_state > OLS_RELEASED)
+                        return 0;
+                if (ols->ols_state < OLS_UPCALL_RECEIVED &&
+                        ols->ols_locklessable)
+                        return 0;
+        }
+        return 1;
+}
+
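
[Editor's note] osc_lock_fits_into() gives the matching rules in code: CEF_NEVER requests never reuse an existing lock; a glimpse request (CLM_PHANTOM) only matches locks between OLS_GRANTED and OLS_RELEASED, avoiding the deadlock described in the comment; and a CEF_MUST request likewise requires a fully enqueued lock. A plain-C restatement of the same checks, assuming the usual OLS_* ordering (NEW < ENQUEUED < UPCALL_RECEIVED < GRANTED < RELEASED); the second test in the CEF_MUST branch is kept verbatim from the patch:

#include <stdio.h>

enum state { NEW, ENQUEUED, UPCALL_RECEIVED, GRANTED, RELEASED, CANCELLED };
enum { EF_NEVER = 1, EF_MUST = 2 };

static int fits(enum state st, int locklessable, int flags, int phantom)
{
        if (flags & EF_NEVER)
                return 0;
        if (phantom) {
                /* a still-queued lock may not satisfy a glimpse */
                if (st < GRANTED || st > RELEASED)
                        return 0;
        } else if (flags & EF_MUST) {
                if (st < GRANTED || st > RELEASED)
                        return 0;
                if (st < UPCALL_RECEIVED && locklessable)
                        return 0;
        }
        return 1;
}

int main(void)
{
        printf("%d\n", fits(ENQUEUED, 0, 0, 1));        /* 0: queued vs glimpse */
        printf("%d\n", fits(GRANTED, 0, EF_MUST, 0));   /* 1: fully enqueued */
        return 0;
}
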
 static const struct cl_lock_operations osc_lock_ops = {
         .clo_fini    = osc_lock_fini,
         .clo_enqueue = osc_lock_enqueue,
@@ -1541,12 +1552,13 @@ static const struct cl_lock_operations osc_lock_ops = {
         .clo_state   = osc_lock_state,
         .clo_cancel  = osc_lock_cancel,
         .clo_weigh   = osc_lock_weigh,
-        .clo_print   = osc_lock_print
+        .clo_print   = osc_lock_print,
+        .clo_fits_into = osc_lock_fits_into,
 };
 
 static int osc_lock_lockless_enqueue(const struct lu_env *env,
                                      const struct cl_lock_slice *slice,
-                                     struct cl_io *_, __u32 enqflags)
+                                     struct cl_io *unused, __u32 enqflags)
 {
         LBUG();
         return 0;
@@ -1596,10 +1608,11 @@ static void osc_lock_lockless_state(const struct lu_env *env,
                                     enum cl_lock_state state)
 {
         struct osc_lock *lock = cl2osc_lock(slice);
-        struct osc_io   *oio  = osc_env_io(env);
 
         LINVRNT(osc_lock_invariant(lock));
         if (state == CLS_HELD) {
+                struct osc_io *oio  = osc_env_io(env);
+
                 LASSERT(lock->ols_owner == NULL);
                 lock->ols_owner = oio;
 
@@ -1632,12 +1645,12 @@ static const struct cl_lock_operations osc_lock_lockless_ops = {
 
 int osc_lock_init(const struct lu_env *env,
                   struct cl_object *obj, struct cl_lock *lock,
-                  const struct cl_io *_)
+                  const struct cl_io *unused)
 {
         struct osc_lock *clk;
         int result;
 
-        OBD_SLAB_ALLOC_PTR(clk, osc_lock_kmem);
+        OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO);
         if (clk != NULL) {
                 osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
                 clk->ols_state = OLS_NEW;