LU-3027 lov: do not modify lov lock when sublock is canceled
diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c
index eb17fe7..547bdca 100644
--- a/lustre/lov/lov_lock.c
+++ b/lustre/lov/lov_lock.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #include "lov_cl_internal.h"
 
-/** \addtogroup lov lov @{ */
+/** \addtogroup lov
+ *  @{
+ */
 
 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                                struct cl_lock *parent);
 
+static int lov_lock_unuse(const struct lu_env *env,
+                          const struct cl_lock_slice *slice);
 /*****************************************************************************
  *
  * Lov lock operations.
@@ -73,12 +77,11 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
          * they are not initialized at all. As a temp fix, in this case,
          * we still borrow the parent's env to call sublock operations.
          */
-        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
+        if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                 subenv->lse_env = env;
                 subenv->lse_io  = io;
                 subenv->lse_sub = NULL;
         } else {
-                LASSERT(io != NULL);
                 sub = lov_sub_get(env, lio, lls->sub_stripe);
                 if (!IS_ERR(sub)) {
                         subenv->lse_env = sub->sub_env;
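
The added !io guard matters because the sub-lock environment can now be requested without an io context at all. A standalone sketch (hypothetical types, not the Lustre API) of the fallback rule: borrow the parent's environment whenever there is no io, or the io belongs to a different object.

    #include <stdio.h>

    /* hypothetical miniature of lov_sublock_env_get()'s fallback path */
    struct env { const char *name; };
    struct io  { int obj_id; };

    struct subenv {
            struct env *lse_env;
            struct io  *lse_io;
    };

    static struct env parent_env = { "parent" };
    static struct env stripe_env = { "stripe" };

    static void subenv_get(struct subenv *se, struct io *io, int parent_obj)
    {
            if (io == NULL || io->obj_id != parent_obj) {
                    se->lse_env = &parent_env; /* temp fix: borrow parent's env */
                    se->lse_io  = io;
            } else {
                    se->lse_env = &stripe_env; /* per-stripe lookup succeeded */
                    se->lse_io  = io;
            }
    }

    int main(void)
    {
            struct subenv se;

            subenv_get(&se, NULL, 42);     /* no io: must not be dereferenced */
            printf("env = %s\n", se.lse_env->name);
            return 0;
    }
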
@@ -119,7 +122,7 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
         lck->lls_sub[idx].sub_lock = lsl;
         lck->lls_nr_filled++;
         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
-        list_add_tail(&link->lll_list, &lsl->lss_parents);
+        cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
         link->lll_idx = idx;
         link->lll_super = lck;
         cl_lock_get(parent);
@@ -144,7 +147,7 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
         LASSERT(idx < lck->lls_nr);
         ENTRY;
 
-        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
+       OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
         if (link != NULL) {
                 struct lov_sublock_env *subenv;
                 struct lov_lock_sub  *lls;
@@ -152,7 +155,7 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
 
                 parent = lck->lls_cl.cls_lock;
                 lls    = &lck->lls_sub[idx];
-                descr  = &lls->sub_descr;
+                descr  = &lls->sub_got;
 
                 subenv = lov_sublock_env_get(env, parent, lls);
                 if (!IS_ERR(subenv)) {
@@ -202,7 +205,7 @@ static int lov_sublock_lock(const struct lu_env *env,
         int                 result = 0;
         ENTRY;
 
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
 
         sublock = lls->sub_lock;
         child = sublock->lss_cl.cls_lock;
@@ -213,7 +216,8 @@ static int lov_sublock_lock(const struct lu_env *env,
                 LASSERT(cl_lock_is_mutexed(child));
                 sublock->lss_active = parent;
 
-                if (unlikely(child->cll_state == CLS_FREEING)) {
+                if (unlikely((child->cll_state == CLS_FREEING) ||
+                             (child->cll_flags & CLF_CANCELLED))) {
                         struct lov_lock_link *link;
                         /*
                          * we could race with lock deletion which temporarily
@@ -225,6 +229,7 @@ static int lov_sublock_lock(const struct lu_env *env,
                         LASSERT(link != NULL);
                         lov_lock_unlink(env, link, sublock);
                         lov_sublock_unlock(env, sublock, closure, NULL);
+                        lck->lls_cancel_race = 1;
                         result = CLO_REPEAT;
                 } else if (lsep) {
                         struct lov_sublock_env *subenv;
@@ -262,12 +267,14 @@ static int lov_subresult(int result, int rc)
         int result_rank;
         int rc_rank;
 
-        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
-        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
-        CLASSERT(CLO_WAIT < CLO_REPEAT);
-
         ENTRY;
 
+       LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
+                "result = %d\n", result);
+       LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
+                "rc = %d\n", rc);
+        CLASSERT(CLO_WAIT < CLO_REPEAT);
+
         /* calculate ranks in the ordering above */
         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
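
The ranks turn the mixed status space into a total severity order: 0 (done) < CLO_WAIT < CLO_REPEAT < any negative errno. A standalone sketch, assuming only the CLO_WAIT < CLO_REPEAT ordering that the CLASSERT above enforces (the numeric values here are hypothetical):

    #include <assert.h>
    #include <errno.h>
    #include <stdio.h>

    enum { CLO_WAIT = 1, CLO_REPEAT = 2 }; /* hypothetical; only order matters */

    static int subresult(int result, int rc)
    {
            /* map negative errnos above CLO_REPEAT so an error always wins */
            int result_rank = result < 0 ? 1 + CLO_REPEAT : result;
            int rc_rank     = rc < 0 ? 1 + CLO_REPEAT : rc;

            return result_rank < rc_rank ? rc : result;
    }

    int main(void)
    {
            assert(subresult(0, CLO_WAIT) == CLO_WAIT);
            assert(subresult(CLO_WAIT, CLO_REPEAT) == CLO_REPEAT);
            assert(subresult(CLO_REPEAT, -ENOMEM) == -ENOMEM);
            assert(subresult(-EIO, CLO_WAIT) == -EIO); /* first error sticks */
            printf("severity merge ok\n");
            return 0;
    }
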
@@ -290,10 +297,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
 {
         int result = 0;
         int i;
-        int j;
         int nr;
-        int stripe;
-        int start_stripe;
         obd_off start;
         obd_off end;
         obd_off file_start;
@@ -309,19 +313,17 @@ static int lov_lock_sub_init(const struct lu_env *env,
         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
 
-        start_stripe = lov_stripe_number(r0->lo_lsm, file_start);
         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                 /*
                  * XXX for wide striping smarter algorithm is desirable,
                  * breaking out of the loop, early.
                  */
-                stripe = (start_stripe + i) % r0->lo_nr;
-                if (lov_stripe_intersects(r0->lo_lsm, stripe,
+               if (lov_stripe_intersects(loo->lo_lsm, i,
                                           file_start, file_end, &start, &end))
                         nr++;
         }
         LASSERT(nr > 0);
-        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
+        OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
         if (lck->lls_sub == NULL)
                 RETURN(-ENOMEM);
 
@@ -333,23 +335,23 @@ static int lov_lock_sub_init(const struct lu_env *env,
          * create sub-locks. At this moment, no other thread can access
          * top-lock.
          */
-        for (j = 0, nr = 0; j < i; ++j) {
-                stripe = (start_stripe + j) % r0->lo_nr;
-                if (lov_stripe_intersects(r0->lo_lsm, stripe,
+        for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
+               if (lov_stripe_intersects(loo->lo_lsm, i,
                                           file_start, file_end, &start, &end)) {
                         struct cl_lock_descr *descr;
 
                         descr = &lck->lls_sub[nr].sub_descr;
 
                         LASSERT(descr->cld_obj == NULL);
-                        descr->cld_obj   = lovsub2cl(r0->lo_sub[stripe]);
+                        descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
                         descr->cld_start = cl_index(descr->cld_obj, start);
                         descr->cld_end   = cl_index(descr->cld_obj, end);
                         descr->cld_mode  = parent->cll_descr.cld_mode;
                         descr->cld_gid   = parent->cll_descr.cld_gid;
+                        descr->cld_enq_flags   = parent->cll_descr.cld_enq_flags;
                         /* XXX has no effect */
                         lck->lls_sub[nr].sub_got = *descr;
-                        lck->lls_sub[nr].sub_stripe = stripe;
+                        lck->lls_sub[nr].sub_stripe = i;
                         nr++;
                 }
         }
@@ -368,6 +370,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                                 result = PTR_ERR(sublock);
                                 break;
                         }
+                        cl_lock_get_trust(sublock);
                         cl_lock_mutex_get(env, sublock);
                         cl_lock_mutex_get(env, parent);
                         /*
@@ -385,6 +388,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                                                "lov-parent", parent);
                         }
                         cl_lock_mutex_put(env, sublock);
+                        cl_lock_put(env, sublock);
                 }
         }
         /*
@@ -483,12 +487,29 @@ static void lov_lock_fini(const struct lu_env *env,
                          * a reference on its parent.
                          */
                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
-                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
+                OBD_FREE_LARGE(lck->lls_sub,
+                               lck->lls_nr * sizeof lck->lls_sub[0]);
         }
         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
         EXIT;
 }
 
+static int lov_lock_enqueue_wait(const struct lu_env *env,
+                                 struct lov_lock *lck,
+                                 struct cl_lock *sublock)
+{
+        struct cl_lock *lock = lck->lls_cl.cls_lock;
+        int             result;
+        ENTRY;
+
+        LASSERT(cl_lock_is_mutexed(lock));
+
+        cl_lock_mutex_put(env, lock);
+        result = cl_lock_enqueue_wait(env, sublock, 0);
+        cl_lock_mutex_get(env, lock);
+        RETURN(result ?: CLO_REPEAT);
+}
+
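
lov_lock_enqueue_wait() drops the top-lock mutex before blocking, so it never sleeps while holding it, and it reports CLO_REPEAT because the top-lock may have changed while unlocked. A minimal pthread sketch of that pattern (the names and the CLO_REPEAT value are stand-ins, not the Lustre API):

    #include <pthread.h>
    #include <stdio.h>

    #define CLO_REPEAT 2 /* hypothetical "retry from the top" return code */

    static pthread_mutex_t top_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* hypothetical stand-in for cl_lock_enqueue_wait(): blocks until the
     * conflicting sub-lock enqueue settles, returns 0 or a negative errno */
    static int sub_enqueue_wait(void *sublock)
    {
            (void)sublock;
            return 0;
    }

    static int enqueue_wait(void *sublock)
    {
            int rc;

            /* blocking with the top-lock mutex held could deadlock against
             * threads that take the sub-lock mutex first, so drop it */
            pthread_mutex_unlock(&top_mutex);
            rc = sub_enqueue_wait(sublock);
            pthread_mutex_lock(&top_mutex);

            /* any state observed before the unlock is stale: force a re-scan */
            return rc != 0 ? rc : CLO_REPEAT;
    }

    int main(void)
    {
            pthread_mutex_lock(&top_mutex);
            printf("enqueue_wait -> %d (CLO_REPEAT)\n", enqueue_wait(NULL));
            pthread_mutex_unlock(&top_mutex);
            return 0;
    }
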
 /**
  * Tries to advance a state machine of a given sub-lock toward enqueuing of
  * the top-lock.
@@ -505,17 +526,20 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
 
         /* first, try to enqueue a sub-lock ... */
         result = cl_enqueue_try(env, sublock, io, enqflags);
-        if (sublock->cll_state == CLS_ENQUEUED)
-                /* if it is enqueued, try to `wait' on it---maybe it's already
-                 * granted */
-                result = cl_wait_try(env, sublock);
+       if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
+               /* if it is enqueued, try to `wait' on it---maybe it's already
+                * granted */
+               result = cl_wait_try(env, sublock);
+               if (result == CLO_REENQUEUED)
+                       result = CLO_WAIT;
+       }
         /*
          * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
          * parallel, otherwise---enqueue has to wait until sub-lock is granted
          * before proceeding to the next one.
          */
-        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
-            enqflags & CEF_ASYNC && !last)
+        if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
+            (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
                 result = 0;
         RETURN(result);
 }
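
A small sketch, with hypothetical flag values, of the early-return rule this hunk widens: CEF_ASYNC lets any non-final sub-lock proceed without waiting, and CEF_AGL drops the wait on the final one as well (the sublock->cll_state <= CLS_HELD precondition is omitted for brevity):

    #include <stdio.h>

    /* hypothetical flag values; only their distinctness matters here */
    #define CEF_ASYNC 0x01
    #define CEF_AGL   0x02
    #define CLO_WAIT  1

    /* mirrors the widened condition: convert CLO_WAIT into "success,
     * keep going" when the caller may proceed in parallel */
    static int early_return(int result, int enqflags, int last)
    {
            if (result == CLO_WAIT && (enqflags & CEF_ASYNC) &&
                (!last || (enqflags & CEF_AGL)))
                    result = 0;
            return result;
    }

    int main(void)
    {
            printf("async, not last : %d\n",
                   early_return(CLO_WAIT, CEF_ASYNC, 0));
            printf("async, last     : %d\n",
                   early_return(CLO_WAIT, CEF_ASYNC, 1));
            printf("async+agl, last : %d\n",
                   early_return(CLO_WAIT, CEF_ASYNC | CEF_AGL, 1));
            return 0;
    }
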
@@ -538,10 +562,11 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
         cl_lock_mutex_get(env, parent);
 
         if (!IS_ERR(sublock)) {
+                cl_lock_get_trust(sublock);
                 if (parent->cll_state == CLS_QUEUING &&
-                    lck->lls_sub[idx].sub_lock == NULL)
+                    lck->lls_sub[idx].sub_lock == NULL) {
                         lov_sublock_adopt(env, lck, sublock, idx, link);
-                else {
+                } else {
                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                         /* other thread allocated sub-lock, or enqueue is no
                          * longer going on */
@@ -550,6 +575,7 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                         cl_lock_mutex_get(env, parent);
                 }
                 cl_lock_mutex_put(env, sublock);
+                cl_lock_put(env, sublock);
                 result = CLO_REPEAT;
         } else
                 result = PTR_ERR(sublock);
@@ -613,16 +639,44 @@ static int lov_lock_enqueue(const struct lu_env *env,
                                                   subenv->lse_io, enqflags,
                                                   i == lck->lls_nr - 1);
                         minstate = min(minstate, sublock->cll_state);
-                        /*
-                         * Don't hold a sub-lock in CLS_CACHED state, see
-                         * description for lov_lock::lls_sub.
-                         */
-                        if (sublock->cll_state > CLS_HELD)
-                                rc = lov_sublock_release(env, lck, i, 1, rc);
-                        lov_sublock_unlock(env, sub, closure, subenv);
+                        if (rc == CLO_WAIT) {
+                                switch (sublock->cll_state) {
+                                case CLS_QUEUING:
+                                        /* take recursive mutex; the lock is
+                                         * released in lov_lock_enqueue_wait().
+                                         */
+                                        cl_lock_mutex_get(env, sublock);
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                        rc = lov_lock_enqueue_wait(env, lck,
+                                                                   sublock);
+                                        break;
+                                case CLS_CACHED:
+                                       cl_lock_get(sublock);
+                                        /* take recursive mutex of sublock */
+                                        cl_lock_mutex_get(env, sublock);
+                                       /* need to release all locks in closure,
+                                        * otherwise it may deadlock. LU-2683. */
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                       /* sublock and parent are held. */
+                                        rc = lov_sublock_release(env, lck, i,
+                                                                 1, rc);
+                                       cl_lock_mutex_put(env, sublock);
+                                       cl_lock_put(env, sublock);
+                                       break;
+                                default:
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                        break;
+                                }
+                        } else {
+                                LASSERT(sublock->cll_conflict == NULL);
+                                lov_sublock_unlock(env, sub, closure, subenv);
+                        }
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
         cl_lock_closure_fini(closure);
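
With `result != 0` the per-stripe scan now stops on the first non-zero merged status, positive hints included, instead of only on errors. A standalone sketch of the loop skeleton (per_stripe() is a hypothetical stand-in for the enqueue-one body; subresult() is the severity merge sketched earlier):

    #include <stdio.h>

    enum { CLO_WAIT = 1, CLO_REPEAT = 2 }; /* hypothetical values */

    static int subresult(int result, int rc)
    {
            int a = result < 0 ? 1 + CLO_REPEAT : result;
            int b = rc < 0 ? 1 + CLO_REPEAT : rc;
            return a < b ? rc : result;
    }

    /* hypothetical per-stripe body: stripe 2 asks for a repeat */
    static int per_stripe(int i)
    {
            return i == 2 ? CLO_REPEAT : 0;
    }

    int main(void)
    {
            int result = 0;
            int i;

            for (i = 0; i < 5; i++) {
                    result = subresult(result, per_stripe(i));
                    if (result != 0) /* was: result < 0 */
                            break;
            }
            printf("stopped at stripe %d with %d\n", i, result);
            return 0;
    }
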
@@ -649,7 +703,7 @@ static int lov_lock_unuse(const struct lu_env *env,
                 /* top-lock state cannot change concurrently, because single
                  * thread (one that released the last hold) carries unlocking
                  * to the completion. */
-                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
+                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
                 lls = &lck->lls_sub[i];
                 sub = lls->sub_lock;
                 if (sub == NULL)
@@ -658,39 +712,101 @@ static int lov_lock_unuse(const struct lu_env *env,
                 sublock = sub->lss_cl.cls_lock;
                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                 if (rc == 0) {
-                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
-                                LASSERT(sublock->cll_state == CLS_HELD);
+                        if (lls->sub_flags & LSF_HELD) {
+                                LASSERT(sublock->cll_state == CLS_HELD ||
+                                        sublock->cll_state == CLS_ENQUEUED);
                                 rc = cl_unuse_try(subenv->lse_env, sublock);
-                                if (rc != CLO_WAIT)
-                                        rc = lov_sublock_release(env, lck,
-                                                                 i, 0, rc);
+                                rc = lov_sublock_release(env, lck, i, 0, rc);
                         }
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
-                        break;
         }
-        if (result == 0 && lck->lls_unuse_race) {
-                lck->lls_unuse_race = 0;
+
+        if (result == 0 && lck->lls_cancel_race) {
+                lck->lls_cancel_race = 0;
                 result = -ESTALE;
         }
         cl_lock_closure_fini(closure);
         RETURN(result);
 }
 
+
+static void lov_lock_cancel(const struct lu_env *env,
+                           const struct cl_lock_slice *slice)
+{
+        struct lov_lock        *lck     = cl2lov_lock(slice);
+        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+        int i;
+        int result;
+
+        ENTRY;
+
+        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
+                int rc;
+                struct lovsub_lock     *sub;
+                struct cl_lock         *sublock;
+                struct lov_lock_sub    *lls;
+                struct lov_sublock_env *subenv;
+
+                /* top-lock state cannot change concurrently, because single
+                 * thread (one that released the last hold) carries unlocking
+                 * to the completion. */
+                lls = &lck->lls_sub[i];
+                sub = lls->sub_lock;
+                if (sub == NULL)
+                        continue;
+
+                sublock = sub->lss_cl.cls_lock;
+                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+                if (rc == 0) {
+                        if (!(lls->sub_flags & LSF_HELD)) {
+                                lov_sublock_unlock(env, sub, closure, subenv);
+                                continue;
+                        }
+
+                        switch(sublock->cll_state) {
+                        case CLS_HELD:
+                               rc = cl_unuse_try(subenv->lse_env, sublock);
+                                lov_sublock_release(env, lck, i, 0, 0);
+                                break;
+                        default:
+                                lov_sublock_release(env, lck, i, 1, 0);
+                                break;
+                        }
+                        lov_sublock_unlock(env, sub, closure, subenv);
+                }
+
+                if (rc == CLO_REPEAT) {
+                        --i;
+                        continue;
+                }
+
+                result = lov_subresult(result, rc);
+        }
+
+        if (result)
+                CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
+                              "lov_lock_cancel fails with %d.\n", result);
+
+        cl_lock_closure_fini(closure);
+}
+
 static int lov_lock_wait(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
 {
         struct lov_lock        *lck     = cl2lov_lock(slice);
         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
         enum cl_lock_state      minstate;
+        int                     reenqueued;
         int                     result;
         int                     i;
 
         ENTRY;
 
-        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
+again:
+        for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
+             i < lck->lls_nr; ++i) {
                 int rc;
                 struct lovsub_lock     *sub;
                 struct cl_lock         *sublock;
@@ -710,10 +826,18 @@ static int lov_lock_wait(const struct lu_env *env,
                         minstate = min(minstate, sublock->cll_state);
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
+                if (rc == CLO_REENQUEUED) {
+                        reenqueued++;
+                        rc = 0;
+                }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
+        /* Each sublock can only be reenqueued once, so this will not
+         * loop forever. */
+        if (result == 0 && reenqueued != 0)
+                goto again;
         cl_lock_closure_fini(closure);
         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
 }
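
The re-scan above terminates because each sub-lock can report CLO_REENQUEUED at most once. A standalone sketch of that bounded retry loop (wait_one() is a hypothetical stand-in for the per-stripe wait body):

    #include <stdio.h>

    #define NR 4
    #define CLO_REENQUEUED 3 /* hypothetical status code */

    /* hypothetical per-stripe wait: each slot reports CLO_REENQUEUED at
     * most once, mirroring the guarantee the comment above relies on */
    static int wait_one(int i, int pass)
    {
            return (pass == 0 && (i & 1)) ? CLO_REENQUEUED : 0;
    }

    int main(void)
    {
            int pass = 0;
            int reenqueued;

            do {
                    int i;

                    reenqueued = 0;
                    for (i = 0; i < NR; i++)
                            if (wait_one(i, pass) == CLO_REENQUEUED)
                                    reenqueued++;
                    pass++;
            } while (reenqueued != 0); /* bounded: at most one extra pass
                                        * per re-enqueued sub-lock */

            printf("settled after %d pass(es)\n", pass);
            return 0;
    }
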
@@ -726,7 +850,7 @@ static int lov_lock_use(const struct lu_env *env,
         int                     result;
         int                     i;
 
-        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
+        LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
         ENTRY;
 
         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
@@ -736,37 +860,53 @@ static int lov_lock_use(const struct lu_env *env,
                 struct lov_lock_sub    *lls;
                 struct lov_sublock_env *subenv;
 
-                if (slice->cls_lock->cll_state != CLS_CACHED) {
-                        /* see comment in lov_lock_enqueue(). */
-                        LASSERT(i > 0 && result != 0);
-                        break;
-                }
-                /*
-                 * if a sub-lock was destroyed while top-lock was in
-                 * CLS_CACHED state, top-lock would have been moved into
-                 * CLS_NEW state, so all sub-locks have to be in place.
-                 */
+                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
+
                 lls = &lck->lls_sub[i];
                 sub = lls->sub_lock;
-                LASSERT(sub != NULL);
+                if (sub == NULL) {
+                        /*
+                         * Sub-lock might have been canceled, while top-lock was
+                         * Sub-lock might have been canceled while top-lock
+                         * was cached.
+                        result = -ESTALE;
+                        break;
+                }
+
                 sublock = sub->lss_cl.cls_lock;
                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                 if (rc == 0) {
                         LASSERT(sublock->cll_state != CLS_FREEING);
                         lov_sublock_hold(env, lck, i);
                         if (sublock->cll_state == CLS_CACHED) {
-                                rc = cl_use_try(subenv->lse_env, sublock);
+                                rc = cl_use_try(subenv->lse_env, sublock, 0);
                                 if (rc != 0)
                                         rc = lov_sublock_release(env, lck,
                                                                  i, 1, rc);
-                        } else
-                                rc = 0;
+                        } else if (sublock->cll_state == CLS_NEW) {
+                                /* Sub-lock might have been canceled while
+                                 * top-lock was cached. */
+                                result = -ESTALE;
+                                lov_sublock_release(env, lck, i, 1, result);
+                        }
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
+
+        if (lck->lls_cancel_race) {
+                /*
+                 * If unlocking happened at the same time, the sub-lock
+                 * state should be FREEING and lov_sublock_lock should
+                 * return CLO_REPEAT. In this case return -ESTALE, so that
+                 * the upper layer resets the lock state to CLS_NEW.
+                 */
+                lck->lls_cancel_race = 0;
+                LASSERT(result != 0);
+                result = -ESTALE;
+        }
         cl_lock_closure_fini(closure);
         RETURN(result);
 }
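
A miniature sketch of the lls_cancel_race handshake (hypothetical structures, not the Lustre API): the cancel path records the race on the top-lock, and lov_lock_use() later converts its retry status into -ESTALE so the upper layer rebuilds the lock from CLS_NEW.

    #include <errno.h>
    #include <stdio.h>

    struct top_lock {
            int lls_cancel_race;
    };

    /* set where lov_sublock_lock() returns CLO_REPEAT on a freeing child */
    static void sublock_cancelled(struct top_lock *lck)
    {
            lck->lls_cancel_race = 1;
    }

    static int lock_use(struct top_lock *lck, int result)
    {
            if (lck->lls_cancel_race) {
                    lck->lls_cancel_race = 0;
                    result = -ESTALE; /* tell the caller to start over */
            }
            return result;
    }

    int main(void)
    {
            struct top_lock lck = { 0 };

            sublock_cancelled(&lck);
            printf("use -> %d (-ESTALE)\n",
                   lock_use(&lck, 2 /* stand-in for CLO_REPEAT */));
            return 0;
    }
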
@@ -795,7 +935,7 @@ static int lock_lock_multi_match()
                 if (sub->sub_lock == NULL)
                         continue;
                 subobj = sub->sub_descr.cld_obj;
-                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
+               if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
                                            fstart, fend, &start, &end))
                         continue;
                 subneed->cld_start = cl_index(subobj, start);
@@ -819,7 +959,7 @@ static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                        const struct cl_lock_descr *child,
                                        const struct cl_lock_descr *descr)
 {
-        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
+       struct lov_stripe_md *lsm = lov->lo_lsm;
         obd_off start;
         obd_off end;
         int result;
@@ -878,6 +1018,14 @@ static int lov_lock_fits_into(const struct lu_env *env,
 
         ENTRY;
 
+       /* For a top lock it is necessary to match the enqueue flags,
+        * otherwise it runs into trouble if a sub-lock is missing and
+        * has to be re-enqueued. */
+       if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
+               return 0;
+
+       if (lov->lls_ever_canceled)
+               return 0;
+
         if (need->cld_mode == CLM_GROUP)
                 /*
                  * always allow to match group lock.
@@ -889,7 +1037,7 @@ static int lov_lock_fits_into(const struct lu_env *env,
                                                      cl2lov(slice->cls_obj),
                                                      lov->lls_sub[0].sub_stripe,
                                                      got, need);
-        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
+        } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                 /*
                  * Multi-stripe locks are only suitable for `quick' IO and for
@@ -906,7 +1054,7 @@ static int lov_lock_fits_into(const struct lu_env *env,
                  * match against original lock extent.
                  */
                 result = cl_lock_ext_match(&lov->lls_orig, need);
-        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
+        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
                result);
@@ -923,7 +1071,7 @@ void lov_lock_unlink(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        list_del_init(&link->lll_list);
+        cfs_list_del_init(&link->lll_list);
         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
         /* yank this sub-lock from parent's array */
         lck->lls_sub[link->lll_idx].sub_lock = NULL;
@@ -944,7 +1092,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 if (scan->lll_super == lck)
                         RETURN(scan);
         }
@@ -970,44 +1118,40 @@ static void lov_lock_delete(const struct lu_env *env,
 {
         struct lov_lock        *lck     = cl2lov_lock(slice);
         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
-        int i;
+        struct lov_lock_link   *link;
+        int                     rc;
+        int                     i;
 
         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
         ENTRY;
 
         for (i = 0; i < lck->lls_nr; ++i) {
-                struct lov_lock_sub *lls;
-                struct lovsub_lock  *lsl;
-                struct cl_lock      *sublock;
-                int rc;
+                struct lov_lock_sub *lls = &lck->lls_sub[i];
+                struct lovsub_lock  *lsl = lls->sub_lock;
 
-                lls = &lck->lls_sub[i];
-                lsl = lls->sub_lock;
-                if (lsl == NULL)
+                if (lsl == NULL) /* already removed */
                         continue;
 
-                sublock = lsl->lss_cl.cls_lock;
                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
-                if (rc == 0) {
-                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
-                                lov_sublock_release(env, lck, i, 1, 0);
-                        if (sublock->cll_state < CLS_FREEING) {
-                                struct lov_lock_link *link;
-
-                                link = lov_lock_link_find(env, lck, lsl);
-                                LASSERT(link != NULL);
-                                lov_lock_unlink(env, link, lsl);
-                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
-                        }
-                        lov_sublock_unlock(env, lsl, closure, NULL);
-                } else if (rc == CLO_REPEAT) {
-                        --i; /* repeat with this lock */
-                } else {
-                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
-                                      "Cannot get sub-lock for delete: %i\n",
-                                      rc);
+                if (rc == CLO_REPEAT) {
+                        --i;
+                        continue;
                 }
+
+                LASSERT(rc == 0);
+                LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
+
+                if (lls->sub_flags & LSF_HELD)
+                        lov_sublock_release(env, lck, i, 1, 0);
+
+                link = lov_lock_link_find(env, lck, lsl);
+                LASSERT(link != NULL);
+                lov_lock_unlink(env, link, lsl);
+                LASSERT(lck->lls_sub[i].sub_lock == NULL);
+
+                lov_sublock_unlock(env, lsl, closure, NULL);
         }
+
         cl_lock_closure_fini(closure);
         EXIT;
 }
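
lov_lock_delete() and lov_lock_cancel() both use the same retry idiom for CLO_REPEAT: decrement the index so the loop revisits the slot once the closure has been unwound. A standalone sketch (try_slot() is a hypothetical per-slot operation):

    #include <stdio.h>

    #define CLO_REPEAT 2 /* hypothetical retry code */

    /* hypothetical per-slot operation that needs one retry on slot 1 */
    static int try_slot(int i, int *retried)
    {
            if (i == 1 && !retried[i]) {
                    retried[i] = 1;
                    return CLO_REPEAT;
            }
            return 0;
    }

    int main(void)
    {
            int retried[3] = { 0 };
            int i;

            for (i = 0; i < 3; ++i) {
                    if (try_slot(i, retried) == CLO_REPEAT) {
                            --i; /* the loop's ++i revisits this slot */
                            continue;
                    }
                    printf("slot %d done\n", i);
            }
            return 0;
    }
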
@@ -1039,6 +1183,7 @@ static const struct cl_lock_operations lov_lock_ops = {
         .clo_wait      = lov_lock_wait,
         .clo_use       = lov_lock_use,
         .clo_unuse     = lov_lock_unuse,
+        .clo_cancel    = lov_lock_cancel,
         .clo_fits_into = lov_lock_fits_into,
         .clo_delete    = lov_lock_delete,
         .clo_print     = lov_lock_print
@@ -1051,7 +1196,7 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
         int result;
 
         ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
+       OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
         if (lck != NULL) {
                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                 result = lov_lock_sub_init(env, lck, io);
@@ -1060,13 +1205,49 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
         RETURN(result);
 }
 
+static void lov_empty_lock_fini(const struct lu_env *env,
+                               struct cl_lock_slice *slice)
+{
+       struct lov_lock *lck = cl2lov_lock(slice);
+       OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
+}
+
+static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
+                       lu_printer_t p, const struct cl_lock_slice *slice)
+{
+       (*p)(env, cookie, "empty\n");
+       return 0;
+}
+
+/* XXX: more methods will be added later. */
+static const struct cl_lock_operations lov_empty_lock_ops = {
+       .clo_fini  = lov_empty_lock_fini,
+       .clo_print = lov_empty_lock_print
+};
+
+int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
+               struct cl_lock *lock, const struct cl_io *io)
+{
+       struct lov_lock *lck;
+       int result = -ENOMEM;
+
+       ENTRY;
+       OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
+       if (lck != NULL) {
+               cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
+               lck->lls_orig = lock->cll_descr;
+               result = 0;
+       }
+       RETURN(result);
+}
+
 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                                struct cl_lock *parent)
 {
         struct cl_lock_closure *closure;
 
         closure = &lov_env_info(env)->lti_closure;
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
         cl_lock_closure_init(env, closure, parent, 1);
         return closure;
 }
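
lov_empty_lock_ops above supplies only clo_fini and clo_print; every other method is absent, which works because the cl_lock state machine only invokes methods a slice actually provides. A stripped-down sketch of that optional-method vtable pattern (hypothetical types, not the Lustre structures):

    #include <stdio.h>

    struct lock_ops {
            void (*fini)(void);
            void (*print)(void);
            void (*cancel)(void); /* absent for the empty lock */
    };

    static void empty_print(void) { printf("empty\n"); }
    static void empty_fini(void)  { printf("freed\n"); }

    static const struct lock_ops empty_ops = {
            .fini  = empty_fini,
            .print = empty_print,
            /* .cancel left NULL on purpose */
    };

    /* the dispatcher tolerates gaps in the ops table */
    static void dispatch_cancel(const struct lock_ops *ops)
    {
            if (ops->cancel != NULL)
                    ops->cancel();
    }

    int main(void)
    {
            empty_ops.print();
            dispatch_cancel(&empty_ops); /* no-op: method not provided */
            empty_ops.fini();
            return 0;
    }
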