LU-160 Reduce OST size requirement for test 155
diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c
index 58433c4..d633309 100644
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
 
 #include "lov_cl_internal.h"
 
-/** \addtogroup lov lov @{ */
+/** \addtogroup lov
+ *  @{
+ */
 
 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                                struct cl_lock *parent);
 
+static int lov_lock_unuse(const struct lu_env *env,
+                          const struct cl_lock_slice *slice);
 /*****************************************************************************
  *
  * Lov lock operations.
@@ -73,12 +77,11 @@ static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
          * they are not initialized at all. As a temp fix, in this case,
          * we still borrow the parent's env to call sublock operations.
          */
-        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
+        if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                 subenv->lse_env = env;
                 subenv->lse_io  = io;
                 subenv->lse_sub = NULL;
         } else {
-                LASSERT(io != NULL);
                 sub = lov_sub_get(env, lio, lls->sub_stripe);
                 if (!IS_ERR(sub)) {
                         subenv->lse_env = sub->sub_env;
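
Note: the rewritten condition relies on C's short-circuit evaluation; `!io` must be tested before `cl_object_same(io->ci_obj, ...)` so a NULL `io` is never dereferenced, which is also why the `LASSERT(io != NULL)` in the else branch can be dropped. A minimal standalone sketch of the idiom (the types and `obj_same()` below are invented for illustration, not Lustre APIs):

    #include <stddef.h>

    struct io { int obj; };

    static int obj_same(int a, int b)
    {
            return a == b;
    }

    /* Safe for io == NULL: `||` stops before io->obj is ever read. */
    static int must_borrow_parent_env(const struct io *io, int parent_obj)
    {
            return io == NULL || !obj_same(io->obj, parent_obj);
    }
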
@@ -119,7 +122,7 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
         lck->lls_sub[idx].sub_lock = lsl;
         lck->lls_nr_filled++;
         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
-        list_add_tail(&link->lll_list, &lsl->lss_parents);
+        cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
         link->lll_idx = idx;
         link->lll_super = lck;
         cl_lock_get(parent);
@@ -202,7 +205,7 @@ static int lov_sublock_lock(const struct lu_env *env,
         int                 result = 0;
         ENTRY;
 
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
 
         sublock = lls->sub_lock;
         child = sublock->lss_cl.cls_lock;
@@ -213,7 +216,8 @@ static int lov_sublock_lock(const struct lu_env *env,
                 LASSERT(cl_lock_is_mutexed(child));
                 sublock->lss_active = parent;
 
-                if (unlikely(child->cll_state == CLS_FREEING)) {
+                if (unlikely((child->cll_state == CLS_FREEING) ||
+                             (child->cll_flags & CLF_CANCELLED))) {
                         struct lov_lock_link *link;
                         /*
                          * we could race with lock deletion which temporarily
@@ -225,6 +229,7 @@ static int lov_sublock_lock(const struct lu_env *env,
                         LASSERT(link != NULL);
                         lov_lock_unlink(env, link, sublock);
                         lov_sublock_unlock(env, sublock, closure, NULL);
+                        lck->lls_cancel_race = 1;
                         result = CLO_REPEAT;
                 } else if (lsep) {
                         struct lov_sublock_env *subenv;
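
Note: CLO_REPEAT tells the caller to restart the whole operation once the racing sub-lock has been unlinked; the new `lls_cancel_race` flag additionally records that the race happened so that lov_lock_unuse()/lov_lock_use() can report -ESTALE later. A minimal standalone model of the restart convention (all names below are illustrative, not Lustre APIs):

    #include <stdio.h>

    enum { OP_OK = 0, CLO_REPEAT = 1 };

    static int racing = 1;

    /* Hypothetical sub-lock operation that loses a cancellation race once:
     * it unlinks the dying sub-lock and asks the caller to start over. */
    static int sublock_op(void)
    {
            if (racing) {
                    racing = 0;
                    return CLO_REPEAT;
            }
            return OP_OK;
    }

    int main(void)
    {
            int rc;

            do
                    rc = sublock_op();      /* re-drive until the race clears */
            while (rc == CLO_REPEAT);
            printf("rc = %d\n", rc);
            return 0;
    }
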
@@ -316,7 +321,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                         nr++;
         }
         LASSERT(nr > 0);
-        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
+        OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
         if (lck->lls_sub == NULL)
                 RETURN(-ENOMEM);
 
@@ -341,6 +346,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                         descr->cld_end   = cl_index(descr->cld_obj, end);
                         descr->cld_mode  = parent->cll_descr.cld_mode;
                         descr->cld_gid   = parent->cll_descr.cld_gid;
+                        descr->cld_enq_flags   = parent->cll_descr.cld_enq_flags;
                         /* XXX has no effect */
                         lck->lls_sub[nr].sub_got = *descr;
                         lck->lls_sub[nr].sub_stripe = i;
@@ -362,6 +368,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                                 result = PTR_ERR(sublock);
                                 break;
                         }
+                        cl_lock_get_trust(sublock);
                         cl_lock_mutex_get(env, sublock);
                         cl_lock_mutex_get(env, parent);
                         /*
@@ -379,6 +386,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                                                "lov-parent", parent);
                         }
                         cl_lock_mutex_put(env, sublock);
+                        cl_lock_put(env, sublock);
                 }
         }
         /*
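
Note: the cl_lock_get_trust()/cl_lock_put() pair added in this hunk pins the sub-lock across the mutexed section, so a concurrent delete cannot free it while this thread still holds a pointer to it. A standalone sketch of the pin pattern, with invented refcounting helpers standing in for the cl_lock APIs:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
            int             refs;
            pthread_mutex_t mutex;
    };

    static void obj_get(struct obj *o)
    {
            __sync_fetch_and_add(&o->refs, 1);  /* cf. cl_lock_get_trust() */
    }

    static void obj_put(struct obj *o)
    {
            if (__sync_sub_and_fetch(&o->refs, 1) == 0)
                    free(o);                    /* last reference frees it */
    }

    static void use_obj(struct obj *o)
    {
            obj_get(o);                     /* pin before taking the mutex */
            pthread_mutex_lock(&o->mutex);
            /* ... the object cannot disappear here ... */
            pthread_mutex_unlock(&o->mutex);
            obj_put(o);                     /* cf. cl_lock_put() */
    }
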
@@ -477,12 +485,29 @@ static void lov_lock_fini(const struct lu_env *env,
                          * a reference on its parent.
                          */
                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
-                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
+                OBD_FREE_LARGE(lck->lls_sub,
+                               lck->lls_nr * sizeof lck->lls_sub[0]);
         }
         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
         EXIT;
 }
 
+static int lov_lock_enqueue_wait(const struct lu_env *env,
+                                 struct lov_lock *lck,
+                                 struct cl_lock *sublock)
+{
+        struct cl_lock *lock = lck->lls_cl.cls_lock;
+        int             result;
+        ENTRY;
+
+        LASSERT(cl_lock_is_mutexed(lock));
+
+        cl_lock_mutex_put(env, lock);
+        result = cl_lock_enqueue_wait(env, sublock, 0);
+        cl_lock_mutex_get(env, lock);
+        RETURN(result ?: CLO_REPEAT);
+}
+
 /**
  * Tries to advance a state machine of a given sub-lock toward enqueuing of
  * the top-lock.
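
Note: `result ?: CLO_REPEAT` is the GNU C conditional with omitted middle operand, equivalent to `result ? result : CLO_REPEAT`; the helper always forces at least CLO_REPEAT because the top-lock mutex was dropped while waiting, so its state must be revalidated. A pthread sketch of the drop/wait/reacquire shape (all names below are invented for illustration):

    #include <pthread.h>

    #define CLO_REPEAT 1

    static pthread_mutex_t top_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t sub_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  sub_cond  = PTHREAD_COND_INITIALIZER;
    static int sub_busy = 1;

    /* Called with top_mutex held; returns with it held again. */
    static int enqueue_wait(void)
    {
            int result = 0;

            pthread_mutex_unlock(&top_mutex);   /* never sleep holding it */

            pthread_mutex_lock(&sub_mutex);
            while (sub_busy)
                    pthread_cond_wait(&sub_cond, &sub_mutex);
            pthread_mutex_unlock(&sub_mutex);

            pthread_mutex_lock(&top_mutex);     /* state may have changed */
            return result ?: CLO_REPEAT;        /* caller must re-drive */
    }
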
@@ -532,10 +557,11 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
         cl_lock_mutex_get(env, parent);
 
         if (!IS_ERR(sublock)) {
+                cl_lock_get_trust(sublock);
                 if (parent->cll_state == CLS_QUEUING &&
-                    lck->lls_sub[idx].sub_lock == NULL)
+                    lck->lls_sub[idx].sub_lock == NULL) {
                         lov_sublock_adopt(env, lck, sublock, idx, link);
-                else {
+                } else {
                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                         /* other thread allocated sub-lock, or enqueue is no
                          * longer going on */
@@ -544,6 +570,7 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                         cl_lock_mutex_get(env, parent);
                 }
                 cl_lock_mutex_put(env, sublock);
+                cl_lock_put(env, sublock);
                 result = CLO_REPEAT;
         } else
                 result = PTR_ERR(sublock);
@@ -607,13 +634,30 @@ static int lov_lock_enqueue(const struct lu_env *env,
                                                   subenv->lse_io, enqflags,
                                                   i == lck->lls_nr - 1);
                         minstate = min(minstate, sublock->cll_state);
-                        /*
-                         * Don't hold a sub-lock in CLS_CACHED state, see
-                         * description for lov_lock::lls_sub.
-                         */
-                        if (sublock->cll_state > CLS_HELD)
-                                rc = lov_sublock_release(env, lck, i, 1, rc);
-                        lov_sublock_unlock(env, sub, closure, subenv);
+                        if (rc == CLO_WAIT) {
+                                switch (sublock->cll_state) {
+                                case CLS_QUEUING:
+                                        /* take recursive mutex, the lock is
+                                         * released in lov_lock_enqueue_wait.
+                                         */
+                                        cl_lock_mutex_get(env, sublock);
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                        rc = lov_lock_enqueue_wait(env, lck,
+                                                                   sublock);
+                                        break;
+                                case CLS_CACHED:
+                                        rc = lov_sublock_release(env, lck, i,
+                                                                 1, rc);
+                                default:
+                                        lov_sublock_unlock(env, sub, closure,
+                                                           subenv);
+                                        break;
+                                }
+                        } else {
+                                LASSERT(sublock->cll_conflict == NULL);
+                                lov_sublock_unlock(env, sub, closure, subenv);
+                        }
                 }
                 result = lov_subresult(result, rc);
                 if (result != 0)
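
Note: lov_subresult() folds each per-stripe rc into the running top-lock result. A hedged standalone model of that folding, assuming a severity ordering of success < CLO_WAIT < CLO_REPEAT < error (the constants below are illustrative, not the real cl_lock values):

    enum { CLO_WAIT = 2, CLO_REPEAT = 3 };  /* illustrative values only */

    static int rank(int rc)
    {
            if (rc < 0)
                    return 3;       /* hard error: most severe, wins outright */
            if (rc == CLO_REPEAT)
                    return 2;
            if (rc == CLO_WAIT)
                    return 1;
            return 0;               /* 0: this stripe is fully done */
    }

    /* The more severe of the running result and the new per-stripe rc. */
    static int fold_subresult(int result, int rc)
    {
            return rank(rc) > rank(result) ? rc : result;
    }
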
@@ -643,7 +687,7 @@ static int lov_lock_unuse(const struct lu_env *env,
                 /* top-lock state cannot change concurrently, because single
                  * thread (one that released the last hold) carries unlocking
                  * to the completion. */
-                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
+                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
                 lls = &lck->lls_sub[i];
                 sub = lls->sub_lock;
                 if (sub == NULL)
@@ -652,27 +696,93 @@ static int lov_lock_unuse(const struct lu_env *env,
                 sublock = sub->lss_cl.cls_lock;
                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                 if (rc == 0) {
-                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
+                        if (lls->sub_flags & LSF_HELD) {
                                 LASSERT(sublock->cll_state == CLS_HELD);
                                 rc = cl_unuse_try(subenv->lse_env, sublock);
-                                if (rc != CLO_WAIT)
-                                        rc = lov_sublock_release(env, lck,
-                                                                 i, 0, rc);
+                                rc = lov_sublock_release(env, lck, i, 0, rc);
                         }
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
-                        break;
         }
-        if (result == 0 && lck->lls_unuse_race) {
-                lck->lls_unuse_race = 0;
+
+        if (result == 0 && lck->lls_cancel_race) {
+                lck->lls_cancel_race = 0;
                 result = -ESTALE;
         }
         cl_lock_closure_fini(closure);
         RETURN(result);
 }
 
+
+static void lov_lock_cancel(const struct lu_env *env,
+                            const struct cl_lock_slice *slice)
+{
+        struct lov_lock        *lck     = cl2lov_lock(slice);
+        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+        int i;
+        int result;
+
+        ENTRY;
+
+        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
+                int rc;
+                struct lovsub_lock     *sub;
+                struct cl_lock         *sublock;
+                struct lov_lock_sub    *lls;
+                struct lov_sublock_env *subenv;
+
+                /* top-lock state cannot change concurrently, because single
+                 * thread (one that released the last hold) carries unlocking
+                 * to the completion. */
+                lls = &lck->lls_sub[i];
+                sub = lls->sub_lock;
+                if (sub == NULL)
+                        continue;
+
+                sublock = sub->lss_cl.cls_lock;
+                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+                if (rc == 0) {
+                        if (!(lls->sub_flags & LSF_HELD)) {
+                                lov_sublock_unlock(env, sub, closure, subenv);
+                                continue;
+                        }
+
+                        switch (sublock->cll_state) {
+                        case CLS_HELD:
+                                rc = cl_unuse_try(subenv->lse_env,
+                                                  sublock);
+                                lov_sublock_release(env, lck, i, 0, 0);
+                                break;
+                        case CLS_ENQUEUED:
+                                /* TODO: cancelling this sub-lock is not
+                                 * ideal, since the lock itself is innocent,
+                                 * but it is acceptable. A better way would
+                                 * be to define a new lock method to unhold
+                                 * the DLM lock. */
+                                cl_lock_cancel(env, sublock);
+                        default:
+                                lov_sublock_release(env, lck, i, 1, 0);
+                                break;
+                        }
+                        lov_sublock_unlock(env, sub, closure, subenv);
+                }
+
+                if (rc == CLO_REPEAT) {
+                        --i;
+                        continue;
+                }
+
+                result = lov_subresult(result, rc);
+        }
+
+        if (result)
+                CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
+                              "lov_lock_cancel fails with %d.\n", result);
+
+        cl_lock_closure_fini(closure);
+}
+
 static int lov_lock_wait(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
 {
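
Note: the new clo_cancel method applies a per-state teardown policy: a HELD sub-lock is unused first, an ENQUEUED one is cancelled outright and then falls through to the common release, and anything else is simply released. A standalone model of that switch (the helpers below are illustrative stubs, not Lustre APIs):

    enum state { ST_HELD, ST_ENQUEUED, ST_OTHER };

    static void unuse(int i)                  { /* drop the active use */ }
    static void cancel(int i)                 { /* cancel the DLM-side lock */ }
    static void release(int i, int cancelled) { /* drop our hold on it */ }

    static void teardown(enum state st, int i)
    {
            switch (st) {
            case ST_HELD:
                    unuse(i);
                    release(i, 0);
                    break;
            case ST_ENQUEUED:
                    cancel(i);
                    /* fall through: released like any remaining state */
            default:
                    release(i, 1);
                    break;
            }
    }
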
@@ -705,7 +815,7 @@ static int lov_lock_wait(const struct lu_env *env,
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
         cl_lock_closure_fini(closure);
@@ -720,7 +830,7 @@ static int lov_lock_use(const struct lu_env *env,
         int                     result;
         int                     i;
 
-        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
+        LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
         ENTRY;
 
         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
@@ -730,37 +840,48 @@ static int lov_lock_use(const struct lu_env *env,
                 struct lov_lock_sub    *lls;
                 struct lov_sublock_env *subenv;
 
-                if (slice->cls_lock->cll_state != CLS_CACHED) {
-                        /* see comment in lov_lock_enqueue(). */
-                        LASSERT(i > 0 && result != 0);
-                        break;
-                }
-                /*
-                 * if a sub-lock was destroyed while top-lock was in
-                 * CLS_CACHED state, top-lock would have been moved into
-                 * CLS_NEW state, so all sub-locks have to be in place.
-                 */
+                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
+
                 lls = &lck->lls_sub[i];
                 sub = lls->sub_lock;
-                LASSERT(sub != NULL);
+                if (sub == NULL) {
+                        /*
+                         * Sub-lock might have been canceled while the
+                         * top-lock was cached.
+                         */
+                        result = -ESTALE;
+                        break;
+                }
+
                 sublock = sub->lss_cl.cls_lock;
                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                 if (rc == 0) {
                         LASSERT(sublock->cll_state != CLS_FREEING);
                         lov_sublock_hold(env, lck, i);
                         if (sublock->cll_state == CLS_CACHED) {
-                                rc = cl_use_try(subenv->lse_env, sublock);
+                                rc = cl_use_try(subenv->lse_env, sublock, 0);
                                 if (rc != 0)
                                         rc = lov_sublock_release(env, lck,
                                                                  i, 1, rc);
-                        } else
-                                rc = 0;
+                        }
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
+
+        if (lck->lls_cancel_race) {
+                /*
+                 * If unlocking happened at the same time, the sub-lock state
+                 * should be FREEING, and lov_sublock_lock should return
+                 * CLO_REPEAT. In this case, return -ESTALE so that the upper
+                 * layer can reset the lock state to NEW.
+                 */
+                lck->lls_cancel_race = 0;
+                LASSERT(result != 0);
+                result = -ESTALE;
+        }
         cl_lock_closure_fini(closure);
         RETURN(result);
 }
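
Note: the -ESTALE convention introduced here has a caller-side half: when clo_use reports -ESTALE, the cached top-lock is no longer backed by all of its sub-locks, and the upper layer is expected to reset it to NEW and enqueue it afresh. A hedged sketch of that contract, with all names invented for illustration:

    #include <errno.h>

    enum lock_state { LS_NEW, LS_CACHED, LS_HELD };

    struct top_lock { enum lock_state state; };

    static int lock_use(struct top_lock *lk)
    {
            return -ESTALE;         /* simulate a cancelled sub-lock */
    }

    static int lock_enqueue(struct top_lock *lk)
    {
            lk->state = LS_HELD;    /* NEW -> ... -> HELD, details elided */
            return 0;
    }

    static int lock_acquire(struct top_lock *lk)
    {
            int rc;

            if (lk->state == LS_CACHED) {
                    rc = lock_use(lk);
                    if (rc != -ESTALE)
                            return rc;
                    lk->state = LS_NEW; /* drop the stale cached state ... */
            }
            return lock_enqueue(lk);    /* ... and start over from NEW */
    }
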
@@ -883,7 +1004,7 @@ static int lov_lock_fits_into(const struct lu_env *env,
                                                      cl2lov(slice->cls_obj),
                                                      lov->lls_sub[0].sub_stripe,
                                                      got, need);
-        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
+        } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                 /*
                  * Multi-stripe locks are only suitable for `quick' IO and for
@@ -900,7 +1021,7 @@ static int lov_lock_fits_into(const struct lu_env *env,
                  * match against original lock extent.
                  */
                 result = cl_lock_ext_match(&lov->lls_orig, need);
-        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
+        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
                result);
@@ -917,7 +1038,7 @@ void lov_lock_unlink(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        list_del_init(&link->lll_list);
+        cfs_list_del_init(&link->lll_list);
         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
         /* yank this sub-lock from parent's array */
         lck->lls_sub[link->lll_idx].sub_lock = NULL;
@@ -938,7 +1059,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
         ENTRY;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 if (scan->lll_super == lck)
                         RETURN(scan);
         }
@@ -964,44 +1085,40 @@ static void lov_lock_delete(const struct lu_env *env,
 {
         struct lov_lock        *lck     = cl2lov_lock(slice);
         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
-        int i;
+        struct lov_lock_link   *link;
+        int                     rc;
+        int                     i;
 
         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
         ENTRY;
 
         for (i = 0; i < lck->lls_nr; ++i) {
-                struct lov_lock_sub *lls;
-                struct lovsub_lock  *lsl;
-                struct cl_lock      *sublock;
-                int rc;
+                struct lov_lock_sub *lls = &lck->lls_sub[i];
+                struct lovsub_lock  *lsl = lls->sub_lock;
 
-                lls = &lck->lls_sub[i];
-                lsl = lls->sub_lock;
-                if (lsl == NULL)
+                if (lsl == NULL) /* already removed */
                         continue;
 
-                sublock = lsl->lss_cl.cls_lock;
                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
-                if (rc == 0) {
-                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
-                                lov_sublock_release(env, lck, i, 1, 0);
-                        if (sublock->cll_state < CLS_FREEING) {
-                                struct lov_lock_link *link;
-
-                                link = lov_lock_link_find(env, lck, lsl);
-                                LASSERT(link != NULL);
-                                lov_lock_unlink(env, link, lsl);
-                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
-                        }
-                        lov_sublock_unlock(env, lsl, closure, NULL);
-                } else if (rc == CLO_REPEAT) {
-                        --i; /* repeat with this lock */
-                } else {
-                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
-                                      "Cannot get sub-lock for delete: %i\n",
-                                      rc);
+                if (rc == CLO_REPEAT) {
+                        --i;
+                        continue;
                 }
+
+                LASSERT(rc == 0);
+                LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
+
+                if (lls->sub_flags & LSF_HELD)
+                        lov_sublock_release(env, lck, i, 1, 0);
+
+                link = lov_lock_link_find(env, lck, lsl);
+                LASSERT(link != NULL);
+                lov_lock_unlink(env, link, lsl);
+                LASSERT(lck->lls_sub[i].sub_lock == NULL);
+
+                lov_sublock_unlock(env, lsl, closure, NULL);
         }
+
         cl_lock_closure_fini(closure);
         EXIT;
 }
@@ -1033,6 +1150,7 @@ static const struct cl_lock_operations lov_lock_ops = {
         .clo_wait      = lov_lock_wait,
         .clo_use       = lov_lock_use,
         .clo_unuse     = lov_lock_unuse,
+        .clo_cancel    = lov_lock_cancel,
         .clo_fits_into = lov_lock_fits_into,
         .clo_delete    = lov_lock_delete,
         .clo_print     = lov_lock_print
@@ -1060,7 +1178,7 @@ static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
         struct cl_lock_closure *closure;
 
         closure = &lov_env_info(env)->lti_closure;
-        LASSERT(list_empty(&closure->clc_list));
+        LASSERT(cfs_list_empty(&closure->clc_list));
         cl_lock_closure_init(env, closure, parent, 1);
         return closure;
 }