Update copyrights on source files changed since 2010-02-15.
diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c
index 2e6aa1a..1c504a1 100644
@@ -1,27 +1,42 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- *   Author: Peter Braam <braam@clusterfs.com>
- *   Author: Phil Schwan <phil@clusterfs.com>
+ * GPL HEADER START
  *
- *   This file is part of the Lustre file system, http://www.lustre.org
- *   Lustre is a trademark of Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   You may have signed or agreed to another license before downloading
- *   this software.  If so, you are bound by the terms and conditions
- *   of that agreement, and the following does not apply to you.  See the
- *   LICENSE file included with this distribution for more information.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   If you did not agree to a different license, then this copy of Lustre
- *   is open source software; you can redistribute it and/or modify it
- *   under the terms of version 2 of the GNU General Public License as
- *   published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   In either case, Lustre is distributed in the hope that it will be
- *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   license text for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_extent.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
  */
 
 #define DEBUG_SUBSYSTEM S_LDLM
 # include <liblustre.h>
 #else
 # include <libcfs/libcfs.h>
-# include <libcfs/kp30.h>
 #endif
 
 #include <lustre_dlm.h>
 #include <obd_support.h>
 #include <obd.h>
+#include <obd_class.h>
 #include <lustre_lib.h>
 
 #include "ldlm_internal.h"
@@ -50,7 +65,7 @@ static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
         __u64 req_start = req->l_req_extent.start;
         __u64 req_end = req->l_req_extent.end;
         __u64 req_align, mask;
+
         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
@@ -120,7 +135,9 @@ static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
                         limiter.start = req_start;
 
                 if (interval_is_overlapped(tree->lit_root, &ext))
-                        printk("req_mode = %d, tree->lit_mode = %d, tree->lit_size = %d\n",
+                        CDEBUG(D_INFO, 
+                               "req_mode = %d, tree->lit_mode = %d, "
+                               "tree->lit_size = %d\n",
                                req_mode, tree->lit_mode, tree->lit_size);
                 interval_expand(tree->lit_root, &ext, &limiter);
                 limiter.start = max(limiter.start, ext.start);
@@ -147,7 +164,7 @@ static void
 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                     struct ldlm_extent *new_ex)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_resource *res = req->l_resource;
         ldlm_mode_t req_mode = req->l_req_mode;
         __u64 req_start = req->l_req_extent.start;
@@ -158,11 +175,11 @@ ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
         lockmode_verify(req_mode);
 
         /* for waiting locks */
-        list_for_each(tmp, &res->lr_waiting) {
+        cfs_list_for_each(tmp, &res->lr_waiting) {
                 struct ldlm_lock *lock;
                 struct ldlm_extent *l_extent;
 
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
                 l_extent = &lock->l_policy_data.l_extent;
 
                 /* We already hit the minimum requested size, search no more */
@@ -200,7 +217,7 @@ ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                         continue;
 
                 /* We grow extents downwards only as far as they don't overlap
-                 * with already-granted locks, on the assumtion that clients
+                 * with already-granted locks, on the assumption that clients
                  * will be writing beyond the initial requested end and would
                  * then need to enqueue a new lock beyond previous request.
                  * l_req_extent->end strictly < req_start, checked above. */
@@ -268,6 +285,9 @@ static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
         struct ldlm_resource *res = lock->l_resource;
         cfs_time_t now = cfs_time_current();
 
+        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
+                return 1;
+
         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
         if (contended_locks > res->lr_namespace->ns_contended_locks)
                 res->lr_contention_time = now;
@@ -276,7 +296,7 @@ static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
 }
 
 struct ldlm_extent_compat_args {
-        struct list_head *work_list;
+        cfs_list_t *work_list;
         struct ldlm_lock *lock;
         ldlm_mode_t mode;
         int *locks;
@@ -289,15 +309,15 @@ static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
         struct ldlm_extent_compat_args *priv = data;
         struct ldlm_interval *node = to_ldlm_interval(n);
         struct ldlm_extent *extent;
-        struct list_head *work_list = priv->work_list;
+        cfs_list_t *work_list = priv->work_list;
         struct ldlm_lock *lock, *enq = priv->lock;
         ldlm_mode_t mode = priv->mode;
         int count = 0;
         ENTRY;
 
-        LASSERT(!list_empty(&node->li_group));
+        LASSERT(!cfs_list_empty(&node->li_group));
 
-        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+        cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                 /* interval tree is for granted lock */
                 LASSERTF(mode == lock->l_granted_mode,
                          "mode = %s, lock->l_granted_mode = %s\n",
@@ -330,11 +350,11 @@ static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
  * negative error, such as EWOULDBLOCK for group locks
  */
 static int
-ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
+ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
                          int *flags, ldlm_error_t *err,
-                         struct list_head *work_list, int *contended_locks)
+                         cfs_list_t *work_list, int *contended_locks)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lock;
         struct ldlm_resource *res = req->l_resource;
         ldlm_mode_t req_mode = req->l_req_mode;
@@ -405,15 +425,16 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         } else {
                                 interval_search(tree->lit_root, &ex,
                                                 ldlm_extent_compat_cb, &data);
-                                if (!list_empty(work_list) && compat)
+                                if (!cfs_list_empty(work_list) && compat)
                                         compat = 0;
                         }
                 }
         } else { /* for waiting queue */
-                list_for_each(tmp, queue) {
+                cfs_list_for_each(tmp, queue) {
                         check_contention = 1;
 
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
 
                         if (req == lock)
                                 break;
@@ -430,7 +451,7 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                          * front of first non-GROUP lock */
 
                                         ldlm_resource_insert_lock_after(lock, req);
-                                        list_del_init(&lock->l_res_link);
+                                        cfs_list_del_init(&lock->l_res_link);
                                         ldlm_resource_insert_lock_after(req, lock);
                                         compat = 0;
                                         break;
@@ -524,7 +545,7 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                            first non-GROUP lock */
 
                                         ldlm_resource_insert_lock_after(lock, req);
-                                        list_del_init(&lock->l_res_link);
+                                        cfs_list_del_init(&lock->l_res_link);
                                         ldlm_resource_insert_lock_after(req, lock);
                                         break;
                                 }
@@ -584,29 +605,29 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
 
         RETURN(compat);
 destroylock:
-        list_del_init(&req->l_res_link);
+        cfs_list_del_init(&req->l_res_link);
         ldlm_lock_destroy_nolock(req);
         *err = compat;
         RETURN(compat);
 }
 
-static void discard_bl_list(struct list_head *bl_list)
+static void discard_bl_list(cfs_list_t *bl_list)
 {
-        struct list_head *tmp, *pos;
+        cfs_list_t *tmp, *pos;
         ENTRY;
 
-        list_for_each_safe(pos, tmp, bl_list) {
+        cfs_list_for_each_safe(pos, tmp, bl_list) {
                 struct ldlm_lock *lock =
-                        list_entry(pos, struct ldlm_lock, l_bl_ast);
+                        cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
 
-                list_del_init(&lock->l_bl_ast);
+                cfs_list_del_init(&lock->l_bl_ast);
                 LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                 lock->l_flags &= ~LDLM_FL_AST_SENT;
                 LASSERT(lock->l_bl_ast_run == 0);
                 LASSERT(lock->l_blocking_lock);
-                LDLM_LOCK_PUT(lock->l_blocking_lock);
+                LDLM_LOCK_RELEASE(lock->l_blocking_lock);
                 lock->l_blocking_lock = NULL;
-                LDLM_LOCK_PUT(lock);
+                LDLM_LOCK_RELEASE(lock);
         }
         EXIT;
 }
@@ -619,15 +640,15 @@ static void discard_bl_list(struct list_head *bl_list)
   *   - blocking ASTs have not been sent
   *   - must call this function with the ns lock held once */
 int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                             ldlm_error_t *err, struct list_head *work_list)
+                             ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
+        CFS_LIST_HEAD(rpc_list);
         int rc, rc2;
         int contended_locks = 0;
         ENTRY;
 
-        LASSERT(list_empty(&res->lr_converting));
+        LASSERT(cfs_list_empty(&res->lr_converting));
         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                 !(lock->l_flags & LDLM_AST_DISCARD_DATA));
         check_res_locked(res);
@@ -684,20 +705,35 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
+                if (cfs_list_empty(&lock->l_res_link))
                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
                 rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
-                lock_res(res);
 
+                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
+                    !ns_is_client(res->lr_namespace))
+                        class_fail_export(lock->l_export);
+                lock_res(res);
                 if (rc == -ERESTART) {
+
+                        /* 15715: The lock was granted and destroyed after
+                         * resource lock was dropped. Interval node was freed
+                         * in ldlm_lock_destroy. Anyway, this always happens
+                         * when a client is being evicted. So it would be
+                         * ok to return an error. -jay */
+                        if (lock->l_destroyed) {
+                                *err = -EAGAIN;
+                                GOTO(out, rc = -EAGAIN);
+                        }
+
                         /* lock was granted while resource was unlocked. */
                         if (lock->l_granted_mode == lock->l_req_mode) {
                                 /* bug 11300: if the lock has been granted,
                                  * break earlier because otherwise, we will go
                                  * to restart and ldlm_resource_unlink will be
                                  * called and it causes the interval node to be
-                                 * freed. Then we will fail at 
+                                 * freed. Then we will fail at
                                  * ldlm_extent_add_lock() */
                                 *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
                                             LDLM_FL_BLOCK_WAIT);
@@ -715,7 +751,7 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
         }
         RETURN(0);
 out:
-        if (!list_empty(&rpc_list)) {
+        if (!cfs_list_empty(&rpc_list)) {
                 LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
                 discard_bl_list(&rpc_list);
         }
@@ -730,7 +766,7 @@ out:
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lck;
         __u64 kms = 0;
         ENTRY;
@@ -740,8 +776,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
          * calculation of the kms */
         lock->l_flags |= LDLM_FL_KMS_IGNORE;
 
-        list_for_each(tmp, &res->lr_granted) {
-                lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each(tmp, &res->lr_granted) {
+                lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                         continue;
@@ -766,7 +802,7 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
         ENTRY;
 
         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
-        OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
+        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
         if (node == NULL)
                 RETURN(NULL);
 
@@ -778,7 +814,8 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
 void ldlm_interval_free(struct ldlm_interval *node)
 {
         if (node) {
-                LASSERT(list_empty(&node->li_group));
+                LASSERT(cfs_list_empty(&node->li_group));
+                LASSERT(!interval_is_intree(&node->li_node));
                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
         }
 }
@@ -790,7 +827,7 @@ void ldlm_interval_attach(struct ldlm_interval *n,
         LASSERT(l->l_tree_node == NULL);
         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
 
-        list_add_tail(&l->l_sl_policy, &n->li_group);
+        cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
         l->l_tree_node = n;
 }
 
@@ -801,11 +838,11 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
         if (n == NULL)
                 return NULL;
 
-        LASSERT(!list_empty(&n->li_group));
+        LASSERT(!cfs_list_empty(&n->li_group));
         l->l_tree_node = NULL;
-        list_del_init(&l->l_sl_policy);
+        cfs_list_del_init(&l->l_sl_policy);
 
-        return (list_empty(&n->li_group) ? n : NULL);
+        return (cfs_list_empty(&n->li_group) ? n : NULL);
 }
 
 static inline int lock_mode_to_index(ldlm_mode_t mode)
@@ -831,6 +868,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 
         node = lock->l_tree_node;
         LASSERT(node != NULL);
+        LASSERT(!interval_is_intree(&node->li_node));
 
         idx = lock_mode_to_index(lock->l_granted_mode);
         LASSERT(lock->l_granted_mode == 1 << idx);
@@ -858,14 +896,13 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct ldlm_interval *node;
+        struct ldlm_interval *node = lock->l_tree_node;
         struct ldlm_interval_tree *tree;
         int idx;
 
-        if (lock->l_granted_mode != lock->l_req_mode)
+        if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
                 return;
 
-        LASSERT(lock->l_tree_node != NULL);
         idx = lock_mode_to_index(lock->l_granted_mode);
         LASSERT(lock->l_granted_mode == 1 << idx);
         tree = &res->lr_itree[idx];