b=21528 lower the message severity
lustre/ldlm/ldlm_resource.c
index 494b74e..3fa5d26 100644
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- * Copyright (C) 2002 Cluster File Systems, Inc.
+ * GPL HEADER START
  *
- * This code is issued under the GNU General Public License.
- * See the file COPYING in this distribution
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- * by Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_resource.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Peter Braam <braam@clusterfs.com>
  */
 
 #define DEBUG_SUBSYSTEM S_LDLM
+#ifdef __KERNEL__
+# include <lustre_dlm.h>
+#else
+# include <liblustre.h>
+#endif
 
-#include <linux/lustre_dlm.h>
-#include <linux/obd_class.h>
+#include <obd_class.h>
+#include "ldlm_internal.h"
 
-kmem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
+cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
 
-spinlock_t ldlm_namespace_lock = SPIN_LOCK_UNLOCKED;
-struct list_head ldlm_namespace_list = LIST_HEAD_INIT(ldlm_namespace_list);
-static struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
-extern struct proc_dir_entry proc_root;
+cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
+cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);
 
-int ldlm_proc_setup(struct obd_device *obd)
+cfs_semaphore_t ldlm_srv_namespace_lock;
+CFS_LIST_HEAD(ldlm_srv_namespace_list);
+
+cfs_semaphore_t ldlm_cli_namespace_lock;
+CFS_LIST_HEAD(ldlm_cli_namespace_list);
+
+cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
+cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
+cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
+
+#ifdef LPROCFS
+static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
+                             unsigned long count, void *data)
 {
-        ENTRY;
+        ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
+        ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
+        RETURN(count);
+}
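The handler above is hooked up as the "dump_namespaces" entry created in ldlm_proc_setup() below; writing anything to that file dumps every server-side and client-side namespace to the Lustre debug log at the D_DLMTRACE level. A minimal user-space sketch of triggering it follows; the path is an assumption based on OBD_LDLM_DEVICENAME ("ldlm") being registered under the usual /proc/fs/lustre root.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path: the "ldlm" proc dir registered under /proc/fs/lustre. */
        int fd = open("/proc/fs/lustre/ldlm/dump_namespaces", O_WRONLY);

        if (fd < 0) {
                perror("open dump_namespaces");
                return 1;
        }
        /* Any write triggers ldlm_proc_dump_ns(); the written data is ignored. */
        if (write(fd, "1", 1) < 0)
                perror("write dump_namespaces");
        close(fd);
        return 0;
}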
 
+int ldlm_proc_setup(void)
+{
+        int rc;
+        struct lprocfs_vars list[] = {
+                { "dump_namespaces", NULL, ldlm_proc_dump_ns, NULL },
+                { NULL }};
+        ENTRY;
         LASSERT(ldlm_ns_proc_dir == NULL);
 
-        ldlm_ns_proc_dir = proc_mkdir("ldlm", &proc_root);
-        if (ldlm_ns_proc_dir == NULL) {
-                CERROR("Couldn't create /proc/ldlm\n");
-                RETURN(-EPERM);
+        ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
+                                              proc_lustre_root,
+                                              NULL, NULL);
+        if (IS_ERR(ldlm_type_proc_dir)) {
+                CERROR("LProcFS failed in ldlm-init\n");
+                rc = PTR_ERR(ldlm_type_proc_dir);
+                GOTO(err, rc);
         }
+
+        ldlm_ns_proc_dir = lprocfs_register("namespaces",
+                                            ldlm_type_proc_dir,
+                                            NULL, NULL);
+        if (IS_ERR(ldlm_ns_proc_dir)) {
+                CERROR("LProcFS failed in ldlm-init\n");
+                rc = PTR_ERR(ldlm_ns_proc_dir);
+                GOTO(err_type, rc);
+        }
+
+        ldlm_svc_proc_dir = lprocfs_register("services",
+                                            ldlm_type_proc_dir,
+                                            NULL, NULL);
+        if (IS_ERR(ldlm_svc_proc_dir)) {
+                CERROR("LProcFS failed in ldlm-init\n");
+                rc = PTR_ERR(ldlm_svc_proc_dir);
+                GOTO(err_ns, rc);
+        }
+
+        rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
+
         RETURN(0);
+
+err_ns:
+        lprocfs_remove(&ldlm_ns_proc_dir);
+err_type:
+        lprocfs_remove(&ldlm_type_proc_dir);
+err:
+        ldlm_svc_proc_dir = NULL;
+        RETURN(rc);
 }
 
-void ldlm_proc_cleanup(struct obd_device *obd)
+void ldlm_proc_cleanup(void)
 {
-        remove_proc_entry("ldlm", &proc_root);
+        if (ldlm_svc_proc_dir)
+                lprocfs_remove(&ldlm_svc_proc_dir);
+
+        if (ldlm_ns_proc_dir)
+                lprocfs_remove(&ldlm_ns_proc_dir);
+
+        if (ldlm_type_proc_dir)
+                lprocfs_remove(&ldlm_type_proc_dir);
 }
 
-/* FIXME: This can go away when we start to really use lprocfs */
-static int ldlm_proc_ll_rd(char *page, char **start, off_t off,
-                         int count, int *eof, void *data)
+static int lprocfs_rd_lru_size(char *page, char **start, off_t off,
+                               int count, int *eof, void *data)
 {
-        int len;
-        __u64 *temp = (__u64 *)data;
+        struct ldlm_namespace *ns = data;
+        __u32 *nr = &ns->ns_max_unused;
 
-        len = snprintf(page, count, "%Lu\n", *temp);
-
-        return len;
+        if (ns_connect_lru_resize(ns))
+                nr = &ns->ns_nr_unused;
+        return lprocfs_rd_uint(page, start, off, count, eof, nr);
 }
 
-#define LDLM_MAX_UNUSED 20
-struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 client)
+static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
+                               unsigned long count, void *data)
 {
-        struct ldlm_namespace *ns = NULL;
-        struct list_head *bucket;
-        struct proc_dir_entry *proc_entry;
+        struct ldlm_namespace *ns = data;
+        char dummy[MAX_STRING_SIZE + 1], *end;
+        unsigned long tmp;
+        int lru_resize;
+
+        dummy[MAX_STRING_SIZE] = '\0';
+        if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+                return -EFAULT;
+
+        if (strncmp(dummy, "clear", 5) == 0) {
+                CDEBUG(D_DLMTRACE,
+                       "dropping all unused locks from namespace %s\n",
+                       ns->ns_name);
+                if (ns_connect_lru_resize(ns)) {
+                        int canceled, unused  = ns->ns_nr_unused;
+
+                        /* Try to cancel all @ns_nr_unused locks. */
+                        canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC,
+                                                   LDLM_CANCEL_PASSED);
+                        if (canceled < unused) {
+                                CDEBUG(D_DLMTRACE,
+                                       "not all requested locks are canceled, "
+                                       "requested: %d, canceled: %d\n", unused,
+                                       canceled);
+                                return -EINVAL;
+                        }
+                } else {
+                        tmp = ns->ns_max_unused;
+                        ns->ns_max_unused = 0;
+                        ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
+                        ns->ns_max_unused = tmp;
+                }
+                return count;
+        }
 
-        OBD_ALLOC(ns, sizeof(*ns));
-        if (!ns) {
-                LBUG();
-                GOTO(out, NULL);
+        tmp = simple_strtoul(dummy, &end, 0);
+        if (dummy == end) {
+                CERROR("invalid value written\n");
+                return -EINVAL;
+        }
+        lru_resize = (tmp == 0);
+
+        if (ns_connect_lru_resize(ns)) {
+                if (!lru_resize)
+                        ns->ns_max_unused = (unsigned int)tmp;
+
+                if (tmp > ns->ns_nr_unused)
+                        tmp = ns->ns_nr_unused;
+                tmp = ns->ns_nr_unused - tmp;
+
+                CDEBUG(D_DLMTRACE,
+                       "changing namespace %s unused locks from %u to %u\n",
+                       ns->ns_name, ns->ns_nr_unused, (unsigned int)tmp);
+                ldlm_cancel_lru(ns, tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
+
+                if (!lru_resize) {
+                        CDEBUG(D_DLMTRACE,
+                               "disable lru_resize for namespace %s\n",
+                               ns->ns_name);
+                        ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
+                }
+        } else {
+                CDEBUG(D_DLMTRACE,
+                       "changing namespace %s max_unused from %u to %u\n",
+                       ns->ns_name, ns->ns_max_unused, (unsigned int)tmp);
+                ns->ns_max_unused = (unsigned int)tmp;
+                ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
+
+                /* Make sure that LRU resize was originally supported before
+                 * turning it on here. */
+                if (lru_resize &&
+                    (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
+                        CDEBUG(D_DLMTRACE,
+                               "enable lru_resize for namespace %s\n",
+                               ns->ns_name);
+                        ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+                }
         }
 
-        ns->ns_hash = vmalloc(sizeof(*ns->ns_hash) * RES_HASH_SIZE);
-        if (!ns->ns_hash) {
-                LBUG();
-                GOTO(out, ns);
+        return count;
+}
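To summarize the write handler above: "clear" cancels all unused locks in the namespace; "0" re-enables LRU resize if the server originally advertised OBD_CONNECT_LRU_RESIZE; any other number pins ns_max_unused and disables LRU resize. A hedged user-space sketch follows, assuming the per-namespace layout created by ldlm_proc_namespace() below under /proc/fs/lustre/ldlm/namespaces/; the namespace name used here is purely illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: write a value to a namespace's lru_size proc file. */
static int write_lru_size(const char *ns_name, const char *val)
{
        char path[256];
        int fd, rc = 0;

        snprintf(path, sizeof(path),
                 "/proc/fs/lustre/ldlm/namespaces/%s/lru_size", ns_name);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0)
                rc = -1;
        close(fd);
        return rc;
}

int main(void)
{
        /* "clear" drops all unused locks from the (illustrative) namespace. */
        return write_lru_size("example-namespace", "clear") ? 1 : 0;
}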
+
+void ldlm_proc_namespace(struct ldlm_namespace *ns)
+{
+        struct lprocfs_vars lock_vars[2];
+        char lock_name[MAX_STRING_SIZE + 1];
+
+        LASSERT(ns != NULL);
+        LASSERT(ns->ns_name != NULL);
+
+        lock_name[MAX_STRING_SIZE] = '\0';
+
+        memset(lock_vars, 0, sizeof(lock_vars));
+        lock_vars[0].name = lock_name;
+
+        snprintf(lock_name, MAX_STRING_SIZE, "%s/resource_count", ns->ns_name);
+        lock_vars[0].data = &ns->ns_refcount;
+        lock_vars[0].read_fptr = lprocfs_rd_atomic;
+        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+        snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_count", ns->ns_name);
+        lock_vars[0].data = &ns->ns_locks;
+        lock_vars[0].read_fptr = lprocfs_rd_atomic;
+        lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+        if (ns_is_client(ns)) {
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_unused_count",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_nr_unused;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_size",
+                         ns->ns_name);
+                lock_vars[0].data = ns;
+                lock_vars[0].read_fptr = lprocfs_rd_lru_size;
+                lock_vars[0].write_fptr = lprocfs_wr_lru_size;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/lru_max_age",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_max_age;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lock_vars[0].write_fptr = lprocfs_wr_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+        } else {
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/ctime_age_limit",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_ctime_age_limit;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lock_vars[0].write_fptr = lprocfs_wr_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/lock_timeouts",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_timeouts;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/max_nolock_bytes",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_max_nolock_size;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lock_vars[0].write_fptr = lprocfs_wr_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/contention_seconds",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_contention_time;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lock_vars[0].write_fptr = lprocfs_wr_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
+
+                snprintf(lock_name, MAX_STRING_SIZE, "%s/contended_locks",
+                         ns->ns_name);
+                lock_vars[0].data = &ns->ns_contended_locks;
+                lock_vars[0].read_fptr = lprocfs_rd_uint;
+                lock_vars[0].write_fptr = lprocfs_wr_uint;
+                lprocfs_add_vars(ldlm_ns_proc_dir, lock_vars, 0);
         }
-        obd_memory += sizeof(*ns->ns_hash) * RES_HASH_SIZE;
+}
+#undef MAX_STRING_SIZE
+#else
+#define ldlm_proc_namespace(ns) do {} while (0)
+#endif /* LPROCFS */
 
-        OBD_ALLOC(ns->ns_name, strlen(name) + 1);
-        if (!ns->ns_name) {
-                LBUG();
-                GOTO(out, ns);
+struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
+                                          ldlm_side_t client, ldlm_appetite_t apt)
+{
+        struct ldlm_namespace *ns = NULL;
+        cfs_list_t *bucket;
+        int rc, idx, namelen;
+        ENTRY;
+
+        rc = ldlm_get_ref();
+        if (rc) {
+                CERROR("ldlm_get_ref failed: %d\n", rc);
+                RETURN(NULL);
         }
+
+        OBD_ALLOC_PTR(ns);
+        if (!ns)
+                GOTO(out_ref, NULL);
+
+        OBD_VMALLOC(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
+        if (!ns->ns_hash)
+                GOTO(out_ns, NULL);
+
+        ns->ns_appetite = apt;
+
+        LASSERT(obd != NULL);
+        ns->ns_obd = obd;
+
+        namelen = strlen(name);
+        OBD_ALLOC(ns->ns_name, namelen + 1);
+        if (!ns->ns_name)
+                GOTO(out_hash, NULL);
+
         strcpy(ns->ns_name, name);
 
-        INIT_LIST_HEAD(&ns->ns_root_list);
-        l_lock_init(&ns->ns_lock);
+        CFS_INIT_LIST_HEAD(&ns->ns_root_list);
+        CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
         ns->ns_refcount = 0;
         ns->ns_client = client;
-        spin_lock_init(&ns->ns_counter_lock);
-        ns->ns_locks = 0;
+        cfs_spin_lock_init(&ns->ns_hash_lock);
+        cfs_atomic_set(&ns->ns_locks, 0);
         ns->ns_resources = 0;
+        cfs_waitq_init(&ns->ns_waitq);
+        ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
+        ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
+        ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
 
         for (bucket = ns->ns_hash + RES_HASH_SIZE - 1; bucket >= ns->ns_hash;
              bucket--)
-                INIT_LIST_HEAD(bucket);
+                CFS_INIT_LIST_HEAD(bucket);
 
-        INIT_LIST_HEAD(&ns->ns_unused_list);
+        CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
         ns->ns_nr_unused = 0;
-        ns->ns_max_unused = LDLM_MAX_UNUSED;
-
-        spin_lock(&ldlm_namespace_lock);
-        list_add(&ns->ns_list_chain, &ldlm_namespace_list);
-        spin_unlock(&ldlm_namespace_lock);
-
-        ns->ns_proc_dir = proc_mkdir(ns->ns_name, ldlm_ns_proc_dir);
-        if (ns->ns_proc_dir == NULL)
-                CERROR("Unable to create proc directory for namespace.\n");
-        proc_entry = create_proc_entry("resource_count", 0444, ns->ns_proc_dir);
-        proc_entry->read_proc = ldlm_proc_ll_rd;
-        proc_entry->data = &ns->ns_resources;
-        proc_entry = create_proc_entry("lock_count", 0444, ns->ns_proc_dir);
-        proc_entry->read_proc = ldlm_proc_ll_rd;
-        proc_entry->data = &ns->ns_locks;
+        ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
+        ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
+        ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
+        ns->ns_timeouts = 0;
+        cfs_spin_lock_init(&ns->ns_unused_lock);
+        ns->ns_orig_connect_flags = 0;
+        ns->ns_connect_flags = 0;
+        ldlm_proc_namespace(ns);
+
+        idx = cfs_atomic_read(ldlm_namespace_nr(client));
+        rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
+        if (rc) {
+                CERROR("Can't initialize lock pool, rc %d\n", rc);
+                GOTO(out_proc, rc);
+        }
 
-        RETURN(ns);
+        at_init(&ns->ns_at_estimate, ldlm_enqueue_min, 0);
 
- out:
-        if (ns && ns->ns_hash) {
-                vfree(ns->ns_hash);
-                obd_memory -= sizeof(*ns->ns_hash) * RES_HASH_SIZE;
-        }
-        if (ns && ns->ns_name)
-                OBD_FREE(ns->ns_name, strlen(name) + 1);
-        if (ns)
-                OBD_FREE(ns, sizeof(*ns));
-        return NULL;
+        ldlm_namespace_register(ns, client);
+        RETURN(ns);
+out_proc:
+        ldlm_namespace_cleanup(ns, 0);
+        OBD_FREE(ns->ns_name, namelen + 1);
+out_hash:
+        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
+out_ns:
+        OBD_FREE_PTR(ns);
+out_ref:
+        ldlm_put_ref();
+        RETURN(NULL);
 }
 
 extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
 
-/* If 'local_only' is true, don't try to tell the server, just cleanup. */
-static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
-                             int local_only)
+/* If flags contains FL_LOCAL_ONLY, don't try to tell the server, just cleanup.
+ * This is currently only used for recovery, and we make certain assumptions
+ * as a result--notably, that we shouldn't cancel locks with refs. -phil
+ *
+ * Called with the ns_lock held. */
+static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
+                             int flags)
 {
-        struct list_head *tmp, *pos;
-        int rc = 0, client = res->lr_namespace->ns_client;
+        cfs_list_t *tmp;
+        int rc = 0, client = ns_is_client(res->lr_namespace);
+        int local_only = (flags & LDLM_FL_LOCAL_ONLY);
         ENTRY;
 
-        list_for_each_safe(tmp, pos, q) {
-                struct ldlm_lock *lock;
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-                LDLM_LOCK_GET(lock);
 
-                /* At shutdown time, don't call the cancellation callback */
-                lock->l_flags |= LDLM_FL_CANCEL;
+        do {
+                struct ldlm_lock *lock = NULL;
+
+                /* First, look for a lock that has not been cleaned up yet;
+                 * all cleaned locks are marked with the CLEANED flag. */
+                lock_res(res);
+                cfs_list_for_each(tmp, q) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
+                        if (lock->l_flags & LDLM_FL_CLEANED) {
+                                lock = NULL;
+                                continue;
+                        }
+                        LDLM_LOCK_GET(lock);
+                        lock->l_flags |= LDLM_FL_CLEANED;
+                        break;
+                }
+
+                if (lock == NULL) {
+                        unlock_res(res);
+                        break;
+                }
+
+                /* Set CBPENDING so nothing in the cancellation path
+                 * can match this lock */
+                lock->l_flags |= LDLM_FL_CBPENDING;
+                lock->l_flags |= LDLM_FL_FAILED;
+                lock->l_flags |= flags;
+
+                /* ... without sending a CANCEL message for local_only. */
+                if (local_only)
+                        lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+
+                if (local_only && (lock->l_readers || lock->l_writers)) {
+                        /* This is a little bit gross, but much better than the
+                         * alternative: pretend that we got a blocking AST from
+                         * the server, so that when the lock is decref'd, it
+                         * will go away ... */
+                        unlock_res(res);
+                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
+                        if (lock->l_completion_ast)
+                                lock->l_completion_ast(lock, 0, NULL);
+                        LDLM_LOCK_RELEASE(lock);
+                        continue;
+                }
 
                 if (client) {
                         struct lustre_handle lockh;
+
+                        unlock_res(res);
                         ldlm_lock2handle(lock, &lockh);
-                        if (!local_only) {
-                                rc = ldlm_cli_cancel(&lockh);
-                                if (rc)
-                                        CERROR("ldlm_cli_cancel: %d\n", rc);
-                        }
-                        /* Force local cleanup on errors, too. */
-                        if (local_only || rc != ELDLM_OK)
-                                ldlm_lock_cancel(lock);
+                        rc = ldlm_cli_cancel(&lockh);
+                        if (rc)
+                                CERROR("ldlm_cli_cancel: %d\n", rc);
                 } else {
-                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
-                                   "client node.\n");
-
                         ldlm_resource_unlink_lock(lock);
+                        unlock_res(res);
+                        LDLM_DEBUG(lock, "Freeing a lock still held by a "
+                                   "client node");
                         ldlm_lock_destroy(lock);
                 }
-                LDLM_LOCK_PUT(lock);
-        }
+                LDLM_LOCK_RELEASE(lock);
+        } while (1);
+
+        EXIT;
 }
 
-int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int local_only)
+int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
 {
+        cfs_list_t *tmp;
         int i;
 
-        l_lock(&ns->ns_lock);
+        if (ns == NULL) {
+                CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
+                return ELDLM_OK;
+        }
+
         for (i = 0; i < RES_HASH_SIZE; i++) {
-                struct list_head *tmp, *pos;
-                list_for_each_safe(tmp, pos, &(ns->ns_hash[i])) {
+                cfs_spin_lock(&ns->ns_hash_lock);
+                tmp = ns->ns_hash[i].next;
+                while (tmp != &(ns->ns_hash[i])) {
                         struct ldlm_resource *res;
-                        res = list_entry(tmp, struct ldlm_resource, lr_hash);
+                        res = cfs_list_entry(tmp, struct ldlm_resource,
+                                             lr_hash);
                         ldlm_resource_getref(res);
-
-                        cleanup_resource(res, &res->lr_granted, local_only);
-                        cleanup_resource(res, &res->lr_converting, local_only);
-                        cleanup_resource(res, &res->lr_waiting, local_only);
-
-                        /* XXX this is a bit counter-intuitive and should
-                         * probably be cleaner: don't force cleanup if we're
-                         * local_only (which is only used by recovery).  We
-                         * probably still have outstanding lock refs which
-                         * reference these resources. -phil */
-                        if (!ldlm_resource_put(res) && !local_only) {
-                                CERROR("Resource refcount nonzero (%d) after "
-                                       "lock cleanup; forcing cleanup.\n",
-                                       atomic_read(&res->lr_refcount));
-                                ldlm_resource_dump(res);
-                                atomic_set(&res->lr_refcount, 1);
-                                ldlm_resource_put(res);
+                        cfs_spin_unlock(&ns->ns_hash_lock);
+                        LDLM_RESOURCE_ADDREF(res);
+
+                        cleanup_resource(res, &res->lr_granted, flags);
+                        cleanup_resource(res, &res->lr_converting, flags);
+                        cleanup_resource(res, &res->lr_waiting, flags);
+
+                        cfs_spin_lock(&ns->ns_hash_lock);
+                        tmp = tmp->next;
+
+                        /* XXX: the former code caused issues in case of a
+                         * race between ldlm_namespace_cleanup() and lockd()
+                         * when a client gets a blocking AST while the lock
+                         * is being destroyed by the server. This is the 1_4
+                         * branch solution; let's see how it behaves. */
+                        LDLM_RESOURCE_DELREF(res);
+                        if (!ldlm_resource_putref_locked(res)) {
+                                CERROR("Namespace %s resource refcount nonzero "
+                                       "(%d) after lock cleanup; forcing "
+                                       "cleanup.\n",
+                                       ns->ns_name,
+                                       cfs_atomic_read(&res->lr_refcount));
+                                CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
+                                       LPU64") (rc: %d)\n", res,
+                                       res->lr_name.name[0],
+                                       res->lr_name.name[1],
+                                       res->lr_name.name[2],
+                                       res->lr_name.name[3],
+                                       cfs_atomic_read(&res->lr_refcount));
                         }
                 }
+                cfs_spin_unlock(&ns->ns_hash_lock);
         }
-        l_unlock(&ns->ns_lock);
 
         return ELDLM_OK;
 }
 
-/* Cleanup, but also free, the namespace */
-int ldlm_namespace_free(struct ldlm_namespace *ns)
+static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
 {
-        if (!ns)
-                RETURN(ELDLM_OK);
+        ENTRY;
 
-        spin_lock(&ldlm_namespace_lock);
-        list_del(&ns->ns_list_chain);
-        remove_proc_entry("resource_count", ns->ns_proc_dir);
-        remove_proc_entry("lock_count", ns->ns_proc_dir);
-        remove_proc_entry(ns->ns_name, ldlm_ns_proc_dir);
-        spin_unlock(&ldlm_namespace_lock);
+        /* At shutdown time, don't call the cancellation callback */
+        ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
+
+        if (ns->ns_refcount > 0) {
+                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+                int rc;
+                CDEBUG(D_DLMTRACE,
+                       "dlm namespace %s free waiting on refcount %d\n",
+                       ns->ns_name, ns->ns_refcount);
+force_wait:
+                if (force)
+                        lwi = LWI_TIMEOUT(obd_timeout * CFS_HZ / 4, NULL, NULL);
+
+                rc = l_wait_event(ns->ns_waitq,
+                                  ns->ns_refcount == 0, &lwi);
+
+                /* Forced cleanups should be able to reclaim all references,
+                 * so it's safe to wait forever... we can't leak locks... */
+                if (force && rc == -ETIMEDOUT) {
+                        LCONSOLE_ERROR("Forced cleanup waiting for %s "
+                                       "namespace with %d resources in use, "
+                                       "(rc=%d)\n", ns->ns_name,
+                                       ns->ns_refcount, rc);
+                        GOTO(force_wait, rc);
+                }
 
-        ldlm_namespace_cleanup(ns, 0);
+                if (ns->ns_refcount) {
+                        LCONSOLE_ERROR("Cleanup waiting for %s namespace "
+                                       "with %d resources in use, (rc=%d)\n",
+                                       ns->ns_name,
+                                       ns->ns_refcount, rc);
+                        RETURN(ELDLM_NAMESPACE_EXISTS);
+                }
+                CDEBUG(D_DLMTRACE,
+                       "dlm namespace %s free done waiting\n", ns->ns_name);
+        }
 
-        vfree(ns->ns_hash /* , sizeof(*ns->ns_hash) * RES_HASH_SIZE */);
-        obd_memory -= sizeof(*ns->ns_hash) * RES_HASH_SIZE;
+        RETURN(ELDLM_OK);
+}
+
+/**
+ * Performs various cleanups for passed \a ns to make it drop refc and be ready
+ * for freeing. Waits for refc == 0.
+ *
+ * The following is done:
+ * (0) Unregister \a ns from its list to make it inaccessible to potential
+ * users like the pools thread and others;
+ * (1) Clear all locks in \a ns.
+ */
+void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
+                               struct obd_import *imp,
+                               int force)
+{
+        int rc;
+        ENTRY;
+        if (!ns) {
+                EXIT;
+                return;
+        }
+
+
+        /*
+         * Can fail with -EINTR when force == 0 in which case try harder.
+         */
+        rc = __ldlm_namespace_free(ns, force);
+        if (rc != ELDLM_OK) {
+                if (imp) {
+                        ptlrpc_disconnect_import(imp, 0);
+                        ptlrpc_invalidate_import(imp);
+                }
+
+                /*
+                 * With all requests dropped and the import inactive
+                 * we are guaranteed all references will be dropped.
+                 */
+                rc = __ldlm_namespace_free(ns, 1);
+                LASSERT(rc == 0);
+        }
+        EXIT;
+}
+
+/**
+ * Frees memory structures related to \a ns. This is only done when
+ * ldlm_namespace_free_prior() has successfully removed all resources
+ * referencing \a ns and its refc == 0.
+ */
+void ldlm_namespace_free_post(struct ldlm_namespace *ns)
+{
+        ENTRY;
+        if (!ns) {
+                EXIT;
+                return;
+        }
+
+
+        /*
+         * Make sure that nobody can find this ns in its list.
+         */
+        ldlm_namespace_unregister(ns, ns->ns_client);
+        /*
+         * Fini the pool _before_ the parent proc dir is removed. This is
+         * important because ldlm_pool_fini() removes its own proc dir, which
+         * is a child of @dir; removing it after @dir may cause an oops.
+         */
+        ldlm_pool_fini(&ns->ns_pool);
+
+#ifdef LPROCFS
+        {
+                struct proc_dir_entry *dir;
+                dir = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
+                if (dir == NULL) {
+                        CERROR("dlm namespace %s has no procfs dir?\n",
+                               ns->ns_name);
+                } else {
+                        lprocfs_remove(&dir);
+                }
+        }
+#endif
+
+        OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
         OBD_FREE(ns->ns_name, strlen(ns->ns_name) + 1);
-        OBD_FREE(ns, sizeof(*ns));
 
-        return ELDLM_OK;
+        /*
+         * Namespace \a ns should not be on any list at this time, otherwise
+         * this will cause issues related to using a freed \a ns in the pools
+         * thread.
+         */
+        LASSERT(cfs_list_empty(&ns->ns_list_chain));
+        OBD_FREE_PTR(ns);
+        ldlm_put_ref();
+        EXIT;
 }
 
-int ldlm_client_free(struct obd_export *exp)
+
+/* Cleanup the resource, and free namespace.
+ * bug 12864:
+ * Deadlock issue:
+ * proc1: destroy import
+ *        class_disconnect_export(grab cl_sem) ->
+ *              -> ldlm_namespace_free ->
+ *              -> lprocfs_remove(grab _lprocfs_lock).
+ * proc2: read proc info
+ *        lprocfs_fops_read(grab _lprocfs_lock) ->
+ *              -> osc_rd_active, etc(grab cl_sem).
+ *
+ * So ldlm_namespace_free has to be split into two parts: the first part,
+ * ldlm_namespace_free_prior, cleans up the resources that are still in use;
+ * the second part, ldlm_namespace_free_post, unregisters the lprocfs entries
+ * and then frees the memory. It is called without cli->cl_sem held.
+ */
+void ldlm_namespace_free(struct ldlm_namespace *ns,
+                         struct obd_import *imp,
+                         int force)
 {
-        struct ldlm_export_data *led = &exp->exp_ldlm_data;
-        ptlrpc_cleanup_client(&led->led_import);
-        RETURN(0);
+        ldlm_namespace_free_prior(ns, imp, force);
+        ldlm_namespace_free_post(ns);
+}
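As the bug 12864 comment above explains, the split exists so that lprocfs removal never runs while cl_sem is held. The following is only a sketch of that calling pattern, assuming a hypothetical teardown path that owns a cl_sem-like semaphore; it is not the real obd_device/client_obd teardown code.

/* Sketch only; assumes the Lustre kernel context (lustre_dlm.h). */
static void example_namespace_teardown(struct ldlm_namespace *ns,
                                       struct obd_import *imp,
                                       cfs_semaphore_t *cl_sem)
{
        /* Part 1: cancel and clean up all locks; may still talk to the
         * server through imp.  Safe to do while cl_sem is held. */
        cfs_down(cl_sem);
        ldlm_namespace_free_prior(ns, imp, 0);
        cfs_up(cl_sem);

        /* Part 2: unregister the lprocfs entries and free memory, only after
         * cl_sem is released, so a concurrent lprocfs read (which takes
         * _lprocfs_lock and then cl_sem) cannot deadlock against us. */
        ldlm_namespace_free_post(ns);
}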
+
+
+void ldlm_namespace_get_locked(struct ldlm_namespace *ns)
+{
+        ns->ns_refcount++;
+}
+
+void ldlm_namespace_get(struct ldlm_namespace *ns)
+{
+        cfs_spin_lock(&ns->ns_hash_lock);
+        ldlm_namespace_get_locked(ns);
+        cfs_spin_unlock(&ns->ns_hash_lock);
+}
+
+void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup)
+{
+        LASSERT(ns->ns_refcount > 0);
+        ns->ns_refcount--;
+        if (ns->ns_refcount == 0 && wakeup)
+                cfs_waitq_signal(&ns->ns_waitq);
+}
+
+void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup)
+{
+        cfs_spin_lock(&ns->ns_hash_lock);
+        ldlm_namespace_put_locked(ns, wakeup);
+        cfs_spin_unlock(&ns->ns_hash_lock);
 }
 
-static __u32 ldlm_hash_fn(struct ldlm_resource *parent, __u64 *name)
+/* Register @ns in the list of namespaces */
+void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
+{
+        cfs_mutex_down(ldlm_namespace_lock(client));
+        LASSERT(cfs_list_empty(&ns->ns_list_chain));
+        cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
+        cfs_atomic_inc(ldlm_namespace_nr(client));
+        cfs_mutex_up(ldlm_namespace_lock(client));
+}
+
+/* Unregister @ns from the list of namespaces */
+void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
+{
+        cfs_mutex_down(ldlm_namespace_lock(client));
+        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+        /*
+         * Some asserts and possibly other parts of the code are still using
+         * list_empty(&ns->ns_list_chain). This is why it is important
+         * to use list_del_init() here.
+         */
+        cfs_list_del_init(&ns->ns_list_chain);
+        cfs_atomic_dec(ldlm_namespace_nr(client));
+        cfs_mutex_up(ldlm_namespace_lock(client));
+}
+
+/* Should be called under ldlm_namespace_lock(client) taken */
+void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
+{
+        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+        LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
+        cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
+}
+
+/* Should be called under ldlm_namespace_lock(client) taken */
+struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
+{
+        LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
+        LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
+        return container_of(ldlm_namespace_list(client)->next,
+                struct ldlm_namespace, ns_list_chain);
+}
+static __u32 ldlm_hash_fn(struct ldlm_resource *parent,
+                          const struct ldlm_res_id *name)
 {
         __u32 hash = 0;
         int i;
 
         for (i = 0; i < RES_NAME_SIZE; i++)
-                hash += name[i];
+                hash += name->name[i];
 
         hash += (__u32)((unsigned long)parent >> 4);
 
@@ -245,65 +784,119 @@ static __u32 ldlm_hash_fn(struct ldlm_resource *parent, __u64 *name)
 static struct ldlm_resource *ldlm_resource_new(void)
 {
         struct ldlm_resource *res;
+        int idx;
 
-        res = kmem_cache_alloc(ldlm_resource_slab, SLAB_KERNEL);
-        if (res == NULL) {
-                LBUG();
+        OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, CFS_ALLOC_IO);
+        if (res == NULL)
                 return NULL;
-        }
+
         memset(res, 0, sizeof(*res));
 
-        INIT_LIST_HEAD(&res->lr_children);
-        INIT_LIST_HEAD(&res->lr_childof);
-        INIT_LIST_HEAD(&res->lr_granted);
-        INIT_LIST_HEAD(&res->lr_converting);
-        INIT_LIST_HEAD(&res->lr_waiting);
+        CFS_INIT_LIST_HEAD(&res->lr_children);
+        CFS_INIT_LIST_HEAD(&res->lr_childof);
+        CFS_INIT_LIST_HEAD(&res->lr_granted);
+        CFS_INIT_LIST_HEAD(&res->lr_converting);
+        CFS_INIT_LIST_HEAD(&res->lr_waiting);
+
+        /* initialize interval trees for each lock mode */
+        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+                res->lr_itree[idx].lit_size = 0;
+                res->lr_itree[idx].lit_mode = 1 << idx;
+                res->lr_itree[idx].lit_root = NULL;
+        }
 
-        atomic_set(&res->lr_refcount, 1);
+        cfs_atomic_set(&res->lr_refcount, 1);
+        cfs_spin_lock_init(&res->lr_lock);
+        lu_ref_init(&res->lr_reference);
+
+        /* The one who creates the resource must unlock the semaphore
+         * after LVB initialization. */
+        cfs_init_mutex_locked(&res->lr_lvb_sem);
 
         return res;
 }
 
+/* must be called with hash lock held */
+static struct ldlm_resource *
+ldlm_resource_find(struct ldlm_namespace *ns, const struct ldlm_res_id *name,
+                   __u32 hash)
+{
+        cfs_list_t *bucket, *tmp;
+        struct ldlm_resource *res;
+
+        LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
+        bucket = ns->ns_hash + hash;
+
+        cfs_list_for_each(tmp, bucket) {
+                res = cfs_list_entry(tmp, struct ldlm_resource, lr_hash);
+                if (memcmp(&res->lr_name, name, sizeof(res->lr_name)) == 0)
+                        return res;
+        }
+
+        return NULL;
+}
+
 /* Args: locked namespace
  * Returns: newly-allocated, referenced, unlocked resource */
-static struct ldlm_resource *ldlm_resource_add(struct ldlm_namespace *ns,
-                                               struct ldlm_resource *parent,
-                                               __u64 *name, __u32 type)
+static struct ldlm_resource *
+ldlm_resource_add(struct ldlm_namespace *ns, struct ldlm_resource *parent,
+                  const struct ldlm_res_id *name, __u32 hash, ldlm_type_t type)
 {
-        struct list_head *bucket;
-        struct ldlm_resource *res;
+        cfs_list_t *bucket;
+        struct ldlm_resource *res, *old_res;
         ENTRY;
 
-        if (type < LDLM_MIN_TYPE || type > LDLM_MAX_TYPE) {
-                LBUG();
-                RETURN(NULL);
-        }
+        LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
+                 "type: %d\n", type);
 
         res = ldlm_resource_new();
-        if (!res) {
-                LBUG();
+        if (!res)
                 RETURN(NULL);
-        }
-
-        spin_lock(&ns->ns_counter_lock);
-        ns->ns_resources++;
-        spin_unlock(&ns->ns_counter_lock);
 
-        memcpy(res->lr_name, name, sizeof(res->lr_name));
+        res->lr_name = *name;
         res->lr_namespace = ns;
-        ns->ns_refcount++;
-
         res->lr_type = type;
         res->lr_most_restr = LCK_NL;
 
-        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);
-        list_add(&res->lr_hash, bucket);
+        cfs_spin_lock(&ns->ns_hash_lock);
+        old_res = ldlm_resource_find(ns, name, hash);
+        if (old_res) {
+                /* someone won the race and added the resource before */
+                ldlm_resource_getref(old_res);
+                cfs_spin_unlock(&ns->ns_hash_lock);
+                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+                /* synchronize WRT resource creation */
+                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
+                        cfs_down(&old_res->lr_lvb_sem);
+                        cfs_up(&old_res->lr_lvb_sem);
+                }
+                RETURN(old_res);
+        }
+
+        /* we won! let's add the resource */
+        bucket = ns->ns_hash + hash;
+        cfs_list_add(&res->lr_hash, bucket);
+        ns->ns_resources++;
+        ldlm_namespace_get_locked(ns);
 
-        if (parent == NULL)
-                list_add(&res->lr_childof, &ns->ns_root_list);
-        else {
+        if (parent == NULL) {
+                cfs_list_add(&res->lr_childof, &ns->ns_root_list);
+        } else {
                 res->lr_parent = parent;
-                list_add(&res->lr_childof, &parent->lr_children);
+                cfs_list_add(&res->lr_childof, &parent->lr_children);
+        }
+        cfs_spin_unlock(&ns->ns_hash_lock);
+
+        if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
+                int rc;
+
+                OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
+                rc = ns->ns_lvbo->lvbo_init(res);
+                if (rc)
+                        CERROR("lvbo_init failed for resource "
+                               LPU64": rc %d\n", name->name[0], rc);
+                /* we create resource with locked lr_lvb_sem */
+                cfs_up(&res->lr_lvb_sem);
         }
 
         RETURN(res);
@@ -312,199 +905,293 @@ static struct ldlm_resource *ldlm_resource_add(struct ldlm_namespace *ns,
 /* Args: unlocked namespace
  * Locks: takes and releases ns->ns_lock and res->lr_lock
  * Returns: referenced, unlocked ldlm_resource or NULL */
-struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
-                                        struct ldlm_resource *parent,
-                                        __u64 *name, __u32 type, int create)
+struct ldlm_resource *
+ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
+                  const struct ldlm_res_id *name, ldlm_type_t type, int create)
 {
-        struct list_head *bucket;
-        struct list_head *tmp = bucket;
+        __u32 hash = ldlm_hash_fn(parent, name);
         struct ldlm_resource *res = NULL;
         ENTRY;
 
-        if (ns == NULL || ns->ns_hash == NULL) {
-                LBUG();
-                RETURN(NULL);
-        }
-
-        l_lock(&ns->ns_lock);
-        bucket = ns->ns_hash + ldlm_hash_fn(parent, name);
-
-        list_for_each(tmp, bucket) {
-                struct ldlm_resource *chk;
-                chk = list_entry(tmp, struct ldlm_resource, lr_hash);
-
-                if (memcmp(chk->lr_name, name, sizeof(chk->lr_name)) == 0) {
-                        res = chk;
-                        atomic_inc(&res->lr_refcount);
-                        EXIT;
-                        break;
+        LASSERT(ns != NULL);
+        LASSERT(ns->ns_hash != NULL);
+        LASSERT(name->name[0] != 0);
+
+        cfs_spin_lock(&ns->ns_hash_lock);
+        res = ldlm_resource_find(ns, name, hash);
+        if (res) {
+                ldlm_resource_getref(res);
+                cfs_spin_unlock(&ns->ns_hash_lock);
+                /* synchronize WRT resource creation */
+                if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
+                        cfs_down(&res->lr_lvb_sem);
+                        cfs_up(&res->lr_lvb_sem);
                 }
+                RETURN(res);
         }
+        cfs_spin_unlock(&ns->ns_hash_lock);
 
-        if (res == NULL && create)
-                res = ldlm_resource_add(ns, parent, name, type);
-        l_unlock(&ns->ns_lock);
+        if (create == 0)
+                RETURN(NULL);
 
+        res = ldlm_resource_add(ns, parent, name, hash, type);
         RETURN(res);
 }
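A short kernel-side sketch of the get/put pairing this API expects: the resource comes back referenced and unlocked, and the reference is dropped with ldlm_resource_putref(). The res_id value below is purely illustrative.

/* Sketch only; assumes the Lustre kernel context (lustre_dlm.h). */
static void example_resource_lookup(struct ldlm_namespace *ns)
{
        struct ldlm_res_id res_id = { .name = { 0x1234, 0, 0, 0 } };
        struct ldlm_resource *res;

        /* create == 1: allocate the resource if it does not exist yet. */
        res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 1);
        if (res == NULL)
                return;

        /* ... use the referenced, unlocked resource ... */

        ldlm_resource_putref(res);
}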
 
 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
 {
-        atomic_inc(&res->lr_refcount);
+        LASSERT(res != NULL);
+        LASSERT(res != LP_POISON);
+        cfs_atomic_inc(&res->lr_refcount);
+        CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
+               cfs_atomic_read(&res->lr_refcount));
         return res;
 }
 
-/* Returns 1 if the resource was freed, 0 if it remains. */
-int ldlm_resource_put(struct ldlm_resource *res)
+void __ldlm_resource_putref_final(struct ldlm_resource *res)
 {
-        int rc = 0;
+        struct ldlm_namespace *ns = res->lr_namespace;
 
-        if (atomic_dec_and_test(&res->lr_refcount)) {
-                struct ldlm_namespace *ns = res->lr_namespace;
-                ENTRY;
+        LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
 
-                l_lock(&ns->ns_lock);
+        if (!cfs_list_empty(&res->lr_granted)) {
+                ldlm_resource_dump(D_ERROR, res);
+                LBUG();
+        }
 
-                if (atomic_read(&res->lr_refcount) != 0) {
-                        /* We lost the race. */
-                        l_unlock(&ns->ns_lock);
-                        goto out;
-                }
+        if (!cfs_list_empty(&res->lr_converting)) {
+                ldlm_resource_dump(D_ERROR, res);
+                LBUG();
+        }
 
-                if (!list_empty(&res->lr_granted))
-                        LBUG();
+        if (!cfs_list_empty(&res->lr_waiting)) {
+                ldlm_resource_dump(D_ERROR, res);
+                LBUG();
+        }
 
-                if (!list_empty(&res->lr_converting))
-                        LBUG();
+        if (!cfs_list_empty(&res->lr_children)) {
+                ldlm_resource_dump(D_ERROR, res);
+                LBUG();
+        }
+
+        /* Pass 0 here so as not to wake up ->ns_waitq yet; we will do that
+         * a few lines below when all children are freed. */
+        ldlm_namespace_put_locked(ns, 0);
+        cfs_list_del_init(&res->lr_hash);
+        cfs_list_del_init(&res->lr_childof);
+        lu_ref_fini(&res->lr_reference);
 
-                if (!list_empty(&res->lr_waiting))
-                        LBUG();
+        ns->ns_resources--;
+        if (ns->ns_resources == 0)
+                cfs_waitq_signal(&ns->ns_waitq);
+}
 
-                if (!list_empty(&res->lr_children))
-                        LBUG();
+/* Returns 1 if the resource was freed, 0 if it remains. */
+int ldlm_resource_putref(struct ldlm_resource *res)
+{
+        struct ldlm_namespace *ns = res->lr_namespace;
+        int rc = 0;
+        ENTRY;
 
-                ns->ns_refcount--;
-                list_del(&res->lr_hash);
-                list_del(&res->lr_childof);
+        CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
+               cfs_atomic_read(&res->lr_refcount) - 1);
+        LASSERTF(cfs_atomic_read(&res->lr_refcount) > 0, "%d",
+                 cfs_atomic_read(&res->lr_refcount));
+        LASSERTF(cfs_atomic_read(&res->lr_refcount) < LI_POISON, "%d",
+                 cfs_atomic_read(&res->lr_refcount));
+
+        if (cfs_atomic_dec_and_lock(&res->lr_refcount, &ns->ns_hash_lock)) {
+                __ldlm_resource_putref_final(res);
+                cfs_spin_unlock(&ns->ns_hash_lock);
+                if (res->lr_lvb_data)
+                        OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
+                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+                rc = 1;
+        }
 
-                kmem_cache_free(ldlm_resource_slab, res);
-                l_unlock(&ns->ns_lock);
+        RETURN(rc);
+}
 
-                spin_lock(&ns->ns_counter_lock);
-                ns->ns_resources--;
-                spin_unlock(&ns->ns_counter_lock);
+/* Returns 1 if the resource was freed, 0 if it remains. */
+int ldlm_resource_putref_locked(struct ldlm_resource *res)
+{
+        int rc = 0;
+        ENTRY;
 
+        CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
+               cfs_atomic_read(&res->lr_refcount) - 1);
+        LASSERT(cfs_atomic_read(&res->lr_refcount) > 0);
+        LASSERT(cfs_atomic_read(&res->lr_refcount) < LI_POISON);
+
+        LASSERT(cfs_atomic_read(&res->lr_refcount) >= 0);
+        if (cfs_atomic_dec_and_test(&res->lr_refcount)) {
+                __ldlm_resource_putref_final(res);
+                if (res->lr_lvb_data)
+                        OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
+                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
                 rc = 1;
-        } else {
-                ENTRY;
-        out:
-                if (atomic_read(&res->lr_refcount) < 0)
-                        LBUG();
         }
 
         RETURN(rc);
 }
 
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res, cfs_list_t *head,
                             struct ldlm_lock *lock)
 {
-        l_lock(&res->lr_namespace->ns_lock);
+        check_res_locked(res);
 
-        ldlm_resource_dump(res);
-        ldlm_lock_dump(lock);
+        ldlm_resource_dump(D_INFO, res);
+        CDEBUG(D_OTHER, "About to add this lock:\n");
+        ldlm_lock_dump(D_OTHER, lock, 0);
 
-        if (!list_empty(&lock->l_res_link))
-                LBUG();
+        if (lock->l_destroyed) {
+                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+                return;
+        }
 
-        list_add(&lock->l_res_link, head);
-        l_unlock(&res->lr_namespace->ns_lock);
+        LASSERT(cfs_list_empty(&lock->l_res_link));
+
+        cfs_list_add_tail(&lock->l_res_link, head);
+}
+
+void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
+                                     struct ldlm_lock *new)
+{
+        struct ldlm_resource *res = original->l_resource;
+
+        check_res_locked(res);
+
+        ldlm_resource_dump(D_INFO, res);
+        CDEBUG(D_OTHER, "About to insert this lock after %p:\n", original);
+        ldlm_lock_dump(D_OTHER, new, 0);
+
+        if (new->l_destroyed) {
+                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+                goto out;
+        }
+
+        LASSERT(cfs_list_empty(&new->l_res_link));
+
+        cfs_list_add(&new->l_res_link, &original->l_res_link);
+ out:;
 }
 
 void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
 {
-        l_lock(&lock->l_resource->lr_namespace->ns_lock);
-        list_del_init(&lock->l_res_link);
-        l_unlock(&lock->l_resource->lr_namespace->ns_lock);
+        int type = lock->l_resource->lr_type;
+
+        check_res_locked(lock->l_resource);
+        if (type == LDLM_IBITS || type == LDLM_PLAIN)
+                ldlm_unlink_lock_skiplist(lock);
+        else if (type == LDLM_EXTENT)
+                ldlm_extent_unlink_lock(lock);
+        cfs_list_del_init(&lock->l_res_link);
 }
 
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
 {
         desc->lr_type = res->lr_type;
-        memcpy(desc->lr_name, res->lr_name, sizeof(desc->lr_name));
-        memcpy(desc->lr_version, res->lr_version, sizeof(desc->lr_version));
+        desc->lr_name = res->lr_name;
 }
 
-void ldlm_dump_all_namespaces(void)
+void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
+
+        if (!((libcfs_debug | D_ERROR) & level))
+                return;
 
-        spin_lock(&ldlm_namespace_lock);
+        cfs_mutex_down(ldlm_namespace_lock(client));
 
-        list_for_each(tmp, &ldlm_namespace_list) {
+        cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
                 struct ldlm_namespace *ns;
-                ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
-                ldlm_namespace_dump(ns);
+                ns = cfs_list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+                ldlm_namespace_dump(level, ns);
         }
 
-        spin_unlock(&ldlm_namespace_lock);
+        cfs_mutex_up(ldlm_namespace_lock(client));
 }
 
-void ldlm_namespace_dump(struct ldlm_namespace *ns)
+void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
 {
-        struct list_head *tmp;
+        cfs_list_t *tmp;
 
-        l_lock(&ns->ns_lock);
-        CDEBUG(D_OTHER, "--- Namespace: %s (rc: %d, client: %d)\n", ns->ns_name,
-               ns->ns_refcount, ns->ns_client);
+        if (!((libcfs_debug | D_ERROR) & level))
+                return;
 
-        list_for_each(tmp, &ns->ns_root_list) {
-                struct ldlm_resource *res;
-                res = list_entry(tmp, struct ldlm_resource, lr_childof);
+        CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
+               ns->ns_name, ns->ns_refcount,
+               ns_is_client(ns) ? "client" : "server");
 
-                /* Once we have resources with children, this should really dump
-                 * them recursively. */
-                ldlm_resource_dump(res);
-        }
-        l_unlock(&ns->ns_lock);
-}
+        if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
+                return;
 
-void ldlm_resource_dump(struct ldlm_resource *res)
-{
-        struct list_head *tmp;
-        char name[256];
-
-        if (RES_NAME_SIZE != 3)
-                LBUG();
+        cfs_spin_lock(&ns->ns_hash_lock);
+        tmp = ns->ns_root_list.next;
+        while (tmp != &ns->ns_root_list) {
+                struct ldlm_resource *res;
+                res = cfs_list_entry(tmp, struct ldlm_resource, lr_childof);
 
-        snprintf(name, sizeof(name), "%Lx %Lx %Lx",
-                 (unsigned long long)res->lr_name[0],
-                 (unsigned long long)res->lr_name[1],
-                 (unsigned long long)res->lr_name[2]);
+                ldlm_resource_getref(res);
+                cfs_spin_unlock(&ns->ns_hash_lock);
+                LDLM_RESOURCE_ADDREF(res);
 
-        CDEBUG(D_OTHER, "--- Resource: %p (%s) (rc: %d)\n", res, name,
-               atomic_read(&res->lr_refcount));
-        CDEBUG(D_OTHER, "Namespace: %p (%s)\n", res->lr_namespace,
-               res->lr_namespace->ns_name);
-        CDEBUG(D_OTHER, "Parent: %p, root: %p\n", res->lr_parent, res->lr_root);
+                lock_res(res);
+                ldlm_resource_dump(level, res);
+                unlock_res(res);
 
-        CDEBUG(D_OTHER, "Granted locks:\n");
-        list_for_each(tmp, &res->lr_granted) {
-                struct ldlm_lock *lock;
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-                ldlm_lock_dump(lock);
+                LDLM_RESOURCE_DELREF(res);
+                cfs_spin_lock(&ns->ns_hash_lock);
+                tmp = tmp->next;
+                ldlm_resource_putref_locked(res);
         }
+        ns->ns_next_dump = cfs_time_shift(10);
+        cfs_spin_unlock(&ns->ns_hash_lock);
+}
 
-        CDEBUG(D_OTHER, "Converting locks:\n");
-        list_for_each(tmp, &res->lr_converting) {
-                struct ldlm_lock *lock;
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-                ldlm_lock_dump(lock);
+void ldlm_resource_dump(int level, struct ldlm_resource *res)
+{
+        cfs_list_t *tmp;
+        int pos;
+
+        CLASSERT(RES_NAME_SIZE == 4);
+
+        if (!((libcfs_debug | D_ERROR) & level))
+                return;
+
+        CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
+               ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
+               res->lr_name.name[2], res->lr_name.name[3],
+               cfs_atomic_read(&res->lr_refcount));
+
+        if (!cfs_list_empty(&res->lr_granted)) {
+                pos = 0;
+                CDEBUG(level, "Granted locks:\n");
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        struct ldlm_lock *lock;
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
+                        ldlm_lock_dump(level, lock, ++pos);
+                }
         }
-
-        CDEBUG(D_OTHER, "Waiting locks:\n");
-        list_for_each(tmp, &res->lr_waiting) {
-                struct ldlm_lock *lock;
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-                ldlm_lock_dump(lock);
+        if (!cfs_list_empty(&res->lr_converting)) {
+                pos = 0;
+                CDEBUG(level, "Converting locks:\n");
+                cfs_list_for_each(tmp, &res->lr_converting) {
+                        struct ldlm_lock *lock;
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
+                        ldlm_lock_dump(level, lock, ++pos);
+                }
+        }
+        if (!cfs_list_empty(&res->lr_waiting)) {
+                pos = 0;
+                CDEBUG(level, "Waiting locks:\n");
+                cfs_list_for_each(tmp, &res->lr_waiting) {
+                        struct ldlm_lock *lock;
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
+                        ldlm_lock_dump(level, lock, ++pos);
+                }
         }
 }