LU-1842 ldlm: support for sending GL ASTs to multiple locks
[fs/lustre-release.git] lustre/include/lustre_dlm.h
index 200ebb8..480bced 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -72,6 +72,7 @@ struct obd_device;
 #endif
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
+#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
 
 typedef enum {
         ELDLM_OK = 0,
@@ -110,7 +111,7 @@ typedef enum {
 #define LDLM_FL_REPLAY         0x000100
 
 #define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
-#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */
+#define LDLM_FL_LOCAL_ONLY     0x000400
 
 /* don't run the cancel callback under ldlm_cli_cancel_unused */
 #define LDLM_FL_FAILED         0x000800
@@ -118,14 +119,15 @@ typedef enum {
 #define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
 #define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
 #define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
-#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
 #define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
 
 #define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
                                          * indefinitely */
 
 /* file & record locking */
-#define LDLM_FL_BLOCK_NOWAIT   0x040000 // server told not to wait if blocked
+#define LDLM_FL_BLOCK_NOWAIT   0x040000 /* server told not to wait if blocked.
+                                         * For AGL, OST will not send glimpse
+                                         * callback. */
 #define LDLM_FL_TEST_LOCK      0x080000 // return blocking lock
 
 /* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
@@ -149,6 +151,10 @@ typedef enum {
  * list. */
 #define LDLM_FL_KMS_IGNORE     0x200000
 
+/* Don't put lock into the LRU list, so that it is not canceled due to aging.
+ * Used by MGC locks, which are cancelled only at unmount or by callback. */
+#define LDLM_FL_NO_LRU         0x400000
+
 /* Immediately cancel such locks when they block some other locks. Send a
  * cancel notification to the original lock holder, but expect no reply. This is
  * for clients (like liblustre) that cannot be expected to reliably respond
@@ -168,9 +174,6 @@ typedef enum {
  * w/o involving separate thread. in order to decrease cs rate */
 #define LDLM_FL_ATOMIC_CB      0x4000000
 
-/* Cancel lock asynchronously. See ldlm_cli_cancel_unused_resource. */
-#define LDLM_FL_ASYNC           0x8000000
-
 /* It may happen that a client initiates 2 operations, e.g. unlink and mkdir,
  * such that the server sends blocking asts for conflicting locks to this client for
  * the 1st operation, whereas the 2nd operation has canceled this lock and
@@ -204,6 +207,10 @@ typedef enum {
  * emulation + race with upcoming bl_ast.  */
 #define LDLM_FL_FAIL_LOC       0x100000000ULL
 
+/* Used while processing the unused list to know that we have already
+ * handled this lock and decided to skip it */
+#define LDLM_FL_SKIPPED        0x200000000ULL
+
 /* The blocking callback is overloaded to perform two functions.  These flags
  * indicate which operation should be performed. */
 #define LDLM_CB_BLOCKING    1
@@ -258,9 +265,9 @@ static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
  *     led_lock
  *
  * lr_lock
- *     ns_unused_lock
+ *     ns_lock
  *
- * lr_lvb_sem
+ * lr_lvb_mutex
  *     lr_lock
  *
  */
@@ -327,10 +334,6 @@ struct ldlm_pool {
          */
         cfs_atomic_t           pl_cancel_rate;
         /**
-         * Grant speed (GR-CR) per T.
-         */
-        cfs_atomic_t           pl_grant_speed;
-        /**
          * Server lock volume. Protected by pl_lock.
          */
         __u64                  pl_server_lock_volume;
@@ -369,11 +372,14 @@ typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
                                void *req_cookie, ldlm_mode_t mode, int flags,
                                void *data);
 
+typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+
 struct ldlm_valblock_ops {
         int (*lvbo_init)(struct ldlm_resource *res);
         int (*lvbo_update)(struct ldlm_resource *res,
                            struct ptlrpc_request *r,
                            int increase);
+        int (*lvbo_free)(struct ldlm_resource *res);
 };
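The new lvbo_free() hook gives a server a place to release per-resource lock value block state when a resource is freed. A minimal sketch of how a server-side namespace might wire up the ops table; the ofd_lvbo_* names are hypothetical, and ns_lvbo is the struct ldlm_namespace field declared further down in this header:

/* Sketch only: real implementations would fill/refresh/free lr_lvb_data. */
static int ofd_lvbo_init(struct ldlm_resource *res) { return 0; }
static int ofd_lvbo_update(struct ldlm_resource *res,
                           struct ptlrpc_request *r, int increase) { return 0; }
static int ofd_lvbo_free(struct ldlm_resource *res) { return 0; }

static struct ldlm_valblock_ops ofd_lvbo_ops = {
        .lvbo_init   = ofd_lvbo_init,
        .lvbo_update = ofd_lvbo_update,
        .lvbo_free   = ofd_lvbo_free,   /* new hook: release LVB state */
};

/* during namespace setup:  ns->ns_lvbo = &ofd_lvbo_ops; */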
 
 typedef enum {
@@ -389,11 +395,41 @@ typedef enum {
 #define NS_DEFAULT_CONTENTION_SECONDS 2
 #define NS_DEFAULT_CONTENDED_LOCKS 32
 
+struct ldlm_ns_bucket {
+        /** back pointer to the owning namespace */
+        struct ldlm_namespace      *nsb_namespace;
+        /** estimated lock callback time */
+        struct adaptive_timeout     nsb_at_estimate;
+};
+
+enum {
+        /** ldlm namespace lock stats */
+        LDLM_NSS_LOCKS          = 0,
+        LDLM_NSS_LAST
+};
+
+typedef enum {
+        /** invalid type */
+        LDLM_NS_TYPE_UNKNOWN    = 0,
+        /** mdc namespace */
+        LDLM_NS_TYPE_MDC,
+        /** mds namespace */
+        LDLM_NS_TYPE_MDT,
+        /** osc namespace */
+        LDLM_NS_TYPE_OSC,
+        /** ost namespace */
+        LDLM_NS_TYPE_OST,
+        /** mgc namespace */
+        LDLM_NS_TYPE_MGC,
+        /** mgs namespace */
+        LDLM_NS_TYPE_MGT,
+} ldlm_ns_type_t;
+
 struct ldlm_namespace {
         /**
-         * Namespace name. Used for logging, etc.
+         * Backward link to obd, required for ldlm pool to store new SLV.
          */
-        char                  *ns_name;
+        struct obd_device     *ns_obd;
 
         /**
          * Is this a client-side lock tree?
@@ -401,31 +437,30 @@ struct ldlm_namespace {
         ldlm_side_t            ns_client;
 
         /**
-         * Namespce connect flags supported by server (may be changed via proc,
-         * lru resize may be disabled/enabled).
+         * resource hash
          */
-        __u64                  ns_connect_flags;
+        cfs_hash_t            *ns_rs_hash;
 
-         /**
-          * Client side orig connect flags supported by server.
-          */
-        __u64                  ns_orig_connect_flags;
+        /**
+         * Spinlock serializing namespace state, e.g. the LRU (unused locks) list
+         */
+        cfs_spinlock_t         ns_lock;
 
         /**
-         * Hash table for namespace.
+         * big refcount (by bucket)
          */
-        cfs_list_t            *ns_hash;
-        cfs_spinlock_t         ns_hash_lock;
+        cfs_atomic_t           ns_bref;
 
-         /**
-          * Count of resources in the hash.
-          */
-        __u32                  ns_refcount;
+        /**
+         * Namespace connect flags supported by server (may be changed via proc,
+         * lru resize may be disabled/enabled).
+         */
+        __u64                  ns_connect_flags;
 
          /**
-          * All root resources in namespace.
+          * Client side orig connect flags supported by server.
           */
-        cfs_list_t             ns_root_list;
+        __u64                  ns_orig_connect_flags;
 
         /**
          * Position in global namespace list.
@@ -437,7 +472,6 @@ struct ldlm_namespace {
          */
         cfs_list_t             ns_unused_list;
         int                    ns_nr_unused;
-        cfs_spinlock_t         ns_unused_lock;
 
         unsigned int           ns_max_unused;
         unsigned int           ns_max_age;
@@ -452,8 +486,6 @@ struct ldlm_namespace {
          */
         cfs_time_t             ns_next_dump;
 
-        cfs_atomic_t           ns_locks;
-        __u64                  ns_resources;
         ldlm_res_policy        ns_policy;
         struct ldlm_valblock_ops *ns_lvbo;
         void                  *ns_lvbp;
@@ -479,11 +511,18 @@ struct ldlm_namespace {
         unsigned               ns_max_nolock_size;
 
         /**
-         * Backward link to obd, required for ldlm pool to store new SLV.
+         * Limit of parallel AST RPC count.
          */
-        struct obd_device     *ns_obd;
+        unsigned               ns_max_parallel_ast;
 
-        struct adaptive_timeout ns_at_estimate;/* estimated lock callback time*/
+        /* callback to cancel locks before replaying them during recovery */
+        ldlm_cancel_for_recovery ns_cancel_for_recovery;
+        /**
+         * ldlm lock stats
+         */
+        struct lprocfs_stats  *ns_stats;
+
+        unsigned               ns_stopping:1;   /* namespace cleanup */
 };
 
 static inline int ns_is_client(struct ldlm_namespace *ns)
@@ -512,15 +551,12 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
         return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
 }
 
-/*
- *
- * Resource hash table
- *
- */
-
-#define RES_HASH_BITS 12
-#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
-#define RES_HASH_MASK (RES_HASH_SIZE - 1)
+static inline void ns_register_cancel(struct ldlm_namespace *ns,
+                                      ldlm_cancel_for_recovery arg)
+{
+        LASSERT(ns != NULL);
+        ns->ns_cancel_for_recovery = arg;
+}
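A hedged sketch of how a client-side device might use the new hook: the callback is consulted per lock before replay during recovery, and a non-zero return is taken here to mean "cancel this lock instead of replaying it". The policy shown is illustrative only, not the real mdc/osc one:

/* Sketch: drop unused locks rather than replaying them after recovery. */
static int client_cancel_for_recovery(struct ldlm_lock *lock)
{
        return lock->l_readers == 0 && lock->l_writers == 0;
}

static void client_init_ns(struct ldlm_namespace *ns)
{
        ns_register_cancel(ns, client_cancel_for_recovery);
}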
 
 struct ldlm_lock;
 
@@ -532,6 +568,15 @@ typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
 typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
 typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
 
+struct ldlm_glimpse_work {
+       struct ldlm_lock        *gl_lock; /* lock to glimpse */
+       cfs_list_t               gl_list; /* linkage to other gl work structs */
+       __u32                    gl_flags;/* see LDLM_GL_WORK_* below */
+};
+
+/* the ldlm_glimpse_work is allocated on the stack and should not be freed */
+#define LDLM_GL_WORK_NOFREE 0x1
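struct ldlm_glimpse_work and ldlm_glimpse_locks() (declared further down, under HAVE_SERVER_SUPPORT) are what let a server batch glimpse ASTs to several locks in one pass, per the commit subject. A minimal, hedged sketch of the call pattern, assuming the usual libcfs list helpers; lock referencing and error handling are omitted:

/* Server-side sketch: glimpse a single lock via the new work-list API.
 * A stack-allocated work item must carry LDLM_GL_WORK_NOFREE; heap-allocated
 * items omit the flag so the AST path may free them. */
static int glimpse_one_lock(struct ldlm_resource *res, struct ldlm_lock *lock)
{
        struct ldlm_glimpse_work gl_work;
        CFS_LIST_HEAD(gl_list);

        gl_work.gl_lock  = lock;
        gl_work.gl_flags = LDLM_GL_WORK_NOFREE;
        cfs_list_add_tail(&gl_work.gl_list, &gl_list);

        /* sends glimpse ASTs for every work item on the list */
        return ldlm_glimpse_locks(res, &gl_list);
}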
+
 /* Interval node data for each LDLM_EXTENT lock */
 struct ldlm_interval {
         struct interval_node li_node;   /* node for tree mgmt */
@@ -549,7 +594,39 @@ struct ldlm_interval_tree {
         struct interval_node *lit_root; /* actually ldlm_interval */
 };
 
-#define LUSTRE_TRACKS_LOCK_EXP_REFS (1)
+#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
+
+/* Cancel flag. */
+typedef enum {
+        LCF_ASYNC      = 0x1, /* Cancel locks asynchronously. */
+        LCF_LOCAL      = 0x2, /* Cancel locks locally, not notifying server */
+        LCF_BL_AST     = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
+                               * in the same RPC */
+} ldlm_cancel_flags_t;
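These flags replace the old int flag plumbing (and the removed LDLM_FL_ASYNC bit) in the cancel paths; the ldlm_cli_cancel_* prototypes below now take ldlm_cancel_flags_t. A hedged usage sketch:

/* Sketch: cancel all unused locks in a namespace without waiting for the
 * cancel RPCs to complete. Passing a NULL resource id to cover the whole
 * namespace follows the existing cancel_unused convention. */
static void drop_unused_locks(struct ldlm_namespace *ns)
{
        ldlm_cli_cancel_unused(ns, NULL, LCF_ASYNC, NULL);
}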
+
+struct ldlm_flock {
+        __u64 start;
+        __u64 end;
+        __u64 owner;
+        __u64 blocking_owner;
+        struct obd_export *blocking_export;
+       /* Protected by the hash lock */
+       __u32 blocking_refs;
+        __u32 pid;
+};
+
+typedef union {
+        struct ldlm_extent l_extent;
+        struct ldlm_flock l_flock;
+        struct ldlm_inodebits l_inodebits;
+} ldlm_policy_data_t;
+
+void ldlm_convert_policy_to_wire(ldlm_type_t type,
+                                 const ldlm_policy_data_t *lpolicy,
+                                 ldlm_wire_policy_data_t *wpolicy);
+void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+                                  const ldlm_wire_policy_data_t *wpolicy,
+                                  ldlm_policy_data_t *lpolicy);
 
 struct ldlm_lock {
         /**
@@ -589,6 +666,12 @@ struct ldlm_lock {
         /**
          * Protected by lr_lock. Requested mode.
          */
+       /**
+        * Protected by per-bucket exp->exp_flock_hash locks. Per export hash
+        * of locks.
+        */
+       cfs_hlist_node_t         l_exp_flock_hash;
+
         ldlm_mode_t              l_req_mode;
         /**
          * Granted mode, also protected by lr_lock.
@@ -630,15 +713,6 @@ struct ldlm_lock {
         __u64                 l_flags;
         __u32                 l_readers;
         __u32                 l_writers;
-        /*
-         * Set for locks that were removed from class hash table and will be
-         * destroyed when last reference to them is released. Set by
-         * ldlm_lock_destroy_internal().
-         *
-         * Protected by lock and resource locks.
-         */
-        __u8                  l_destroyed;
-
         /**
          * If the lock is granted, a process sleeps on this waitq to learn when
          * it's no longer in use.  If the lock is not granted, a process sleeps
@@ -659,6 +733,36 @@ struct ldlm_lock {
 
         struct ldlm_extent    l_req_extent;
 
+        unsigned int          l_failed:1,
+        /*
+         * Set for locks that were removed from class hash table and will be
+         * destroyed when last reference to them is released. Set by
+         * ldlm_lock_destroy_internal().
+         *
+         * Protected by lock and resource locks.
+         */
+                              l_destroyed:1,
+       /*
+        * It is set in lock_res_and_lock() and unset in unlock_res_and_lock().
+        *
+        * NB: compared with check_res_locked(), checking this bit is cheaper.
+        * Also, spin_is_locked() is deprecated for kernel code; one reason is
+        * that it works only on SMP, so users need extra macros such as
+        * LASSERT_SPIN_LOCKED for uniprocessor kernels.
+        */
+                             l_res_locked:1,
+       /*
+        * it's set once we call ldlm_add_waiting_lock_res_locked()
+        * to start the lock-timeout timer and it will never be reset.
+        *
+        * Protected by lock_res_and_lock().
+        */
+                             l_waited:1,
+        /**
+         * flag whether this is a server namespace lock.
+         */
+                              l_ns_srv:1;
+
         /*
          * Client-side-only members.
          */
@@ -670,10 +774,6 @@ struct ldlm_lock {
         void                 *l_lvb_data;
 
         void                 *l_ast_data;
-        cfs_spinlock_t        l_extents_list_lock;
-        cfs_list_t            l_extents_list;
-
-        cfs_list_t            l_cache_locks_list;
 
         /*
          * Server-side-only members.
@@ -694,6 +794,7 @@ struct ldlm_lock {
          */
         __u32                 l_pid;
 
+        int                   l_bl_ast_run;
         /**
          * For ldlm_add_ast_work_item().
          */
@@ -708,7 +809,6 @@ struct ldlm_lock {
         cfs_list_t            l_rk_ast;
 
         struct ldlm_lock     *l_blocking_lock;
-        int                   l_bl_ast_run;
 
         /**
          * Protected by lr_lock, linkages to "skip lists".
@@ -726,17 +826,18 @@ struct ldlm_lock {
         /** referenced export object */
         struct obd_export    *l_exp_refs_target;
 #endif
+        /** export blocking dlm lock list, protected by
+         * l_export->exp_bl_list_lock.
+         * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
+         * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock. */
+        cfs_list_t            l_exp_list;
 };
 
 struct ldlm_resource {
-        struct ldlm_namespace *lr_namespace;
+        struct ldlm_ns_bucket *lr_ns_bucket;
 
         /* protected by ns_hash_lock */
-        cfs_list_t             lr_hash;
-        struct ldlm_resource  *lr_parent;   /* 0 for a root resource */
-        cfs_list_t             lr_children; /* list head for child resources */
-        cfs_list_t             lr_childof;  /* part of ns_root_list if root res,
-                                             * part of lr_children if child */
+        cfs_hlist_node_t       lr_hash;
         cfs_spinlock_t         lr_lock;
 
         /* protected by lr_lock */
@@ -751,8 +852,10 @@ struct ldlm_resource {
         struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];  /* interval trees*/
 
         /* Server-side-only lock value block elements */
-        cfs_semaphore_t        lr_lvb_sem;
+        /** to serialize lvbo_init */
+        cfs_mutex_t            lr_lvb_mutex;
         __u32                  lr_lvb_len;
+        /** protected by lr_lock */
         void                  *lr_lvb_data;
 
         /* when the resource was considered as contended */
@@ -761,8 +864,40 @@ struct ldlm_resource {
          * List of references to this resource. For debugging.
          */
         struct lu_ref          lr_reference;
+
+        struct inode          *lr_lvb_inode;
 };
 
+static inline char *
+ldlm_ns_name(struct ldlm_namespace *ns)
+{
+        return ns->ns_rs_hash->hs_name;
+}
+
+static inline struct ldlm_namespace *
+ldlm_res_to_ns(struct ldlm_resource *res)
+{
+        return res->lr_ns_bucket->nsb_namespace;
+}
+
+static inline struct ldlm_namespace *
+ldlm_lock_to_ns(struct ldlm_lock *lock)
+{
+        return ldlm_res_to_ns(lock->l_resource);
+}
+
+static inline char *
+ldlm_lock_to_ns_name(struct ldlm_lock *lock)
+{
+        return ldlm_ns_name(ldlm_lock_to_ns(lock));
+}
+
+static inline struct adaptive_timeout *
+ldlm_lock_to_ns_at(struct ldlm_lock *lock)
+{
+        return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
+}
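With lr_namespace gone from struct ldlm_resource, code that used to dereference res->lr_namespace goes through these helpers instead (compare the reworked ldlm_res_lvbo_update() below). A small illustrative sketch:

/* Sketch: log which namespace a lock lives in. */
static void print_lock_ns(struct ldlm_lock *lock)
{
        CDEBUG(D_DLMTRACE, "lock in namespace %s\n",
               ldlm_lock_to_ns_name(lock));
}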
+
 struct ldlm_ast_work {
         struct ldlm_lock      *w_lock;
         int                    w_blocking;
@@ -782,7 +917,6 @@ struct ldlm_enqueue_info {
         void *ei_cb_gl;  /* lock glimpse callback */
         void *ei_cb_wg;  /* lock weigh callback */
         void *ei_cbdata; /* Data to be passed into callbacks. */
-        short ei_async:1; /* async request */
 };
 
 extern struct obd_ops ldlm_obd_ops;
@@ -791,45 +925,37 @@ extern char *ldlm_lockname[];
 extern char *ldlm_typename[];
 extern char *ldlm_it2str(int it);
 #ifdef LIBCFS_DEBUG
-#define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
-        CFS_CHECK_STACK();                                              \
+#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do {      \
+        CFS_CHECK_STACK(msgdata, mask, cdls);                           \
                                                                         \
-        if (((level) & D_CANTMASK) != 0 ||                              \
-            ((libcfs_debug & (level)) != 0 &&                           \
-             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) {        \
-                static struct libcfs_debug_msg_data _ldlm_dbg_data =    \
-                DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM,              \
-                                    file, func, line);                  \
-                _ldlm_lock_debug(lock, level, &_ldlm_dbg_data, fmt,     \
-                                 ##a );                                 \
-        }                                                               \
+        if (((mask) & D_CANTMASK) != 0 ||                               \
+            ((libcfs_debug & (mask)) != 0 &&                            \
+             (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))          \
+                _ldlm_lock_debug(lock, msgdata, fmt, ##a);              \
 } while(0)
 
-void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
-                      struct libcfs_debug_msg_data *data, const char *fmt,
-                      ...)
-        __attribute__ ((format (printf, 4, 5)));
+void _ldlm_lock_debug(struct ldlm_lock *lock,
+                      struct libcfs_debug_msg_data *data,
+                      const char *fmt, ...)
+        __attribute__ ((format (printf, 3, 4)));
 
-#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                    \
-        static cfs_debug_limit_state_t _ldlm_cdls;                      \
-        ldlm_lock_debug(&_ldlm_cdls, mask, lock,                        \
-                        __FILE__, __FUNCTION__, __LINE__,               \
-                        "### " fmt , ##a);                              \
+#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                         \
+        static cfs_debug_limit_state_t _ldlm_cdls;                           \
+        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls);              \
+        ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
 } while (0)
 
 #define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
 #define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
 
-#define LDLM_DEBUG(lock, fmt, a...)   do {                              \
-        ldlm_lock_debug(NULL, D_DLMTRACE, lock,                         \
-                        __FILE__, __FUNCTION__, __LINE__,               \
-                         "### " fmt , ##a);                             \
+#define LDLM_DEBUG(lock, fmt, a...)   do {                                  \
+        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL);              \
+        ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, "### " fmt , ##a);\
 } while (0)
 #else /* !LIBCFS_DEBUG */
+# define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) ((void)0)
 # define LDLM_DEBUG(lock, fmt, a...) ((void)0)
 # define LDLM_ERROR(lock, fmt, a...) ((void)0)
-# define ldlm_lock_debuf(cdls, level, lock, file, func, line, fmt, a...) \
-         ((void)0)
 #endif
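The debug macros now declare the libcfs_debug_msg_data on the caller's stack via LIBCFS_DEBUG_MSG_DATA_DECL instead of passing __FILE__/__FUNCTION__/__LINE__ by hand, but call sites are unchanged. For reference, an illustrative use:

static void debug_lock_state(struct ldlm_lock *lock)
{
        LDLM_DEBUG(lock, "readers %u writers %u",
                   lock->l_readers, lock->l_writers);
        if (lock->l_flags & LDLM_FL_CANCELING)
                LDLM_ERROR(lock, "cancel already sent, flags "LPX64,
                           lock->l_flags);
}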
 
 #define LDLM_DEBUG_NOLOCK(format, a...)                 \
@@ -851,10 +977,8 @@ typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
 
 int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
                           void *closure);
-int ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
-                           void *closure);
-int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
-                               ldlm_res_iterator_t iter, void *closure);
+void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
+                            void *closure);
 
 int ldlm_replay_locks(struct obd_import *imp);
 int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
@@ -866,34 +990,51 @@ int ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data);
 /* ldlm_extent.c */
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
 
+struct ldlm_callback_suite {
+        ldlm_completion_callback lcs_completion;
+        ldlm_blocking_callback   lcs_blocking;
+        ldlm_glimpse_callback    lcs_glimpse;
+        ldlm_weigh_callback      lcs_weigh;
+};
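ldlm_handle_enqueue0() below consumes this suite, so a server can pass its AST handlers as one bundle rather than as separate arguments. A hedged sketch using the generic server ASTs declared just after this (server builds only):

/* Sketch: callback suite a server-side enqueue handler might pass on. */
static struct ldlm_callback_suite srv_cbs = {
        .lcs_completion = ldlm_server_completion_ast,
        .lcs_blocking   = ldlm_server_blocking_ast,
        .lcs_glimpse    = ldlm_server_glimpse_ast,
};

/* ...  rc = ldlm_handle_enqueue0(ns, req, dlm_req, &srv_cbs); */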
 
 /* ldlm_lockd.c */
+#ifdef HAVE_SERVER_SUPPORT
 int ldlm_server_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
                              void *data, int flag);
 int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data);
 int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data);
+int ldlm_glimpse_locks(struct ldlm_resource *res, cfs_list_t *gl_work_list);
 int ldlm_handle_enqueue(struct ptlrpc_request *req, ldlm_completion_callback,
                         ldlm_blocking_callback, ldlm_glimpse_callback);
+int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
+                         const struct ldlm_request *dlm_req,
+                         const struct ldlm_callback_suite *cbs);
 int ldlm_handle_convert(struct ptlrpc_request *req);
+int ldlm_handle_convert0(struct ptlrpc_request *req,
+                         const struct ldlm_request *dlm_req);
 int ldlm_handle_cancel(struct ptlrpc_request *req);
 int ldlm_request_cancel(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req, int first);
+void ldlm_revoke_export_locks(struct obd_export *exp);
+#endif
 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
-void ldlm_revoke_export_locks(struct obd_export *exp);
 int ldlm_get_ref(void);
 void ldlm_put_ref(void);
 int ldlm_init_export(struct obd_export *exp);
 void ldlm_destroy_export(struct obd_export *exp);
 
 /* ldlm_lock.c */
+#ifdef HAVE_SERVER_SUPPORT
 ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
+#endif
 void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
 void ldlm_lock2handle(const struct ldlm_lock *lock,
                       struct lustre_handle *lockh);
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
 void ldlm_cancel_callback(struct ldlm_lock *);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *);
+int ldlm_lock_set_data(struct lustre_handle *, void *);
 
 static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
 {
@@ -917,10 +1058,10 @@ ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
 static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                        struct ptlrpc_request *r, int increase)
 {
-        if (res->lr_namespace->ns_lvbo &&
-            res->lr_namespace->ns_lvbo->lvbo_update) {
-                return res->lr_namespace->ns_lvbo->lvbo_update(res, r,
-                                                               increase);
+        if (ldlm_res_to_ns(res)->ns_lvbo &&
+            ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
+                return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
+                                                                 increase);
         }
         return 0;
 }
@@ -981,26 +1122,30 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
 int  ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
+void ldlm_lock_fail_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                             const struct ldlm_res_id *, ldlm_type_t type,
                             ldlm_policy_data_t *, ldlm_mode_t mode,
                             struct lustre_handle *, int unref);
+ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+                                        __u64 *bits);
 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                         __u32 *flags);
 void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
 void ldlm_lock_cancel(struct ldlm_lock *lock);
 void ldlm_reprocess_all(struct ldlm_resource *res);
 void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
-void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos);
 void ldlm_lock_dump_handle(int level, struct lustre_handle *);
 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
 
 /* resource.c */
 struct ldlm_namespace *
 ldlm_namespace_new(struct obd_device *obd, char *name,
-                   ldlm_side_t client, ldlm_appetite_t apt);
+                   ldlm_side_t client, ldlm_appetite_t apt,
+                   ldlm_ns_type_t ns_type);
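ldlm_namespace_new() now also takes the ldlm_ns_type_t introduced above, so resource-hash parameters can be chosen per namespace type. A hedged sketch of a client-side caller; LDLM_NAMESPACE_CLIENT and LDLM_NAMESPACE_MODEST are the usual ldlm_side_t/ldlm_appetite_t values assumed from elsewhere in this header:

/* Sketch: create the DLM namespace for an OSC-like client device. */
static int client_setup_ns(struct obd_device *obd)
{
        obd->obd_namespace = ldlm_namespace_new(obd, obd->obd_name,
                                                LDLM_NAMESPACE_CLIENT,
                                                LDLM_NAMESPACE_MODEST,
                                                LDLM_NS_TYPE_OSC);
        return obd->obd_namespace == NULL ? -ENOMEM : 0;
}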
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags);
 void ldlm_namespace_free(struct ldlm_namespace *ns,
                          struct obd_import *imp, int force);
@@ -1008,10 +1153,8 @@ void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
 void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
 struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
-void ldlm_namespace_get_locked(struct ldlm_namespace *ns);
-void ldlm_namespace_put_locked(struct ldlm_namespace *ns, int wakeup);
 void ldlm_namespace_get(struct ldlm_namespace *ns);
-void ldlm_namespace_put(struct ldlm_namespace *ns, int wakeup);
+void ldlm_namespace_put(struct ldlm_namespace *ns);
 int ldlm_proc_setup(void);
 #ifdef LPROCFS
 void ldlm_proc_cleanup(void);
@@ -1045,13 +1188,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
         lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
 } while (0)
 
-struct ldlm_callback_suite {
-        ldlm_completion_callback lcs_completion;
-        ldlm_blocking_callback   lcs_blocking;
-        ldlm_glimpse_callback    lcs_glimpse;
-        ldlm_weigh_callback      lcs_weigh;
-};
-
 /* ldlm_request.c */
 int ldlm_expired_completion_wait(void *data);
 int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
@@ -1074,9 +1210,6 @@ int ldlm_prep_elc_req(struct obd_export *exp,
                       struct ptlrpc_request *req,
                       int version, int opc, int canceloff,
                       cfs_list_t *cancels, int count);
-int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
-                         const struct ldlm_request *dlm_req,
-                         const struct ldlm_callback_suite *cbs);
 int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                           ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                           int *flags, void *lvb, __u32 lvb_len,
@@ -1095,24 +1228,26 @@ int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                     void *data, __u32 data_len);
 int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
 int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_handle_convert0(struct ptlrpc_request *req,
-                         const struct ldlm_request *dlm_req);
 int ldlm_cli_cancel(struct lustre_handle *lockh);
 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
-                           int flags, void *opaque);
+                           ldlm_cancel_flags_t flags, void *opaque);
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                     const struct ldlm_res_id *res_id,
                                     ldlm_policy_data_t *policy,
-                                    ldlm_mode_t mode, int flags, void *opaque);
+                                    ldlm_mode_t mode,
+                                    ldlm_cancel_flags_t flags,
+                                    void *opaque);
 int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
-                        int count, int flags);
+                        int count, ldlm_cancel_flags_t flags);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
                                cfs_list_t *cancels,
                                ldlm_policy_data_t *policy,
                                ldlm_mode_t mode, int lock_flags,
-                               int cancel_flags, void *opaque);
+                               ldlm_cancel_flags_t cancel_flags, void *opaque);
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+                               ldlm_cancel_flags_t flags);
 int ldlm_cli_cancel_list(cfs_list_t *head, int count,
-                         struct ptlrpc_request *req, int flags);
+                         struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
 
 /* mds/handler.c */
 /* This has to be here because recursive inclusion sucks. */
@@ -1150,7 +1285,6 @@ static inline void lock_res_nested(struct ldlm_resource *res,
         cfs_spin_lock_nested(&res->lr_lock, mode);
 }
 
-
 static inline void unlock_res(struct ldlm_resource *res)
 {
         cfs_spin_unlock(&res->lr_lock);