b=17887
diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 10384f1..9edf487 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -53,6 +53,7 @@
 #include <lustre_handles.h>
 #include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
 #include <interval_tree.h> /* for interval_node{}, ldlm_extent */
+#include <lu_ref.h>
 
 struct obd_ops;
 struct obd_device;
@@ -153,11 +154,6 @@ typedef enum {
 /* Flags flags inherited from parent lock when doing intents. */
 #define LDLM_INHERIT_FLAGS     (LDLM_FL_CANCEL_ON_BLOCK)
 
-/* These are flags that are mapped into the flags and ASTs of blocking locks */
-#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
-/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
-#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)
-
 /* completion ast to be executed */
 #define LDLM_FL_CP_REQD        0x1000000
 
@@ -187,6 +183,23 @@ typedef enum {
 /* measure lock contention and return -EUSERS if locking contention is high */
 #define LDLM_FL_DENY_ON_CONTENTION 0x40000000
 
+/* These are flags that are mapped into the flags and ASTs of blocking locks */
+#define LDLM_AST_DISCARD_DATA  0x80000000 /* Add FL_DISCARD to blocking ASTs */
+
+/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
+#define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)
+
+/*
+ * --------------------------------------------------------------------------
+ * NOTE! From this point on, LDLM_FL_* flags with values above 0x80000000
+ * are not sent over the wire.
+ * --------------------------------------------------------------------------
+ */
+
+/* Used in fail_loc testing to mark a lock as the target for an emulated
+ * -EINTR during the cp_ast sleep, racing with an incoming bl_ast. */
+#define LDLM_FL_FAIL_LOC       0x100000000ULL
+
 /* The blocking callback is overloaded to perform two functions.  These flags
  * indicate which operation should be performed. */
 #define LDLM_CB_BLOCKING    1
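
The new LDLM_FL_FAIL_LOC value is the first lock flag to sit above the 32-bit
boundary, which is why l_flags is widened to __u64 later in this patch. A
minimal illustrative sketch, not part of the patch; ldlm_lock_is_fail_loc() is
a hypothetical helper:

/* Test a flag that only fits once l_flags is a __u64. */
static inline int ldlm_lock_is_fail_loc(const struct ldlm_lock *lock)
{
        return (lock->l_flags & LDLM_FL_FAIL_LOC) != 0;
}
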
@@ -200,6 +213,7 @@ typedef enum {
 #define LCK_COMPAT_CR  (LCK_COMPAT_CW | LCK_PR | LCK_PW)
 #define LCK_COMPAT_NL  (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
 #define LCK_COMPAT_GROUP  (LCK_GROUP | LCK_NL)
+#define LCK_COMPAT_COS (LCK_COS)
 
 extern ldlm_mode_t lck_compat_array[];
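
The new LCK_COMPAT_COS entry makes an LCK_COS lock compatible only with other
COS locks. A hedged sketch of how such a compatibility mask is consulted,
assuming lck_compat_array[] is indexed by the granted mode; the helper name is
illustrative and not part of the patch:

static inline int ldlm_mode_compatible(ldlm_mode_t granted, ldlm_mode_t req)
{
        /* non-zero when the requested mode may coexist with "granted" */
        return (lck_compat_array[granted] & req) != 0;
}
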
 
@@ -259,13 +273,13 @@ struct ldlm_pool_ops {
         int (*po_setup)(struct ldlm_pool *pl, int limit);
 };
 
-/** 
- * One second for pools thread check interval. Each pool has own period. 
+/**
+ * One second for pools thread check interval. Each pool has own period.
  */
 #define LDLM_POOLS_THREAD_PERIOD (1)
 
-/** 
- * 5% margin for modest pools. See ldlm_pool.c for details. 
+/**
+ * 5% margin for modest pools. See ldlm_pool.c for details.
  */
 #define LDLM_POOLS_MODEST_MARGIN (5)
 
@@ -363,14 +377,6 @@ typedef enum {
 } ldlm_appetite_t;
 
 /*
- * Default value for ->ns_shrink_thumb. If lock is not extent one its cost
- * is one page. Here we have 256 pages which is 1M on i386. Thus by default
- * all extent locks which have more than 1M long extent will be kept in lru,
- * others (including ibits locks) will be canceled on memory pressure event.
- */
-#define LDLM_LOCK_SHRINK_THUMB 256
-
-/*
  * Default values for the "max_nolock_size", "contention_time" and
  * "contended_locks" namespace tunables.
  */
@@ -430,18 +436,13 @@ struct ldlm_namespace {
 
         unsigned int           ns_max_unused;
         unsigned int           ns_max_age;
-
+        unsigned int           ns_timeouts;
          /**
           * Seconds.
           */
         unsigned int           ns_ctime_age_limit;
 
         /**
-         * Lower limit to number of pages in lock to keep it in cache.
-         */
-        unsigned int           ns_shrink_thumb;
-
-        /**
          * Next debug dump, jiffies.
          */
         cfs_time_t             ns_next_dump;
@@ -512,7 +513,7 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
  *
  */
 
-#define RES_HASH_BITS 10
+#define RES_HASH_BITS 12
 #define RES_HASH_SIZE (1UL << RES_HASH_BITS)
 #define RES_HASH_MASK (RES_HASH_SIZE - 1)
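
Raising RES_HASH_BITS from 10 to 12 grows the per-namespace resource hash from
1024 to 4096 buckets; RES_HASH_MASK folds a hash value into that range.
Illustrative only, not part of the patch:

static inline unsigned long res_hash_to_bucket(unsigned long hash)
{
        return hash & RES_HASH_MASK;  /* 0 .. 4095 with RES_HASH_BITS == 12 */
}
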
 
@@ -524,6 +525,7 @@ typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
 typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
                                         void *data);
 typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
+typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
 
 /* Interval node data for each LDLM_EXTENT lock */
 struct ldlm_interval {
@@ -543,7 +545,7 @@ struct ldlm_interval_tree {
 };
 
 struct ldlm_lock {
-        /** 
+        /**
          * Must be first in the structure.
          */
         struct portals_handle    l_handle;
@@ -551,34 +553,34 @@ struct ldlm_lock {
          * Lock reference count.
          */
         atomic_t                 l_refc;
-        /** 
+        /**
          * Internal spinlock protects l_resource.  we should hold this lock
          * first before grabbing res_lock.
          */
         spinlock_t               l_lock;
-        /** 
-         * ldlm_lock_change_resource() can change this. 
+        /**
+         * ldlm_lock_change_resource() can change this.
          */
         struct ldlm_resource    *l_resource;
-        /** 
+        /**
          * Protected by ns_hash_lock. List item for client side lru list.
          */
         struct list_head         l_lru;
-        /** 
-         * Protected by lr_lock, linkage to resource's lock queues. 
+        /**
+         * Protected by lr_lock, linkage to resource's lock queues.
          */
         struct list_head         l_res_link;
-        /** 
-         * Tree node for ldlm_extent. 
+        /**
+         * Tree node for ldlm_extent.
          */
         struct ldlm_interval    *l_tree_node;
-        /** 
+        /**
          * Protected by per-bucket exp->exp_lock_hash locks. Per export hash
          * of locks.
          */
         struct hlist_node        l_exp_hash;
-        /** 
-         * Protected by lr_lock. Requested mode. 
+        /**
+         * Protected by lr_lock. Requested mode.
          */
         ldlm_mode_t              l_req_mode;
         /**
@@ -597,6 +599,7 @@ struct ldlm_lock {
          * Lock glimpse handler.
          */
         ldlm_glimpse_callback    l_glimpse_ast;
+        ldlm_weigh_callback      l_weigh_ast;
 
         /**
          * Lock export.
@@ -617,7 +620,7 @@ struct ldlm_lock {
         /*
          * Protected by lr_lock. Various counters: readers, writers, etc.
          */
-        __u32                 l_flags;
+        __u64                 l_flags;
         __u32                 l_readers;
         __u32                 l_writers;
         /*
@@ -629,27 +632,31 @@ struct ldlm_lock {
          */
         __u8                  l_destroyed;
 
-        /** 
+        /**
          * If the lock is granted, a process sleeps on this waitq to learn when
          * it's no longer in use.  If the lock is not granted, a process sleeps
-         * on this waitq to learn when it becomes granted. 
+         * on this waitq to learn when it becomes granted.
          */
         cfs_waitq_t           l_waitq;
 
-        struct timeval        l_enqueued_time;
+        /**
+         * Seconds. Updated whenever there is activity related to the lock,
+         * e.g. enqueuing the lock or sending a blocking AST.
+         */
+        cfs_time_t            l_last_activity;
 
         /**
-         * Jiffies. Should be converted to time if needed. 
+         * Jiffies. Should be converted to time if needed.
          */
         cfs_time_t            l_last_used;
 
         struct ldlm_extent    l_req_extent;
 
-        /* 
-         * Client-side-only members. 
+        /*
+         * Client-side-only members.
          */
-         
-        /** 
+
+        /**
          * Temporary storage for an LVB received during an enqueue operation.
          */
         __u32                 l_lvb_len;
@@ -662,43 +669,47 @@ struct ldlm_lock {
 
         struct list_head      l_cache_locks_list;
 
-        /* 
-         * Server-side-only members. 
+        /*
+         * Server-side-only members.
          */
 
-        /** 
+        /** Connection cookie for the client that originated the operation. */
+        __u64                 l_client_cookie;
+
+        /**
          * Protected by elt_lock. Callbacks pending.
          */
         struct list_head      l_pending_chain;
 
         cfs_time_t            l_callback_timeout;
 
-        /** 
-         * Pid which created this lock. 
+        /**
+         * Pid which created this lock.
          */
         __u32                 l_pid;
 
-        /** 
-         * For ldlm_add_ast_work_item(). 
+        /**
+         * For ldlm_add_ast_work_item().
          */
         struct list_head      l_bl_ast;
-        /** 
-         * For ldlm_add_ast_work_item(). 
+        /**
+         * For ldlm_add_ast_work_item().
          */
         struct list_head      l_cp_ast;
-        /** 
-         * For ldlm_add_ast_work_item(). 
+        /**
+         * For ldlm_add_ast_work_item().
          */
         struct list_head      l_rk_ast;
 
         struct ldlm_lock     *l_blocking_lock;
         int                   l_bl_ast_run;
 
-        /** 
-         * Protected by lr_lock, linkages to "skip lists". 
+        /**
+         * Protected by lr_lock, linkages to "skip lists".
          */
         struct list_head      l_sl_mode;
         struct list_head      l_sl_policy;
+        struct lu_ref         l_reference;
 };
 
 struct ldlm_resource {
@@ -730,6 +741,10 @@ struct ldlm_resource {
 
         /* when the resource was considered as contended */
         cfs_time_t             lr_contention_time;
+        /**
+         * List of references to this resource. For debugging.
+         */
+        struct lu_ref          lr_reference;
 };
 
 struct ldlm_ast_work {
@@ -749,6 +764,7 @@ struct ldlm_enqueue_info {
         void *ei_cb_bl;  /* blocking lock callback */
         void *ei_cb_cp;  /* lock completion callback */
         void *ei_cb_gl;  /* lock glimpse callback */
+        void *ei_cb_wg;  /* lock weigh callback */
         void *ei_cbdata; /* Data to be passed into callbacks. */
         short ei_async:1; /* async request */
 };
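
The new ei_cb_wg slot carries the ldlm_weigh_callback added earlier in this
patch. A hedged sketch of how a caller might fill it in; my_weigh_ast() and
setup_einfo() are made-up names, while the three AST functions are declared
later in this header:

/* Trivial weigh callback: treat every lock as equally expensive. */
static unsigned long my_weigh_ast(struct ldlm_lock *lock)
{
        return 1;
}

static void setup_einfo(struct ldlm_enqueue_info *einfo)
{
        memset(einfo, 0, sizeof(*einfo));
        einfo->ei_cb_bl = ldlm_blocking_ast;
        einfo->ei_cb_cp = ldlm_completion_ast;
        einfo->ei_cb_gl = ldlm_glimpse_ast;
        einfo->ei_cb_wg = my_weigh_ast;
}
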
@@ -778,13 +794,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
                       ...)
         __attribute__ ((format (printf, 4, 5)));
 
-#define LDLM_ERROR(lock, fmt, a...) do {                                \
+#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do {                    \
         static cfs_debug_limit_state_t _ldlm_cdls;                      \
-        ldlm_lock_debug(&_ldlm_cdls, D_ERROR, lock,                     \
+        ldlm_lock_debug(&_ldlm_cdls, mask, lock,                        \
                         __FILE__, __FUNCTION__, __LINE__,               \
                         "### " fmt , ##a);                              \
 } while (0)
 
+#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
+#define LDLM_WARN(lock, fmt, a...)  LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
+
 #define LDLM_DEBUG(lock, fmt, a...)   do {                              \
         ldlm_lock_debug(NULL, D_DLMTRACE, lock,                         \
                         __FILE__, __FUNCTION__, __LINE__,               \
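
LDLM_ERROR keeps its old rate-limited D_ERROR behaviour but is now a thin
wrapper over the generalized LDLM_DEBUG_LIMIT, and LDLM_WARN adds the same
thing at D_WARNING. Hypothetical usage, not part of the patch; the condition
and idle_seconds variable are illustrative:

        if (idle_seconds > 2 * timeout)
                LDLM_WARN(lock, "lock idle for %d seconds", idle_seconds);
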
@@ -844,7 +863,7 @@ int ldlm_handle_cancel(struct ptlrpc_request *req);
 int ldlm_request_cancel(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req, int first);
 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock);
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
 void ldlm_revoke_export_locks(struct obd_export *exp);
 int ldlm_get_ref(void);
 void ldlm_put_ref(void);
@@ -858,16 +877,27 @@ void ldlm_lock2handle(const struct ldlm_lock *lock,
                       struct lustre_handle *lockh);
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
 void ldlm_cancel_callback(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *data);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
-                                      const struct lustre_handle *);
 
 static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
 {
         return __ldlm_handle2lock(h, 0);
 }
 
+#define LDLM_LOCK_REF_DEL(lock) \
+        lu_ref_del(&lock->l_reference, "handle", cfs_current())
+
+static inline struct ldlm_lock *
+ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
+{
+        struct ldlm_lock *lock;
+
+        lock = __ldlm_handle2lock(h, flags);
+        if (lock != NULL)
+                LDLM_LOCK_REF_DEL(lock);
+        return lock;
+}
+
 static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
                                        struct lustre_msg *m, int buf_idx,
                                        int increase)
@@ -880,8 +910,27 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
         return 0;
 }
 
+int ldlm_error2errno(ldlm_error_t error);
+ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
+                                            * confuses user-space. */
+
+/**
+ * Release a temporary lock reference obtained by ldlm_handle2lock() or
+ * __ldlm_handle2lock().
+ */
 #define LDLM_LOCK_PUT(lock)                     \
 do {                                            \
+        LDLM_LOCK_REF_DEL(lock);                \
+        /*LDLM_DEBUG((lock), "put");*/          \
+        ldlm_lock_put(lock);                    \
+} while (0)
+
+/**
+ * Release a lock reference obtained by some other means (see
+ * LDLM_LOCK_PUT()).
+ */
+#define LDLM_LOCK_RELEASE(lock)                 \
+do {                                            \
         /*LDLM_DEBUG((lock), "put");*/          \
         ldlm_lock_put(lock);                    \
 } while (0)
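
The comments above split reference dropping in two: LDLM_LOCK_PUT releases the
temporary "handle" reference taken by ldlm_handle2lock(), while
LDLM_LOCK_RELEASE drops references obtained by other means. A hedged usage
sketch, not part of the patch (lockh is assumed to be a valid lustre_handle):

        struct ldlm_lock *lock;

        lock = ldlm_handle2lock(lockh);
        if (lock != NULL) {
                /* ... inspect or update the lock ... */
                LDLM_LOCK_PUT(lock);    /* pairs with ldlm_handle2lock() */
        }
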
@@ -901,7 +950,7 @@ do {                                            \
                 if (c-- == 0)                                   \
                         break;                                  \
                 list_del_init(&_lock->member);                  \
-                LDLM_LOCK_PUT(_lock);                           \
+                LDLM_LOCK_RELEASE(_lock);                       \
         }                                                       \
         LASSERT(c <= 0);                                        \
 })
@@ -911,17 +960,18 @@ void ldlm_lock_put(struct ldlm_lock *lock);
 void ldlm_lock_destroy(struct ldlm_lock *lock);
 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
 void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
+int  ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
-int ldlm_lock_fast_match(struct ldlm_lock *, int, obd_off, obd_off, void **);
-void ldlm_lock_fast_release(void *, int);
+void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
                             const struct ldlm_res_id *, ldlm_type_t type,
                             ldlm_policy_data_t *, ldlm_mode_t mode,
-                            struct lustre_handle *);
+                            struct lustre_handle *, int unref);
 struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
                                         __u32 *flags);
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
 void ldlm_lock_cancel(struct ldlm_lock *lock);
 void ldlm_cancel_locks_for_export(struct obd_export *export);
 void ldlm_reprocess_all(struct ldlm_resource *res);
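
ldlm_lock_match() now takes a trailing "unref" argument and
ldlm_lock_addref_try() is new. A hypothetical caller sketch, not part of the
patch; ns, match_flags, res_id and policy are stand-ins, and the new argument
is simply passed as 0 here:

        struct lustre_handle lockh;
        ldlm_mode_t mode;

        mode = ldlm_lock_match(ns, match_flags, &res_id, LDLM_EXTENT, &policy,
                               LCK_PR | LCK_PW, &lockh, 0 /* unref */);
        if (mode != 0) {
                /* a compatible granted lock was found; drop the reference */
                ldlm_lock_decref(&lockh, mode);
        }
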
@@ -969,17 +1019,28 @@ void ldlm_resource_dump(int level, struct ldlm_resource *);
 int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
                               const struct ldlm_res_id *);
 
+#define LDLM_RESOURCE_ADDREF(res) do {                                  \
+        lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
+} while (0)
+
+#define LDLM_RESOURCE_DELREF(res) do {                                  \
+        lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current());  \
+} while (0)
+
 struct ldlm_callback_suite {
         ldlm_completion_callback lcs_completion;
         ldlm_blocking_callback   lcs_blocking;
         ldlm_glimpse_callback    lcs_glimpse;
+        ldlm_weigh_callback      lcs_weigh;
 };
 
 /* ldlm_request.c */
 int ldlm_expired_completion_wait(void *data);
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
 int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                       void *data, int flag);
 int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
+int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data);
 int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                      struct ldlm_enqueue_info *einfo,
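
LDLM_RESOURCE_ADDREF()/LDLM_RESOURCE_DELREF() tie the new lr_reference lu_ref
to the calling function for debugging, and the callback suite gains the weigh
callback. A sketch of how the reference macros are presumably meant to bracket
a resource use (illustrative only, not part of the patch):

        LDLM_RESOURCE_ADDREF(res);
        lock_res(res);
        /* ... examine the resource's lock queues ... */
        unlock_res(res);
        LDLM_RESOURCE_DELREF(res);
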
@@ -1011,6 +1072,7 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            ldlm_completion_callback completion,
                            ldlm_glimpse_callback glimpse,
                            void *data, __u32 lvb_len, void *lvb_swabber,
+                           const __u64 *client_cookie,
                            struct lustre_handle *lockh);
 int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
                     void *data, __u32 data_len);
@@ -1027,8 +1089,6 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                     ldlm_mode_t mode, int flags, void *opaque);
 int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
                         int count, int flags);
-int ldlm_cli_join_lru(struct ldlm_namespace *,
-                      const struct ldlm_res_id *, int join);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
                                struct list_head *cancels,
                                ldlm_policy_data_t *policy,
@@ -1053,11 +1113,27 @@ void intent_set_disposition(struct ldlm_reply *rep, int flag);
 #define IOC_LDLM_REGRESS_STOP           _IOWR('f', 43, long)
 #define IOC_LDLM_MAX_NR                 43
 
+/**
+ * "Modes" of acquiring lock_res, used to tell lockdep that taking more than
+ * one lock_res at a time is deadlock-safe.
+ */
+enum lock_res_type {
+        LRT_NORMAL,
+        LRT_NEW
+};
+
 static inline void lock_res(struct ldlm_resource *res)
 {
         spin_lock(&res->lr_lock);
 }
 
+static inline void lock_res_nested(struct ldlm_resource *res,
+                                   enum lock_res_type mode)
+{
+        spin_lock_nested(&res->lr_lock, mode);
+}
+
+
 static inline void unlock_res(struct ldlm_resource *res)
 {
         spin_unlock(&res->lr_lock);
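
lock_res_nested() wraps spin_lock_nested() so that lockdep accepts holding the
lr_lock of two resources at once, with the second acquisition placed in the
LRT_NEW class. A hedged sketch of the intended pattern, not part of the patch;
the ordering rule between res1 and res2 is an assumption, not spelled out in
this header:

        lock_res(res1);                 /* default (LRT_NORMAL) class */
        lock_res_nested(res2, LRT_NEW); /* second resource, distinct class */
        /* ... operate on both resources ... */
        unlock_res(res2);
        unlock_res(res1);
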