b=22244 delegate lock cancel to blocking thread
[fs/lustre-release.git] lustre/include/lustre_dlm.h
index c83a841..362abdf 100644
 #ifndef _LUSTRE_DLM_H__
 #define _LUSTRE_DLM_H__
 
+/** \defgroup ldlm ldlm
+ *
+ * @{
+ */
+
 #if defined(__linux__)
 #include <linux/lustre_dlm.h>
 #elif defined(__APPLE__)
@@ -63,7 +68,7 @@ struct obd_device;
 /* 1.5 times the maximum 128 tasks available in VN mode */
 #define LDLM_DEFAULT_LRU_SIZE 196
 #else
-#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
+#define LDLM_DEFAULT_LRU_SIZE (100 * cfs_num_online_cpus())
 #endif
 #define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
 #define LDLM_CTIME_AGE_LIMIT (10)
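For scale, the non-VN default above grows with CPU count; a quick arithmetic sketch, assuming cfs_num_online_cpus() simply reports the number of online CPUs as its unprefixed predecessor did:

/* Hedged sketch, not part of the patch: on a 16-core client the
 * default LRU holds 100 * 16 = 1600 cached locks, while the VN-mode
 * build above keeps the fixed value of 196. */
unsigned int lru_size = LDLM_DEFAULT_LRU_SIZE;  /* e.g. 1600 on 16 CPUs */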
@@ -105,7 +110,7 @@ typedef enum {
 #define LDLM_FL_REPLAY         0x000100
 
 #define LDLM_FL_INTENT_ONLY    0x000200 /* don't grant lock, just do intent */
-#define LDLM_FL_LOCAL_ONLY     0x000400 /* see ldlm_cli_cancel_unused */
+#define LDLM_FL_LOCAL_ONLY     0x000400
 
 /* don't run the cancel callback under ldlm_cli_cancel_unused */
 #define LDLM_FL_FAILED         0x000800
@@ -113,7 +118,7 @@ typedef enum {
 #define LDLM_FL_HAS_INTENT     0x001000 /* lock request has intent */
 #define LDLM_FL_CANCELING      0x002000 /* lock cancel has already been sent */
 #define LDLM_FL_LOCAL          0x004000 /* local lock (ie, no srv/cli split) */
-#define LDLM_FL_WARN           0x008000 /* see ldlm_cli_cancel_unused */
+/* was LDLM_FL_WARN  until 2.0.0  0x008000 */
 #define LDLM_FL_DISCARD_DATA   0x010000 /* discard (no writeback) on cancel */
 
 #define LDLM_FL_NO_TIMEOUT     0x020000 /* Blocked by group lock - wait
@@ -163,8 +168,7 @@ typedef enum {
  * w/o involving separate thread. in order to decrease cs rate */
 #define LDLM_FL_ATOMIC_CB      0x4000000
 
-/* Cancel lock asynchronously. See ldlm_cli_cancel_unused_resource. */
-#define LDLM_FL_ASYNC           0x8000000
+/* was LDLM_FL_ASYNC until 2.0.0 0x8000000 */
 
 /* It may happen that a client initiate 2 operations, e.g. unlink and mkdir,
  * such that server send blocking ast for conflict locks to this client for
@@ -188,7 +192,7 @@ typedef enum {
 /* Flags sent in AST lock_flags to be mapped into the receiving lock. */
 #define LDLM_AST_FLAGS         (LDLM_FL_DISCARD_DATA)
 
-/*
+/* 
  * --------------------------------------------------------------------------
  * NOTE! Starting from this point, that is, LDLM_FL_* flags with values above
  * 0x80000000 will not be sent over the wire.
@@ -278,9 +282,9 @@ struct ldlm_pool_ops {
 #define LDLM_POOLS_THREAD_PERIOD (1)
 
 /**
- * 5% margin for modest pools. See ldlm_pool.c for details.
+ * ~6% margin for modest pools. See ldlm_pool.c for details.
  */
-#define LDLM_POOLS_MODEST_MARGIN (5)
+#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
 
 /**
  * Default recalc period for server side pools in sec.
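The rename from a percentage to a shift changes how the modest-pool margin is computed; a minimal sketch of the implied arithmetic, under the assumption of a helper of this shape (the function name is illustrative; the real expression lives in ldlm_pool.c):

/* Hedged sketch: a right shift by LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
 * gives limit/16, i.e. ~6.25%, matching the "~6%" comment above; the
 * old LDLM_POOLS_MODEST_MARGIN code divided by 100 for a flat 5%. */
static inline __u32 ldlm_pool_modest_margin(__u32 limit)
{
        return limit >> LDLM_POOLS_MODEST_MARGIN_SHIFT;  /* 4096 -> 256 */
}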
@@ -304,27 +308,27 @@ struct ldlm_pool {
         /**
          * Lock for protecting slv/clv updates.
          */
-        spinlock_t             pl_lock;
+        cfs_spinlock_t         pl_lock;
         /**
          * Number of allowed locks in pool, both client and server side.
          */
-        atomic_t               pl_limit;
+        cfs_atomic_t           pl_limit;
         /**
          * Number of granted locks in pool.
          */
-        atomic_t               pl_granted;
+        cfs_atomic_t           pl_granted;
         /**
          * Grant rate per T.
          */
-        atomic_t               pl_grant_rate;
+        cfs_atomic_t           pl_grant_rate;
         /**
          * Cancel rate per T.
          */
-        atomic_t               pl_cancel_rate;
+        cfs_atomic_t           pl_cancel_rate;
         /**
          * Grant speed (GR-CR) per T.
          */
-        atomic_t               pl_grant_speed;
+        cfs_atomic_t           pl_grant_speed;
         /**
          * Server lock volume. Protected by pl_lock.
          */
@@ -337,7 +341,7 @@ struct ldlm_pool {
          * Lock volume factor. SLV on client is calculated as following:
          * server_slv * lock_volume_factor.
          */
-        atomic_t               pl_lock_volume_factor;
+        cfs_atomic_t           pl_lock_volume_factor;
         /**
          * Time when last slv from server was obtained.
          */
@@ -368,7 +372,7 @@ struct ldlm_valblock_ops {
         int (*lvbo_init)(struct ldlm_resource *res);
         int (*lvbo_update)(struct ldlm_resource *res,
                            struct ptlrpc_request *r,
-                           int buf_idx, int increase);
+                           int increase);
 };
 
 typedef enum {
@@ -409,8 +413,8 @@ struct ldlm_namespace {
         /**
          * Hash table for namespace.
          */
-        struct list_head      *ns_hash;
-        spinlock_t             ns_hash_lock;
+        cfs_list_t            *ns_hash;
+        cfs_spinlock_t         ns_hash_lock;
 
          /**
           * Count of resources in the hash.
@@ -420,19 +424,19 @@ struct ldlm_namespace {
          /**
           * All root resources in namespace.
           */
-        struct list_head       ns_root_list;
+        cfs_list_t             ns_root_list;
 
         /**
          * Position in global namespace list.
          */
-        struct list_head       ns_list_chain;
+        cfs_list_t             ns_list_chain;
 
         /**
           * All unused locks in namespace (client-side LRU).
          */
-        struct list_head       ns_unused_list;
+        cfs_list_t             ns_unused_list;
         int                    ns_nr_unused;
-        spinlock_t             ns_unused_lock;
+        cfs_spinlock_t         ns_unused_lock;
 
         unsigned int           ns_max_unused;
         unsigned int           ns_max_age;
@@ -447,7 +451,7 @@ struct ldlm_namespace {
          */
         cfs_time_t             ns_next_dump;
 
-        atomic_t               ns_locks;
+        cfs_atomic_t           ns_locks;
         __u64                  ns_resources;
         ldlm_res_policy        ns_policy;
         struct ldlm_valblock_ops *ns_lvbo;
@@ -530,7 +534,7 @@ typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
 /* Interval node data for each LDLM_EXTENT lock */
 struct ldlm_interval {
         struct interval_node li_node;   /* node for tree mgmt */
-        struct list_head     li_group;  /* the locks which have the same
+        cfs_list_t           li_group;  /* the locks which have the same
                                          * policy - group of the policy */
 };
 #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
@@ -546,6 +550,14 @@ struct ldlm_interval_tree {
 
 #define LUSTRE_TRACKS_LOCK_EXP_REFS (1)
 
+/* Cancel flag. */
+typedef enum {
+        LCF_ASYNC      = 0x1, /* Cancel locks asynchronously. */
+        LCF_LOCAL      = 0x2, /* Cancel locks locally, not notifying server */
+        LCF_BL_AST     = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
+                               * in the same RPC */
+} ldlm_cancel_flags_t;
+
 struct ldlm_lock {
         /**
          * Must be first in the structure.
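The new ldlm_cancel_flags_t replaces both the raw int cancel flags and the retired LDLM_FL_ASYNC bit in the cancel prototypes further down; a minimal usage sketch, assuming (as in earlier releases) that a NULL res_id means the whole namespace, with the wrapper function itself being illustrative:

/* Hedged sketch: flush cached locks for a namespace.  LCF_ASYNC avoids
 * waiting for the cancel RPCs; adding LCF_LOCAL drops the locks without
 * notifying the server at all. */
static int example_flush_namespace(struct ldlm_namespace *ns, int local_only)
{
        ldlm_cancel_flags_t flags = LCF_ASYNC;

        if (local_only)
                flags |= LCF_LOCAL;

        return ldlm_cli_cancel_unused(ns, NULL, flags, NULL);
}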
@@ -554,12 +566,12 @@ struct ldlm_lock {
         /**
          * Lock reference count.
          */
-        atomic_t                 l_refc;
+        cfs_atomic_t             l_refc;
         /**
          * Internal spinlock protects l_resource.  we should hold this lock
          * first before grabbing res_lock.
          */
-        spinlock_t               l_lock;
+        cfs_spinlock_t           l_lock;
         /**
          * ldlm_lock_change_resource() can change this.
          */
@@ -567,11 +579,11 @@ struct ldlm_lock {
         /**
          * Protected by ns_hash_lock. List item for client side lru list.
          */
-        struct list_head         l_lru;
+        cfs_list_t               l_lru;
         /**
          * Protected by lr_lock, linkage to resource's lock queues.
          */
-        struct list_head         l_res_link;
+        cfs_list_t               l_res_link;
         /**
          * Tree node for ldlm_extent.
          */
@@ -580,7 +592,7 @@ struct ldlm_lock {
          * Protected by per-bucket exp->exp_lock_hash locks. Per export hash
          * of locks.
          */
-        struct hlist_node        l_exp_hash;
+        cfs_hlist_node_t         l_exp_hash;
         /**
          * Protected by lr_lock. Requested mode.
          */
@@ -618,10 +630,6 @@ struct ldlm_lock {
         struct lustre_handle     l_remote_handle;
 
         ldlm_policy_data_t       l_policy_data;
-        /* traffic index indicating how busy the resource will be, if it is
-         * high, the lock's granted region will not be so big lest it conflicts
-         * other locks, causing frequent lock cancellation and re-enqueue */
-        int                   l_traffic;
 
         /*
          * Protected by lr_lock. Various counters: readers, writers, etc.
@@ -645,8 +653,8 @@ struct ldlm_lock {
          */
         cfs_waitq_t           l_waitq;
 
-        /**
-         * Seconds. it will be updated if there is any activity related to
+        /** 
+         * Seconds. it will be updated if there is any activity related to 
          * the lock, e.g. enqueue the lock or send block AST.
          */
         cfs_time_t            l_last_activity;
@@ -667,13 +675,12 @@ struct ldlm_lock {
          */
         __u32                 l_lvb_len;
         void                 *l_lvb_data;
-        void                 *l_lvb_swabber;
 
         void                 *l_ast_data;
-        spinlock_t            l_extents_list_lock;
-        struct list_head      l_extents_list;
+        cfs_spinlock_t        l_extents_list_lock;
+        cfs_list_t            l_extents_list;
 
-        struct list_head      l_cache_locks_list;
+        cfs_list_t            l_cache_locks_list;
 
         /*
          * Server-side-only members.
@@ -685,7 +692,7 @@ struct ldlm_lock {
         /**
          * Protected by elt_lock. Callbacks pending.
          */
-        struct list_head      l_pending_chain;
+        cfs_list_t            l_pending_chain;
 
         cfs_time_t            l_callback_timeout;
 
@@ -697,15 +704,15 @@ struct ldlm_lock {
         /**
          * For ldlm_add_ast_work_item().
          */
-        struct list_head      l_bl_ast;
+        cfs_list_t            l_bl_ast;
         /**
          * For ldlm_add_ast_work_item().
          */
-        struct list_head      l_cp_ast;
+        cfs_list_t            l_cp_ast;
         /**
          * For ldlm_add_ast_work_item().
          */
-        struct list_head      l_rk_ast;
+        cfs_list_t            l_rk_ast;
 
         struct ldlm_lock     *l_blocking_lock;
         int                   l_bl_ast_run;
@@ -713,8 +720,8 @@ struct ldlm_lock {
         /**
          * Protected by lr_lock, linkages to "skip lists".
          */
-        struct list_head      l_sl_mode;
-        struct list_head      l_sl_policy;
+        cfs_list_t            l_sl_mode;
+        cfs_list_t            l_sl_policy;
         struct lu_ref         l_reference;
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
         /* Debugging stuff for bug 20498, for tracking export
@@ -722,7 +729,7 @@ struct ldlm_lock {
         /** number of export references taken */
         int                   l_exp_refs_nr;
         /** link all locks referencing one export */
-        struct list_head      l_exp_refs_link;
+        cfs_list_t            l_exp_refs_link;
         /** referenced export object */
         struct obd_export    *l_exp_refs_target;
 #endif
@@ -732,26 +739,26 @@ struct ldlm_resource {
         struct ldlm_namespace *lr_namespace;
 
         /* protected by ns_hash_lock */
-        struct list_head       lr_hash;
+        cfs_list_t             lr_hash;
         struct ldlm_resource  *lr_parent;   /* 0 for a root resource */
-        struct list_head       lr_children; /* list head for child resources */
-        struct list_head       lr_childof;  /* part of ns_root_list if root res,
+        cfs_list_t             lr_children; /* list head for child resources */
+        cfs_list_t             lr_childof;  /* part of ns_root_list if root res,
                                              * part of lr_children if child */
-        spinlock_t             lr_lock;
+        cfs_spinlock_t         lr_lock;
 
         /* protected by lr_lock */
-        struct list_head       lr_granted;
-        struct list_head       lr_converting;
-        struct list_head       lr_waiting;
+        cfs_list_t             lr_granted;
+        cfs_list_t             lr_converting;
+        cfs_list_t             lr_waiting;
         ldlm_mode_t            lr_most_restr;
         ldlm_type_t            lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK} */
         struct ldlm_res_id     lr_name;
-        atomic_t               lr_refcount;
+        cfs_atomic_t           lr_refcount;
 
         struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];  /* interval trees*/
 
         /* Server-side-only lock value block elements */
-        struct semaphore       lr_lvb_sem;
+        cfs_semaphore_t        lr_lvb_sem;
         __u32                  lr_lvb_len;
         void                  *lr_lvb_data;
 
@@ -764,13 +771,13 @@ struct ldlm_resource {
 };
 
 struct ldlm_ast_work {
-        struct ldlm_lock *w_lock;
-        int               w_blocking;
-        struct ldlm_lock_desc w_desc;
-        struct list_head   w_list;
-        int w_flags;
-        void *w_data;
-        int w_datalen;
+        struct ldlm_lock      *w_lock;
+        int                    w_blocking;
+        struct ldlm_lock_desc  w_desc;
+        cfs_list_t             w_list;
+        int                    w_flags;
+        void                  *w_data;
+        int                    w_datalen;
 };
 
 /* ldlm_enqueue parameters common */
@@ -792,7 +799,7 @@ extern char *ldlm_typename[];
 extern char *ldlm_it2str(int it);
 #ifdef LIBCFS_DEBUG
 #define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) do { \
-        CHECK_STACK();                                                  \
+        CFS_CHECK_STACK();                                              \
                                                                         \
         if (((level) & D_CANTMASK) != 0 ||                              \
             ((libcfs_debug & (level)) != 0 &&                           \
@@ -837,7 +844,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, __u32 mask,
 
 typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, int *flags,
                                       int first_enq, ldlm_error_t *err,
-                                      struct list_head *work_list);
+                                      cfs_list_t *work_list);
 
 /*
  * Iterators.
@@ -857,7 +864,7 @@ int ldlm_namespace_foreach_res(struct ldlm_namespace *ns,
                                ldlm_res_iterator_t iter, void *closure);
 
 int ldlm_replay_locks(struct obd_import *imp);
-void ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
+int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
                            ldlm_iterator_t iter, void *data);
 
 /* ldlm_flock.c */
@@ -915,12 +922,11 @@ ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
 }
 
 static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
-                                       struct ptlrpc_request *r, int buf_idx,
-                                       int increase)
+                                       struct ptlrpc_request *r, int increase)
 {
         if (res->lr_namespace->ns_lvbo &&
             res->lr_namespace->ns_lvbo->lvbo_update) {
-                return res->lr_namespace->ns_lvbo->lvbo_update(res, r, buf_idx,
+                return res->lr_namespace->ns_lvbo->lvbo_update(res, r,
                                                                increase);
         }
         return 0;
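With the buf_idx argument gone, callers of this wrapper pass only the request and the increase flag; a hedged sketch of a server-side call site (the function name is illustrative, not from the patch):

/* Hedged sketch: refresh the resource's lock value block from a client
 * request, asking lvbo_update() to grow it (increase != 0) rather than
 * shrink it. */
static int example_refresh_lvb(struct ldlm_resource *res,
                               struct ptlrpc_request *req)
{
        return ldlm_res_lvbo_update(res, req, 1);
}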
@@ -961,17 +967,17 @@ do {                                            \
         lock;                                   \
 })
 
-#define ldlm_lock_list_put(head, member, count)                 \
-({                                                              \
-        struct ldlm_lock *_lock, *_next;                        \
-        int c = count;                                          \
-        list_for_each_entry_safe(_lock, _next, head, member) {  \
-                if (c-- == 0)                                   \
-                        break;                                  \
-                list_del_init(&_lock->member);                  \
-                LDLM_LOCK_RELEASE(_lock);                       \
-        }                                                       \
-        LASSERT(c <= 0);                                        \
+#define ldlm_lock_list_put(head, member, count)                     \
+({                                                                  \
+        struct ldlm_lock *_lock, *_next;                            \
+        int c = count;                                              \
+        cfs_list_for_each_entry_safe(_lock, _next, head, member) {  \
+                if (c-- == 0)                                       \
+                        break;                                      \
+                cfs_list_del_init(&_lock->member);                  \
+                LDLM_LOCK_RELEASE(_lock);                           \
+        }                                                           \
+        LASSERT(c <= 0);                                            \
 })
 
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
@@ -1027,7 +1033,8 @@ struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
                                         ldlm_type_t type, int create);
 struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
 int ldlm_resource_putref(struct ldlm_resource *res);
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+void ldlm_resource_add_lock(struct ldlm_resource *res,
+                            cfs_list_t *head,
                             struct ldlm_lock *lock);
 void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
@@ -1063,25 +1070,24 @@ int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                      struct ldlm_enqueue_info *einfo,
                      const struct ldlm_res_id *res_id,
-                     ldlm_policy_data_t *policy, int *flags,
-                     void *lvb, __u32 lvb_len, void *lvb_swabber,
-                     struct lustre_handle *lockh, int async);
+                     ldlm_policy_data_t const *policy, int *flags,
+                     void *lvb, __u32 lvb_len, struct lustre_handle *lockh,
+                     int async);
 int ldlm_prep_enqueue_req(struct obd_export *exp,
                           struct ptlrpc_request *req,
-                          struct list_head *cancels,
+                          cfs_list_t *cancels,
                           int count);
 int ldlm_prep_elc_req(struct obd_export *exp,
                       struct ptlrpc_request *req,
                       int version, int opc, int canceloff,
-                      struct list_head *cancels, int count);
+                      cfs_list_t *cancels, int count);
 int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
                          const struct ldlm_request *dlm_req,
                          const struct ldlm_callback_suite *cbs);
 int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                           ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
                           int *flags, void *lvb, __u32 lvb_len,
-                          void *lvb_swabber, struct lustre_handle *lockh,
-                          int rc);
+                          struct lustre_handle *lockh, int rc);
 int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            const struct ldlm_res_id *res_id,
                            ldlm_type_t type, ldlm_policy_data_t *policy,
@@ -1089,7 +1095,7 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
                            ldlm_blocking_callback blocking,
                            ldlm_completion_callback completion,
                            ldlm_glimpse_callback glimpse,
-                           void *data, __u32 lvb_len, void *lvb_swabber,
+                           void *data, __u32 lvb_len,
                            const __u64 *client_cookie,
                            struct lustre_handle *lockh);
 int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
@@ -1100,20 +1106,24 @@ int ldlm_handle_convert0(struct ptlrpc_request *req,
                          const struct ldlm_request *dlm_req);
 int ldlm_cli_cancel(struct lustre_handle *lockh);
 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
-                           int flags, void *opaque);
+                           ldlm_cancel_flags_t flags, void *opaque);
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
                                     const struct ldlm_res_id *res_id,
                                     ldlm_policy_data_t *policy,
-                                    ldlm_mode_t mode, int flags, void *opaque);
-int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
-                        int count, int flags);
+                                    ldlm_mode_t mode,
+                                    ldlm_cancel_flags_t flags,
+                                    void *opaque);
+int ldlm_cli_cancel_req(struct obd_export *exp, cfs_list_t *head,
+                        int count, ldlm_cancel_flags_t flags);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
-                               struct list_head *cancels,
+                               cfs_list_t *cancels,
                                ldlm_policy_data_t *policy,
                                ldlm_mode_t mode, int lock_flags,
-                               int cancel_flags, void *opaque);
-int ldlm_cli_cancel_list(struct list_head *head, int count,
-                         struct ptlrpc_request *req, int flags);
+                               ldlm_cancel_flags_t cancel_flags, void *opaque);
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+                               ldlm_cancel_flags_t flags);
+int ldlm_cli_cancel_list(cfs_list_t *head, int count,
+                         struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
 
 /* mds/handler.c */
 /* This has to be here because recursive inclusion sucks. */
@@ -1142,19 +1152,19 @@ enum lock_res_type {
 
 static inline void lock_res(struct ldlm_resource *res)
 {
-        spin_lock(&res->lr_lock);
+        cfs_spin_lock(&res->lr_lock);
 }
 
 static inline void lock_res_nested(struct ldlm_resource *res,
                                    enum lock_res_type mode)
 {
-        spin_lock_nested(&res->lr_lock, mode);
+        cfs_spin_lock_nested(&res->lr_lock, mode);
 }
 
 
 static inline void unlock_res(struct ldlm_resource *res)
 {
-        spin_unlock(&res->lr_lock);
+        cfs_spin_unlock(&res->lr_lock);
 }
 
 static inline void check_res_locked(struct ldlm_resource *res)
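These wrappers only swap spin_lock() for its cfs_ prefixed counterpart; a hedged sketch of the pattern they guard, walking a resource's granted queue under lr_lock (the counting helper is illustrative):

/* Hedged sketch: count granted locks on a resource while holding
 * lr_lock through the wrappers above; l_res_link is the per-resource
 * queue linkage declared earlier in this header. */
static int example_count_granted(struct ldlm_resource *res)
{
        struct ldlm_lock *lock;
        int count = 0;

        lock_res(res);
        cfs_list_for_each_entry(lock, &res->lr_granted, l_res_link)
                count++;
        unlock_res(res);

        return count;
}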
@@ -1186,4 +1196,7 @@ void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
+
+/** @} ldlm */
+
 #endif