struct ldlm_resource;
struct ldlm_namespace;
-typedef int (*ldlm_pool_recalc_t)(struct ldlm_pool *pl);
-
-typedef int (*ldlm_pool_shrink_t)(struct ldlm_pool *pl,
- int nr, unsigned int gfp_mask);
-
-enum {
- LDLM_POOL_CTL_RECALC = 1 << 0, /* Pool recalc is enabled */
- LDLM_POOL_CTL_SHRINK = 1 << 1, /* Pool shrink is enabled */
- LDLM_POOL_CTL_FULL = (LDLM_POOL_CTL_RECALC | LDLM_POOL_CTL_SHRINK)
+struct ldlm_pool_ops {
+ int (*po_recalc)(struct ldlm_pool *pl);
+ int (*po_shrink)(struct ldlm_pool *pl, int nr,
+ unsigned int gfp_mask);
+ int (*po_setup)(struct ldlm_pool *pl, int limit);
};
/* One second for pools thread check interval. */
#define LDLM_POOLS_MODEST_MARGIN (5)
/* A change to SLV in % after which we want to wake up pools thread asap. */
-#define LDLM_POOLS_FAST_SLV_CHANGE (5)
+#define LDLM_POOLS_FAST_SLV_CHANGE (50)
struct ldlm_pool {
/* Common pool fields */
- cfs_proc_dir_entry_t *pl_proc_dir; /* Pool proc directory. */
- char pl_name[100]; /* Pool name, should be long
- * enough to contain complex
- * proc entry name. */
- spinlock_t pl_lock; /* Lock for protecting slv/clv
- * updates. */
- atomic_t pl_limit; /* Number of allowed locks in
- * in pool, both, client and
- * server side. */
- atomic_t pl_granted; /* Number of granted locks. */
- atomic_t pl_grant_rate; /* Grant rate per T. */
- atomic_t pl_cancel_rate; /* Cancel rate per T. */
- atomic_t pl_grant_speed; /* Grant speed (GR - CR) per T. */
- __u64 pl_server_lock_volume; /* Server lock volume. Protected
- * by pl_lock. */
- cfs_time_t pl_update_time; /* Time when last slv from server
- * was obtained. */
- ldlm_pool_recalc_t pl_recalc; /* Recalc callback func pointer. */
- ldlm_pool_shrink_t pl_shrink; /* Shrink callback func pointer. */
- int pl_control; /* Pool features mask */
-
- /* Server side pool fields */
- atomic_t pl_grant_plan; /* Planned number of granted
- * locks for next T. */
- atomic_t pl_grant_step; /* Grant plan step for next T. */
-
- /* Client side pool related fields */
+ cfs_proc_dir_entry_t *pl_proc_dir; /* Pool proc directory. */
+ char pl_name[100]; /* Pool name, should be long
+ * enough to contain complex
+ * proc entry name. */
+ spinlock_t pl_lock; /* Lock for protecting slv/clv
+ * updates. */
+ atomic_t pl_limit; /* Number of allowed locks in
+ * pool, both client and
+ * server side. */
+ atomic_t pl_granted; /* Number of granted locks. */
+ atomic_t pl_grant_rate; /* Grant rate per T. */
+ atomic_t pl_cancel_rate; /* Cancel rate per T. */
+ atomic_t pl_grant_speed; /* Grant speed (GR-CR) per T. */
+ __u64 pl_server_lock_volume; /* Server lock volume.
+ * Protected by pl_lock */
atomic_t pl_lock_volume_factor; /* Lock volume factor. */
- struct lprocfs_stats *pl_stats; /* Pool statistics. */
-};
-static inline int pool_recalc_enabled(struct ldlm_pool *pl)
-{
- return pl->pl_control & LDLM_POOL_CTL_RECALC;
-}
+ time_t pl_recalc_time; /* Time when last slv from
+ * server was obtained. */
+ struct ldlm_pool_ops *pl_ops; /* Recalc and shrink ops. */
-static inline int pool_shrink_enabled(struct ldlm_pool *pl)
-{
- return pl->pl_control & LDLM_POOL_CTL_SHRINK;
-}
+ int pl_grant_plan; /* Planned number of granted
+ * locks for next T. */
+ int pl_grant_step; /* Grant plan step for next
+ * T. */
+
+ struct lprocfs_stats *pl_stats; /* Pool statistics. */
+};
typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
void *req_cookie, ldlm_mode_t mode, int flags,
LDLM_NAMESPACE_MODEST = 1 << 1
} ldlm_appetite_t;
+/* Default value for ->ns_shrink_thumb. If a lock is not an extent lock, its
+ * cost is one page. Here we have 256 pages, which is 1M on i386. Thus, by
+ * default, extent locks with extents longer than 1M are kept in the lru;
+ * others (including ibits locks) are canceled on memory pressure events. */
+#define LDLM_LOCK_SHRINK_THUMB 256
+
struct ldlm_namespace {
char *ns_name;
ldlm_side_t ns_client; /* is this a client-side lock tree? */
- __u64 ns_connect_flags; /* client side connect flags
- * supported by server */
- struct list_head *ns_hash; /* hash table for ns */
+ __u64 ns_connect_flags; /* ns connect flags supported
+ * by server (may be changed via proc,
+ * lru resize may be disabled/enabled) */
+ __u64 ns_orig_connect_flags; /* client side orig connect
+ * flags supported by server */
+ struct list_head *ns_hash; /* hash table for ns */
spinlock_t ns_hash_lock;
__u32 ns_refcount; /* count of resources in the hash */
struct list_head ns_root_list; /* all root resources in ns */
unsigned int ns_max_unused;
unsigned int ns_max_age;
+
+ /* Lower limit on the number of pages in a lock to keep it cached */
+ unsigned int ns_shrink_thumb;
cfs_time_t ns_next_dump; /* next debug dump, jiffies */
atomic_t ns_locks;
ldlm_policy_data_t *, ldlm_mode_t mode,
struct lustre_handle *);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
- int *flags);
+ __u32 *flags);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
ldlm_policy_data_t *policy, int *flags,
void *lvb, __u32 lvb_len, void *lvb_swabber,
struct lustre_handle *lockh, int async);
-struct ptlrpc_request *ldlm_prep_enqueue_req(struct obd_export *exp,
- int bufcount, int *size,
- struct list_head *head, int count);
+int ldlm_prep_enqueue_req(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ struct list_head *cancels,
+ int count);
+int ldlm_prep_elc_req(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count);
int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
const struct ldlm_request *dlm_req,
const struct ldlm_callback_suite *cbs);
struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
void *data, __u32 data_len);
-int ldlm_cli_convert(struct lustre_handle *, int new_mode, int *flags);
+int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_handle_convert0(struct ptlrpc_request *req,
const struct ldlm_request *dlm_req);
struct list_head *cancels,
ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
- int flags, void *opaque);
+ int cancel_flags, void *opaque);
int ldlm_cli_cancel_list(struct list_head *head, int count,
- struct ptlrpc_request *req, int off, int flags);
+ struct ptlrpc_request *req, int flags);
/* mds/handler.c */
/* This has to be here because recursive inclusion sucks. */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
unsigned int gfp_mask);
void ldlm_pool_fini(struct ldlm_pool *pl);
-int ldlm_pool_setup(struct ldlm_pool *pl, __u32 limit);
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
int ldlm_pool_recalc(struct ldlm_pool *pl);
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);