#include <lustre_handles.h>
#include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
+#include <lu_ref.h>
struct obd_ops;
struct obd_device;
#define LDLM_FL_BLOCK_WAIT 0x000008
#define LDLM_FL_CBPENDING 0x000010 /* this lock is being destroyed */
-#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was sent */
+#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was
+ * queued for sending. */
#define LDLM_FL_WAIT_NOREPROC 0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL 0x000080 /* cancellation callback already run */
* list. */
#define LDLM_FL_KMS_IGNORE 0x200000
-/* Don't drop lock covering mmapped file in LRU */
-#define LDLM_FL_NO_LRU 0x400000
-
/* Immediately cancel such locks when they block some other locks. Send
- cancel notification to original lock holder, but expect no reply. */
+ * cancel notification to original lock holder, but expect no reply. This is
+ * for clients (like liblustre) that cannot be expected to reliably respond
+ * to blocking ASTs. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000
/* Flags inherited from parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS (LDLM_FL_CANCEL_ON_BLOCK)
-/* These are flags that are mapped into the flags and ASTs of blocking locks */
-#define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
-/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
-#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
-
/* completion ast to be executed */
#define LDLM_FL_CP_REQD 0x1000000
/* measure lock contention and return -EUSERS if locking contention is high */
#define LDLM_FL_DENY_ON_CONTENTION 0x40000000
+/* These are flags that are mapped into the flags and ASTs of blocking locks */
+#define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
+
+/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
+#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
+
+/*
+ * --------------------------------------------------------------------------
+ * NOTE! From this point on, LDLM_FL_* flags with values above 0x80000000
+ * are not sent over the wire.
+ * --------------------------------------------------------------------------
+ */
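As a hedged illustration of this boundary (the request field and mask below are assumptions, not code from this patch), a sender would copy only the low 32 bits of l_flags into an RPC:

/* illustrative sketch: local-only flags such as LDLM_FL_FAIL_LOC (below)
 * live above the 32-bit boundary and must be masked off before the lock
 * flags are packed into a wire request */
dlm_req->lock_flags = (__u32)(lock->l_flags & 0x00000000ffffffffULL);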
+
+/* Used for marking a lock as a target for -EINTR during cp_ast sleep
+ * emulation and the race with an upcoming bl_ast. */
+#define LDLM_FL_FAIL_LOC 0x100000000ULL
+
/* The blocking callback is overloaded to perform two functions. These flags
* indicate which operation should be performed. */
#define LDLM_CB_BLOCKING 1
#define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
+#define LCK_COMPAT_COS (LCK_COS)
extern ldlm_mode_t lck_compat_array[];
int (*po_setup)(struct ldlm_pool *pl, int limit);
};
-/* One second for pools thread check interval. */
+/**
+ * One second for pools thread check interval. Each pool has its own period.
+ */
#define LDLM_POOLS_THREAD_PERIOD (1)
-/* 5% margin for modest pools. See ldlm_pool.c for details. */
+/**
+ * 5% margin for modest pools. See ldlm_pool.c for details.
+ */
#define LDLM_POOLS_MODEST_MARGIN (5)
-/* A change to SLV in % after which we want to wake up pools thread asap. */
-#define LDLM_POOLS_FAST_SLV_CHANGE (50)
+/**
+ * Default recalc period for server-side pools, in seconds.
+ */
+#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)
+
+/**
+ * Default recalc period for client-side pools, in seconds.
+ */
+#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
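A minimal sketch of how these two defaults are meant to be consumed; the selection below is an assumption based on the ldlm_side_t argument of ldlm_pool_init() (declared at the end of this header), not code from this patch:

/* assumed initialization sketch, not the actual ldlm_pool_init() body */
pl->pl_recalc_period = (client == LDLM_NAMESPACE_SERVER) ?
                       LDLM_POOL_SRV_DEF_RECALC_PERIOD :
                       LDLM_POOL_CLI_DEF_RECALC_PERIOD;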
struct ldlm_pool {
/**
*/
time_t pl_recalc_time;
/**
+ * Recalc period for pool.
+ */
+ time_t pl_recalc_period;
+ /**
* Recalc and shrink ops.
*/
struct ldlm_pool_ops *pl_ops;
/**
- * Planned number of granted locks for next T.
+ * Planned number of granted locks for the next period.
*/
int pl_grant_plan;
/**
- * Grant plan step for next T.
- */
- int pl_grant_step;
- /**
* Pool statistics.
*/
struct lprocfs_stats *pl_stats;
} ldlm_appetite_t;
/*
- * Default value for ->ns_shrink_thumb. If lock is not extent one its cost
- * is one page. Here we have 256 pages which is 1M on i386. Thus by default
- * all extent locks which have more than 1M long extent will be kept in lru,
- * others (including ibits locks) will be canceled on memory pressure event.
- */
-#define LDLM_LOCK_SHRINK_THUMB 256
-
-/*
* Default values for the "max_nolock_size", "contention_time" and
* "contended_locks" namespace tunables.
*/
unsigned int ns_max_unused;
unsigned int ns_max_age;
-
+ /**
+ * Number of lock timeouts.
+ */
+ unsigned int ns_timeouts;
/**
* Seconds.
*/
unsigned int ns_ctime_age_limit;
/**
- * Lower limit to number of pages in lock to keep it in cache.
- */
- unsigned int ns_shrink_thumb;
-
- /**
* Next debug dump, jiffies.
*/
cfs_time_t ns_next_dump;
*
*/
-#define RES_HASH_BITS 10
+#define RES_HASH_BITS 12
#define RES_HASH_SIZE (1UL << RES_HASH_BITS)
#define RES_HASH_MASK (RES_HASH_SIZE - 1)
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
+typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
};
struct ldlm_lock {
- struct portals_handle l_handle; // must be first in the structure
- atomic_t l_refc;
-
- /* internal spinlock protects l_resource. we should hold this lock
- * first before grabbing res_lock.*/
- spinlock_t l_lock;
-
- /* ldlm_lock_change_resource() can change this */
- struct ldlm_resource *l_resource;
-
- /* protected by ns_hash_lock. FIXME */
- struct list_head l_lru;
-
- /* protected by lr_lock, linkage to resource's lock queues */
- struct list_head l_res_link;
-
- struct ldlm_interval *l_tree_node; /* tree node for ldlm_extent */
-
- /* protected by led_lock */
- struct list_head l_export_chain; // per-export chain of locks
-
- /* protected by lr_lock */
- ldlm_mode_t l_req_mode;
- ldlm_mode_t l_granted_mode;
-
+ /**
+ * Must be first in the structure.
+ */
+ struct portals_handle l_handle;
+ /**
+ * Lock reference count.
+ */
+ atomic_t l_refc;
+ /**
+ * Internal spinlock that protects l_resource. This lock must be held
+ * before grabbing res_lock.
+ */
+ spinlock_t l_lock;
+ /**
+ * ldlm_lock_change_resource() can change this.
+ */
+ struct ldlm_resource *l_resource;
+ /**
+ * Protected by ns_hash_lock. List item for the client-side LRU list.
+ */
+ struct list_head l_lru;
+ /**
+ * Protected by lr_lock, linkage to resource's lock queues.
+ */
+ struct list_head l_res_link;
+ /**
+ * Tree node for ldlm_extent.
+ */
+ struct ldlm_interval *l_tree_node;
+ /**
+ * Per-export hash of locks, protected by the per-bucket
+ * exp->exp_lock_hash locks.
+ */
+ struct hlist_node l_exp_hash;
+ /**
+ * Protected by lr_lock. Requested mode.
+ */
+ ldlm_mode_t l_req_mode;
+ /**
+ * Granted mode, also protected by lr_lock.
+ */
+ ldlm_mode_t l_granted_mode;
+ /**
+ * Lock enqueue completion handler.
+ */
ldlm_completion_callback l_completion_ast;
+ /**
+ * Lock blocking ast handler.
+ */
ldlm_blocking_callback l_blocking_ast;
+ /**
+ * Lock glimpse handler.
+ */
ldlm_glimpse_callback l_glimpse_ast;
+ /**
+ * Lock weigh handler.
+ */
+ ldlm_weigh_callback l_weigh_ast;
- struct obd_export *l_export;
- struct obd_export *l_conn_export;
+ /**
+ * Lock export.
+ */
+ struct obd_export *l_export;
+ /**
+ * Lock connection export.
+ */
+ struct obd_export *l_conn_export;
- struct lustre_handle l_remote_handle;
- ldlm_policy_data_t l_policy_data;
+ /**
+ * Remote lock handle.
+ */
+ struct lustre_handle l_remote_handle;
- /* protected by lr_lock */
- __u32 l_flags;
+ ldlm_policy_data_t l_policy_data;
+
+ /*
+ * Protected by lr_lock. Lock flags and various counters: readers,
+ * writers, etc.
+ */
+ __u64 l_flags;
__u32 l_readers;
__u32 l_writers;
+ /*
+ * Set for locks that were removed from the class hash table and will
+ * be destroyed when the last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks.
+ */
__u8 l_destroyed;
- /* If the lock is granted, a process sleeps on this waitq to learn when
+ /**
+ * If the lock is granted, a process sleeps on this waitq to learn when
* it's no longer in use. If the lock is not granted, a process sleeps
- * on this waitq to learn when it becomes granted. */
+ * on this waitq to learn when it becomes granted.
+ */
cfs_waitq_t l_waitq;
- struct timeval l_enqueued_time;
- cfs_time_t l_last_used; /* jiffies */
+ /**
+ * Seconds. Updated whenever there is activity related to the lock,
+ * e.g. enqueuing the lock or sending a blocking AST.
+ */
+ cfs_time_t l_last_activity;
+
+ /**
+ * Jiffies. Should be converted to time if needed.
+ */
+ cfs_time_t l_last_used;
+
struct ldlm_extent l_req_extent;
- /* Client-side-only members */
- __u32 l_lvb_len; /* temporary storage for */
- void *l_lvb_data; /* an LVB received during */
- void *l_lvb_swabber; /* an enqueue */
+ /*
+ * Client-side-only members.
+ */
+
+ /**
+ * Temporary storage for an LVB received during an enqueue operation.
+ */
+ __u32 l_lvb_len;
+ void *l_lvb_data;
+ void *l_lvb_swabber;
+
void *l_ast_data;
spinlock_t l_extents_list_lock;
struct list_head l_extents_list;
struct list_head l_cache_locks_list;
- /* Server-side-only members */
+ /*
+ * Server-side-only members.
+ */
+
+ /** Connection cookie for the client that originated the operation. */
+ __u64 l_client_cookie;
+
+ /**
+ * Protected by elt_lock. Callbacks pending.
+ */
+ struct list_head l_pending_chain;
- /* protected by elt_lock */
- struct list_head l_pending_chain; /* callbacks pending */
- cfs_time_t l_callback_timeout; /* jiffies */
+ cfs_time_t l_callback_timeout;
- __u32 l_pid; /* pid which created this lock */
+ /**
+ * Pid which created this lock.
+ */
+ __u32 l_pid;
- /* for ldlm_add_ast_work_item() */
+ /**
+ * For ldlm_add_ast_work_item().
+ */
struct list_head l_bl_ast;
+ /**
+ * For ldlm_add_ast_work_item().
+ */
struct list_head l_cp_ast;
+ /**
+ * For ldlm_add_ast_work_item().
+ */
+ struct list_head l_rk_ast;
+
struct ldlm_lock *l_blocking_lock;
int l_bl_ast_run;
- /* protected by lr_lock, linkages to "skip lists" */
+ /**
+ * Protected by lr_lock, linkages to "skip lists".
+ */
struct list_head l_sl_mode;
struct list_head l_sl_policy;
+ struct lu_ref l_reference;
};
struct ldlm_resource {
/* when the resource was considered as contended */
cfs_time_t lr_contention_time;
+ /**
+ * List of references to this resource. For debugging.
+ */
+ struct lu_ref lr_reference;
};
struct ldlm_ast_work {
void *ei_cb_bl; /* blocking lock callback */
void *ei_cb_cp; /* lock completion callback */
void *ei_cb_gl; /* lock glimpse callback */
+ void *ei_cb_wg; /* lock weigh callback */
void *ei_cbdata; /* Data to be passed into callbacks. */
+ short ei_async:1; /* async request */
};
extern struct obd_ops ldlm_obd_ops;
...)
__attribute__ ((format (printf, 4, 5)));
-#define LDLM_ERROR(lock, fmt, a...) do { \
+#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
static cfs_debug_limit_state_t _ldlm_cdls; \
- ldlm_lock_debug(&_ldlm_cdls, D_ERROR, lock, \
+ ldlm_lock_debug(&_ldlm_cdls, mask, lock, \
__FILE__, __FUNCTION__, __LINE__, \
"### " fmt , ##a); \
} while (0)
+#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
+#define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
+
#define LDLM_DEBUG(lock, fmt, a...) do { \
ldlm_lock_debug(NULL, D_DLMTRACE, lock, \
__FILE__, __FUNCTION__, __LINE__, \
"### " fmt , ##a); \
} while (0)
-#else
-#define LDLM_DEBUG(lock, fmt, a...) ((void)0)
-#define LDLM_ERROR(lock, fmt, a...) ((void)0)
+#else /* !LIBCFS_DEBUG */
+# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
+# define LDLM_ERROR(lock, fmt, a...) ((void)0)
+# define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) \
+ ((void)0)
#endif
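A brief, illustrative set of call sites (message text invented here): LDLM_DEBUG traces unconditionally under D_DLMTRACE, while LDLM_ERROR and LDLM_WARN go through the rate-limited LDLM_DEBUG_LIMIT path with a per-call-site cfs_debug_limit_state_t.

/* illustrative call sites only */
LDLM_DEBUG(lock, "enqueue granted, mode %d", lock->l_granted_mode);
LDLM_ERROR(lock, "blocking AST timed out");            /* rate-limited */
LDLM_WARN(lock, "lock still has %u readers", lock->l_readers);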
#define LDLM_DEBUG_NOLOCK(format, a...) \
int ldlm_request_cancel(struct ptlrpc_request *req,
const struct ldlm_request *dlm_req, int first);
int ldlm_del_waiting_lock(struct ldlm_lock *lock);
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock);
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
void ldlm_revoke_export_locks(struct obd_export *exp);
int ldlm_get_ref(void);
void ldlm_put_ref(void);
+int ldlm_init_export(struct obd_export *exp);
+void ldlm_destroy_export(struct obd_export *exp);
/* ldlm_lock.c */
ldlm_processing_policy ldlm_get_processing_policy(struct ldlm_resource *res);
struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *data);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
- const struct lustre_handle *);
static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
return __ldlm_handle2lock(h, 0);
}
+#define LDLM_LOCK_REF_DEL(lock) \
+ lu_ref_del(&lock->l_reference, "handle", cfs_current())
+
+static inline struct ldlm_lock *
+ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
+{
+ struct ldlm_lock *lock;
+
+ lock = __ldlm_handle2lock(h, flags);
+ if (lock != NULL)
+ LDLM_LOCK_REF_DEL(lock);
+ return lock;
+}
+
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct lustre_msg *m, int buf_idx,
int increase)
return 0;
}
+int ldlm_error2errno(ldlm_error_t error);
+ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
+ * confuses user-space. */
+
+/**
+ * Release a temporary lock reference obtained by ldlm_handle2lock() or
+ * __ldlm_handle2lock().
+ */
#define LDLM_LOCK_PUT(lock) \
do { \
+ LDLM_LOCK_REF_DEL(lock); \
+ /*LDLM_DEBUG((lock), "put");*/ \
+ ldlm_lock_put(lock); \
+} while (0)
+
+/**
+ * Release a lock reference obtained by some other means (see
+ * LDLM_LOCK_PUT()).
+ */
+#define LDLM_LOCK_RELEASE(lock) \
+do { \
/*LDLM_DEBUG((lock), "put");*/ \
ldlm_lock_put(lock); \
} while (0)
if (c-- == 0) \
break; \
list_del_init(&_lock->member); \
- LDLM_LOCK_PUT(_lock); \
+ LDLM_LOCK_RELEASE(_lock); \
} \
LASSERT(c <= 0); \
})
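To make the pairing explicit, a hedged usage sketch (not code from this patch): a reference obtained through a handle lookup is dropped with LDLM_LOCK_PUT(), while references obtained by other means, including ldlm_handle2lock_long() which has already dropped its "handle" lu_ref, are dropped with LDLM_LOCK_RELEASE().

/* assumed usage sketch */
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
if (lock != NULL) {
        /* ... use the lock ... */
        LDLM_LOCK_PUT(lock);    /* drops the "handle" lu_ref, then puts */
}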
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
+int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
-int ldlm_lock_fast_match(struct ldlm_lock *, int, obd_off, obd_off, void **);
-void ldlm_lock_fast_release(void *, int);
+void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
const struct ldlm_res_id *, ldlm_type_t type,
ldlm_policy_data_t *, ldlm_mode_t mode,
- struct lustre_handle *);
+ struct lustre_handle *, int unref);
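A hedged call sketch for the updated signature (namespace, resource id, policy, mode and flag values are placeholders): passing 0 for the new trailing 'unref' argument is assumed to keep the previous matching behaviour.

/* placeholder arguments; only the call shape matters */
mode = ldlm_lock_match(ns, LDLM_FL_LVB_READY, &res_id, LDLM_EXTENT,
                       &policy, LCK_PR | LCK_PW, &lockh, 0);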
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
__u32 *flags);
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
const struct ldlm_res_id *);
+#define LDLM_RESOURCE_ADDREF(res) do { \
+ lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
+} while (0)
+
+#define LDLM_RESOURCE_DELREF(res) do { \
+ lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
+} while (0)
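A minimal sketch of the intended bracket pattern for these lu_ref debugging helpers (the work in between is assumed):

LDLM_RESOURCE_ADDREF(res);      /* record this user of the resource */
/* ... operate on the resource ... */
LDLM_RESOURCE_DELREF(res);      /* drop the debugging reference */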
+
struct ldlm_callback_suite {
ldlm_completion_callback lcs_completion;
ldlm_blocking_callback lcs_blocking;
ldlm_glimpse_callback lcs_glimpse;
+ ldlm_weigh_callback lcs_weigh;
};
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
+int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
void *data, __u32 lvb_len, void *lvb_swabber,
+ const __u64 *client_cookie,
struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
void *data, __u32 data_len);
ldlm_mode_t mode, int flags, void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
int count, int flags);
-int ldlm_cli_join_lru(struct ldlm_namespace *,
- const struct ldlm_res_id *, int join);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
ldlm_policy_data_t *policy,
#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR 43
+/**
+ * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
+ * than one lock_res is deadlock-safe.
+ */
+enum lock_res_type {
+ LRT_NORMAL,
+ LRT_NEW
+};
+
static inline void lock_res(struct ldlm_resource *res)
{
spin_lock(&res->lr_lock);
}
+static inline void lock_res_nested(struct ldlm_resource *res,
+ enum lock_res_type mode)
+{
+ spin_lock_nested(&res->lr_lock, mode);
+}
+
+
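To make the lockdep annotation concrete, a hedged two-resource sketch (the scenario, e.g. moving a lock between resources, is an assumption): the second acquisition uses the LRT_NEW class so lockdep does not report a false recursive-locking deadlock.

/* assumed two-resource locking sketch */
lock_res(old_res);                      /* first lr_lock, default class */
lock_res_nested(new_res, LRT_NEW);      /* second lr_lock, nested class */
/* ... move state from old_res to new_res ... */
unlock_res(new_res);
unlock_res(old_res);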
static inline void unlock_res(struct ldlm_resource *res)
{
spin_unlock(&res->lr_lock);
void ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);
-void ldlm_pools_wakeup(void);
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client);