#include <lustre_handles.h>
#include <lustre_export.h> /* for obd_export, for LDLM_DEBUG */
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
+#include <lu_ref.h>
struct obd_ops;
struct obd_device;
#define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
#define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
+#define LCK_COMPAT_COS (LCK_COS)
extern ldlm_mode_t lck_compat_array[];
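+
+/*
+ * Illustrative sketch (not part of this patch): lck_compat_array[] maps a
+ * granted mode to its LCK_COMPAT_* mask, so compatibility of a new mode
+ * against an already granted one is a single mask test.  The new
+ * LCK_COMPAT_COS entry makes COS (commit-on-sharing) locks compatible only
+ * with other COS locks.  example_lockmode_compat() is a hypothetical name
+ * used only for this sketch.
+ *
+ *     static inline int example_lockmode_compat(ldlm_mode_t exist_mode,
+ *                                               ldlm_mode_t new_mode)
+ *     {
+ *             /* non-zero iff new_mode may be granted alongside exist_mode */
+ *             return lck_compat_array[exist_mode] & new_mode;
+ *     }
+ */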
int (*po_setup)(struct ldlm_pool *pl, int limit);
};
-/**
- * One second for pools thread check interval. Each pool has own period.
+/**
+ * One second for the pools thread check interval. Each pool has its own period.
*/
#define LDLM_POOLS_THREAD_PERIOD (1)
-/**
- * 5% margin for modest pools. See ldlm_pool.c for details.
+/**
+ * 5% margin for modest pools. See ldlm_pool.c for details.
*/
#define LDLM_POOLS_MODEST_MARGIN (5)
unsigned int ns_max_unused;
unsigned int ns_max_age;
-
+ unsigned int ns_timeouts;
/**
* Seconds.
*/
/**
 * Lower limit on the number of pages in a lock to keep it in cache.
*/
- unsigned int ns_shrink_thumb;
+ unsigned long ns_shrink_thumb;
/**
* Next debug dump, jiffies.
typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, int flags,
void *data);
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
+typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
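+
+/*
+ * Sketch only (not from this patch): a weigh callback returns a relative
+ * cost of keeping the lock cached, for example the number of pages covered
+ * by an extent lock.  The callback name below is hypothetical.
+ *
+ *     static unsigned long example_weigh_ast(struct ldlm_lock *lock)
+ *     {
+ *             /* rough page count covered by the lock's extent */
+ *             return (lock->l_policy_data.l_extent.end -
+ *                     lock->l_policy_data.l_extent.start + 1) >> CFS_PAGE_SHIFT;
+ *     }
+ */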
/* Interval node data for each LDLM_EXTENT lock */
struct ldlm_interval {
};
struct ldlm_lock {
- /**
+ /**
* Must be first in the structure.
*/
struct portals_handle l_handle;
* Lock reference count.
*/
atomic_t l_refc;
- /**
+ /**
 * Internal spinlock protecting l_resource. We should hold this lock
 * before grabbing res_lock.
*/
spinlock_t l_lock;
- /**
- * ldlm_lock_change_resource() can change this.
+ /**
+ * ldlm_lock_change_resource() can change this.
*/
struct ldlm_resource *l_resource;
- /**
+ /**
 * Protected by ns_hash_lock. List item for the client-side LRU list.
*/
struct list_head l_lru;
- /**
- * Protected by lr_lock, linkage to resource's lock queues.
+ /**
+ * Protected by lr_lock, linkage to resource's lock queues.
*/
struct list_head l_res_link;
- /**
- * Tree node for ldlm_extent.
+ /**
+ * Tree node for ldlm_extent.
*/
struct ldlm_interval *l_tree_node;
- /**
+ /**
 * Protected by per-bucket exp->exp_lock_hash locks. Per-export hash
* of locks.
*/
struct hlist_node l_exp_hash;
- /**
- * Protected by lr_lock. Requested mode.
+ /**
+ * Protected by lr_lock. Requested mode.
*/
ldlm_mode_t l_req_mode;
/**
* Lock glimpse handler.
*/
ldlm_glimpse_callback l_glimpse_ast;
+ ldlm_weigh_callback l_weigh_ast;
/**
* Lock export.
*/
__u8 l_destroyed;
- /**
+ /**
* If the lock is granted, a process sleeps on this waitq to learn when
* it's no longer in use. If the lock is not granted, a process sleeps
- * on this waitq to learn when it becomes granted.
+ * on this waitq to learn when it becomes granted.
*/
cfs_waitq_t l_waitq;
struct timeval l_enqueued_time;
/**
- * Jiffies. Should be converted to time if needed.
+ * Jiffies. Should be converted to time if needed.
*/
cfs_time_t l_last_used;
struct ldlm_extent l_req_extent;
- /*
- * Client-side-only members.
+ /*
+ * Client-side-only members.
*/
-
- /**
+
+ /**
* Temporary storage for an LVB received during an enqueue operation.
*/
__u32 l_lvb_len;
struct list_head l_cache_locks_list;
- /*
- * Server-side-only members.
+ /*
+ * Server-side-only members.
*/
- /**
+ /** Connection cookie for the client that originated the operation. */
+ __u64 l_client_cookie;
+
+ /**
* Protected by elt_lock. Callbacks pending.
*/
struct list_head l_pending_chain;
cfs_time_t l_callback_timeout;
- /**
- * Pid which created this lock.
+ /**
+ * PID of the process that created this lock.
*/
__u32 l_pid;
- /**
- * For ldlm_add_ast_work_item().
+ /**
+ * For ldlm_add_ast_work_item().
*/
struct list_head l_bl_ast;
- /**
- * For ldlm_add_ast_work_item().
+ /**
+ * For ldlm_add_ast_work_item().
*/
struct list_head l_cp_ast;
- /**
- * For ldlm_add_ast_work_item().
+ /**
+ * For ldlm_add_ast_work_item().
*/
struct list_head l_rk_ast;
struct ldlm_lock *l_blocking_lock;
int l_bl_ast_run;
- /**
- * Protected by lr_lock, linkages to "skip lists".
+ /**
+ * Protected by lr_lock, linkages to "skip lists".
*/
struct list_head l_sl_mode;
struct list_head l_sl_policy;
+ struct lu_ref l_reference;
};
struct ldlm_resource {
 /* when the resource was last considered contended */
cfs_time_t lr_contention_time;
+ /**
+ * List of references to this resource. For debugging.
+ */
+ struct lu_ref lr_reference;
};
struct ldlm_ast_work {
void *ei_cb_bl; /* blocking lock callback */
void *ei_cb_cp; /* lock completion callback */
void *ei_cb_gl; /* lock glimpse callback */
+ void *ei_cb_wg; /* lock weigh callback */
void *ei_cbdata; /* Data to be passed into callbacks. */
short ei_async:1; /* async request */
};
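+
+/*
+ * Hypothetical initializer (illustration only): a caller fills the
+ * enqueue-info callbacks, including the new ei_cb_wg weigh callback.  The
+ * my_*_ast names are placeholders, and the fields not shown in this excerpt
+ * (ei_type, ei_mode) are assumed from the full structure definition.
+ *
+ *     struct ldlm_enqueue_info einfo = {
+ *             .ei_type   = LDLM_EXTENT,
+ *             .ei_mode   = LCK_PR,
+ *             .ei_cb_bl  = my_blocking_ast,
+ *             .ei_cb_cp  = ldlm_completion_ast,
+ *             .ei_cb_gl  = my_glimpse_ast,
+ *             .ei_cb_wg  = my_weigh_ast,
+ *             .ei_cbdata = NULL,
+ *     };
+ */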
struct lustre_handle *lockh);
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, int flags);
void ldlm_cancel_callback(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *data);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-struct ldlm_lock *ldlm_handle2lock_ns(struct ldlm_namespace *,
- const struct lustre_handle *);
static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
{
return __ldlm_handle2lock(h, 0);
}
+#define LDLM_LOCK_REF_DEL(lock) \
+ lu_ref_del(&lock->l_reference, "handle", cfs_current())
+
+static inline struct ldlm_lock *
+ldlm_handle2lock_long(const struct lustre_handle *h, int flags)
+{
+ struct ldlm_lock *lock;
+
+ lock = __ldlm_handle2lock(h, flags);
+ if (lock != NULL)
+ LDLM_LOCK_REF_DEL(lock);
+ return lock;
+}
+
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct lustre_msg *m, int buf_idx,
int increase)
return 0;
}
+int ldlm_error2errno(ldlm_error_t error);
+ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
+ * confuses user-space. */
+
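+/*
+ * Illustrative only: ldlm_error2errno() maps a DLM status (ldlm_error_t)
+ * onto a POSIX errno (0 or negative) before it is handed back to
+ * user-space, and ldlm_errno2error() performs the reverse mapping, e.g.
+ *
+ *     int rc = ldlm_error2errno(err);          /* err: an ldlm_error_t */
+ *     ldlm_error_t e = ldlm_errno2error(rc);   /* back again */
+ */
+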
+/**
+ * Release a temporary lock reference obtained by ldlm_handle2lock() or
+ * __ldlm_handle2lock().
+ */
#define LDLM_LOCK_PUT(lock) \
do { \
+ LDLM_LOCK_REF_DEL(lock); \
+ /*LDLM_DEBUG((lock), "put");*/ \
+ ldlm_lock_put(lock); \
+} while (0)
+
+/**
+ * Release a lock reference obtained by some other means (see
+ * LDLM_LOCK_PUT()).
+ */
+#define LDLM_LOCK_RELEASE(lock) \
+do { \
/*LDLM_DEBUG((lock), "put");*/ \
ldlm_lock_put(lock); \
} while (0)
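+
+/*
+ * Pairing sketch (illustration only, follows from the macros above): a lock
+ * looked up through ldlm_handle2lock() keeps its "handle" lu_ref and is
+ * dropped with LDLM_LOCK_PUT(), while ldlm_handle2lock_long() removes that
+ * lu_ref up front, so its result is dropped with LDLM_LOCK_RELEASE():
+ *
+ *     struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+ *     if (lock != NULL) {
+ *             ... short-term use ...
+ *             LDLM_LOCK_PUT(lock);
+ *     }
+ *
+ *     lock = ldlm_handle2lock_long(lockh, 0);
+ *     if (lock != NULL) {
+ *             ... longer-lived use ...
+ *             LDLM_LOCK_RELEASE(lock);
+ *     }
+ */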
if (c-- == 0) \
break; \
list_del_init(&_lock->member); \
- LDLM_LOCK_PUT(_lock); \
+ LDLM_LOCK_RELEASE(_lock); \
} \
LASSERT(c <= 0); \
})
void ldlm_lock_destroy(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
+int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
-int ldlm_lock_fast_match(struct ldlm_lock *, int, obd_off, obd_off, void **);
-void ldlm_lock_fast_release(void *, int);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
const struct ldlm_res_id *, ldlm_type_t type,
ldlm_policy_data_t *, ldlm_mode_t mode,
- struct lustre_handle *);
+ struct lustre_handle *, int unref);
struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
__u32 *flags);
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_cancel_locks_for_export(struct obd_export *export);
void ldlm_reprocess_all(struct ldlm_resource *res);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
const struct ldlm_res_id *);
+#define LDLM_RESOURCE_ADDREF(res) do { \
+ lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
+} while (0)
+
+#define LDLM_RESOURCE_DELREF(res) do { \
+ lu_ref_del(&(res)->lr_reference, __FUNCTION__, cfs_current()); \
+} while (0)
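+
+/*
+ * Usage sketch (illustration only): the ADDREF/DELREF pair brackets a
+ * section that works on a resource, so lu_ref can attribute a leaked
+ * reference to __FUNCTION__ when debugging:
+ *
+ *     LDLM_RESOURCE_ADDREF(res);
+ *     lock_res(res);
+ *     ... inspect or modify res ...
+ *     unlock_res(res);
+ *     LDLM_RESOURCE_DELREF(res);
+ */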
+
struct ldlm_callback_suite {
ldlm_completion_callback lcs_completion;
ldlm_blocking_callback lcs_blocking;
ldlm_glimpse_callback lcs_glimpse;
+ ldlm_weigh_callback lcs_weigh;
};
/* ldlm_request.c */
int ldlm_expired_completion_wait(void *data);
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag);
int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
+int ldlm_completion_ast_async(struct ldlm_lock *lock, int flags, void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, int flags, void *data);
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
ldlm_completion_callback completion,
ldlm_glimpse_callback glimpse,
void *data, __u32 lvb_len, void *lvb_swabber,
+ const __u64 *client_cookie,
struct lustre_handle *lockh);
int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
void *data, __u32 data_len);
ldlm_mode_t mode, int flags, void *opaque);
int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
int count, int flags);
-int ldlm_cli_join_lru(struct ldlm_namespace *,
- const struct ldlm_res_id *, int join);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
ldlm_policy_data_t *policy,
#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
#define IOC_LDLM_MAX_NR 43
+/**
+ * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
+ * than one lock_res is dead-lock safe.
+ */
+enum lock_res_type {
+ LRT_NORMAL,
+ LRT_NEW
+};
+
static inline void lock_res(struct ldlm_resource *res)
{
spin_lock(&res->lr_lock);
}
+static inline void lock_res_nested(struct ldlm_resource *res,
+ enum lock_res_type mode)
+{
+ spin_lock_nested(&res->lr_lock, mode);
+}
+
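+/*
+ * Illustrative sketch (hypothetical call site): when two resources must be
+ * held at once, e.g. while moving a lock from one resource to another, the
+ * second lock_res is taken with LRT_NEW so lockdep does not flag the nested
+ * acquisition as a self-deadlock:
+ *
+ *     lock_res(oldres);
+ *     lock_res_nested(newres, LRT_NEW);
+ *     ... relink the lock ...
+ *     unlock_res(newres);
+ *     unlock_res(oldres);
+ */
+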
static inline void unlock_res(struct ldlm_resource *res)
{
spin_unlock(&res->lr_lock);