* Internal structures per lock type.
*/
union {
- struct interval_node l_tree_node;
- struct ldlm_ibits_node *l_ibits_node;
+ /* LDLM_EXTENT locks only */
+ struct {
+ /* Originally requested extent for the extent lock. */
+ struct ldlm_extent l_req_extent;
+ struct interval_node l_tree_node;
+ };
+ /* LDLM_PLAIN and LDLM_IBITS locks */
+ struct {
+ /**
+ * Protected by lr_lock, linkages to "skip lists".
+ * For more explanations of skip lists see ldlm/ldlm_inodebits.c
+ */
+ struct list_head l_sl_mode;
+ struct list_head l_sl_policy;
+
+ struct ldlm_ibits_node *l_ibits_node;
+ };
+ /* LDLM_FLOCK locks */
+ struct {
+ struct interval_node l_tree_node_flock;
+ /**
+ * Per export hash of flock locks.
+ * Protected by per-bucket exp->exp_flock_hash locks.
+ */
+ struct hlist_node l_exp_flock_hash;
+ };
};
/**
* Per export hash of locks.
*/
struct hlist_node l_exp_hash;
/**
- * Per export hash of flock locks.
- * Protected by per-bucket exp->exp_flock_hash locks.
- */
- struct hlist_node l_exp_flock_hash;
- /**
* Requested mode.
* Protected by lr_lock.
*/
*/
ktime_t l_last_used;
- /** Originally requested extent for the extent lock. */
- struct ldlm_extent l_req_extent;
-
/*
* Client-side-only members.
*/
*/
struct ldlm_lock *l_blocking_lock;
- /**
- * Protected by lr_lock, linkages to "skip lists".
- * For more explanations of skip lists see ldlm/ldlm_inodebits.c
- */
- struct list_head l_sl_mode;
- struct list_head l_sl_policy;
-
/** Reference tracking structure to debug leaked locks. */
struct lu_ref l_reference;
#if LUSTRE_TRACKS_LOCK_EXP_REFS
struct ldlm_resource *res = lock->l_resource;
struct interval_node **root = &res->lr_flock_node.lfn_root;
- if (!interval_is_intree(&lock->l_tree_node)) /* duplicate unlink */
+ if (!interval_is_intree(&lock->l_tree_node_flock)) /* duplicate unlink */
return;
- interval_erase(&lock->l_tree_node, root);
+ interval_erase(&lock->l_tree_node_flock, root);
}
static inline void
LASSERT(ldlm_is_granted(lock));
- LASSERT(!interval_is_intree(&lock->l_tree_node));
+ LASSERT(!interval_is_intree(&lock->l_tree_node_flock));
- rc = interval_set(&lock->l_tree_node, extent->start, extent->end);
+ rc = interval_set(&lock->l_tree_node_flock, extent->start, extent->end);
LASSERT(!rc);
root = &res->lr_flock_node.lfn_root;
- interval_insert(&lock->l_tree_node, root);
+ interval_insert(&lock->l_tree_node_flock, root);
/* Add the locks into list */
ldlm_resource_add_lock(res, head, lock);
struct interval_node **root = &res->lr_flock_node.lfn_root;
struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
- interval_erase(&lock->l_tree_node, root);
- interval_set(&lock->l_tree_node, extent->start, extent->end);
- interval_insert(&lock->l_tree_node, root);
+ interval_erase(&lock->l_tree_node_flock, root);
+ interval_set(&lock->l_tree_node_flock, extent->start, extent->end);
+ interval_insert(&lock->l_tree_node_flock, root);
EXIT;
}
INIT_LIST_HEAD(&lock->l_rk_ast);
init_waitqueue_head(&lock->l_waitq);
lock->l_blocking_lock = NULL;
- INIT_LIST_HEAD(&lock->l_sl_mode);
- INIT_LIST_HEAD(&lock->l_sl_policy);
+ switch (resource->lr_type) {
+ case LDLM_IBITS:
+ case LDLM_PLAIN:
+ INIT_LIST_HEAD(&lock->l_sl_mode);
+ INIT_LIST_HEAD(&lock->l_sl_policy);
+ break;
+ case LDLM_FLOCK:
+ INIT_HLIST_NODE(&lock->l_exp_flock_hash);
+ break;
+ case LDLM_EXTENT:
+ case LDLM_MAX_TYPE:
+ break;
+ }
INIT_HLIST_NODE(&lock->l_exp_hash);
- INIT_HLIST_NODE(&lock->l_exp_flock_hash);
lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
LDLM_NSS_LOCKS);