From: Mr NeilBrown
Date: Wed, 7 Feb 2024 05:21:48 +0000 (+1100)
Subject: LU-17276 ldlm: convert flock locks to linux interval tree.
X-Git-Tag: 2.15.64~71
X-Git-Url: https://git.whamcloud.com/gitweb?a=commitdiff_plain;h=1c635e263f0aaf4827cf69cbd48b7290efc9e085;p=fs%2Flustre-release.git

LU-17276 ldlm: convert flock locks to linux interval tree.

Convert to using the linux interval tree code.

When the range of a lock is changed as part of adding or removing
an overlapping range, the lock is removed and readded to the tree.

Signed-off-by: Mr NeilBrown
Change-Id: I747b625af1e83210b12daac5102600a3de173a2a
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/53950
Tested-by: Maloo
Tested-by: jenkins
Reviewed-by: Oleg Drokin
Reviewed-by: Yang Sheng
Reviewed-by: James Simmons
---

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 9628255..d02f1c6 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -721,7 +721,7 @@ struct ldlm_ibits_node {
 struct ldlm_flock_node {
         atomic_t lfn_unlock_pending;
         bool lfn_needs_reprocess;
-        struct interval_node *lfn_root;
+        struct interval_tree_root lfn_root;
 };
 
 /** Whether to track references to exports by LDLM locks. */
@@ -834,12 +834,15 @@ struct ldlm_lock {
                 };
                 /* LDLM_FLOCK locks */
                 struct {
-                        struct interval_node l_tree_node_flock;
                         /**
                          * Per export hash of flock locks.
                          * Protected by per-bucket exp->exp_flock_hash locks.
                          */
                         struct hlist_node l_exp_flock_hash;
+                        struct ldlm_lock *l_same_owner;
+                        /* interval tree */
+                        struct rb_node l_fl_rb;
+                        u64 l_fl_subtree_last;
                 };
         };
         /**
diff --git a/lustre/ldlm/ldlm_flock.c b/lustre/ldlm/ldlm_flock.c
index a289ee0..964e436 100644
--- a/lustre/ldlm/ldlm_flock.c
+++ b/lustre/ldlm/ldlm_flock.c
@@ -61,6 +61,12 @@
 #include 
 #include "ldlm_internal.h"
 
+#include 
+
+#define START(node) ((node)->l_policy_data.l_flock.start)
+#define LAST(node) ((node)->l_policy_data.l_flock.end)
+INTERVAL_TREE_DEFINE(struct ldlm_lock, l_fl_rb, u64, l_fl_subtree_last,
+                     START, LAST, static inline, flock);
 
 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                             void *data, int flag);
@@ -116,16 +122,16 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
                              &req->l_exp_flock_hash);
 }
 
-/** Remove cancelled lock from resource interval tree. */
+/* Remove cancelled lock from resource interval tree. */
 void ldlm_flock_unlink_lock(struct ldlm_lock *lock)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct interval_node **root = &res->lr_flock_node.lfn_root;
 
-        if (!interval_is_intree(&lock->l_tree_node_flock)) /* duplicate unlink */
+        if (RB_EMPTY_NODE(&lock->l_fl_rb)) /* duplicate unlink */
                 return;
 
-        interval_erase(&lock->l_tree_node_flock, root);
+        flock_remove(lock, &res->lr_flock_node.lfn_root);
+        RB_CLEAR_NODE(&lock->l_fl_rb);
 }
 
 static inline void
@@ -275,41 +281,31 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
 }
 #endif /* HAVE_SERVER_SUPPORT */
 
-/** Add newly granted lock into interval tree for the resource. */
-static void ldlm_flock_add_lock(struct ldlm_resource *res,
-                                struct list_head *head,
-                                struct ldlm_lock *lock)
+/* Add newly granted lock into interval tree for the resource */
+void ldlm_flock_add_lock(struct ldlm_resource *res,
+                         struct list_head *head,
+                         struct ldlm_lock *lock)
 {
-        struct interval_node **root;
-        struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
-        int rc;
 
         LASSERT(ldlm_is_granted(lock));
+        LASSERT(RB_EMPTY_NODE(&lock->l_fl_rb));
 
-        LASSERT(!interval_is_intree(&lock->l_tree_node_flock));
-
-        rc = interval_set(&lock->l_tree_node_flock, extent->start, extent->end);
-        LASSERT(!rc);
+        flock_insert(lock, &res->lr_flock_node.lfn_root);
 
-        root = &res->lr_flock_node.lfn_root;
-        interval_insert(&lock->l_tree_node_flock, root);
-
-        /* Add the locks into list */
         ldlm_resource_add_lock(res, head, lock);
 }
 
 static void
-ldlm_flock_range_update(struct ldlm_lock *lock, struct ldlm_lock *req)
+ldlm_flock_range_update(struct ldlm_lock *lock, u64 start, u64 end)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct interval_node **root = &res->lr_flock_node.lfn_root;
-        struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
-
-        interval_erase(&lock->l_tree_node_flock, root);
-        interval_set(&lock->l_tree_node_flock, extent->start, extent->end);
-        interval_insert(&lock->l_tree_node_flock, root);
-        EXIT;
+        if (!RB_EMPTY_NODE(&lock->l_fl_rb))
+                flock_remove(lock, &res->lr_flock_node.lfn_root);
+        START(lock) = start;
+        LAST(lock) = end;
+        if (!RB_EMPTY_NODE(&lock->l_fl_rb))
+                flock_insert(lock, &res->lr_flock_node.lfn_root);
 }
 
 /**
@@ -363,6 +359,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
         }
 
 reprocess:
+
         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                 /* This loop determines where this processes locks start
                  * in the resource lr_granted list.
@@ -508,22 +505,16 @@ reprocess:
                                 break;
 
                         if (new->l_policy_data.l_flock.start <
-                            lock->l_policy_data.l_flock.start) {
-                                lock->l_policy_data.l_flock.start =
-                                        new->l_policy_data.l_flock.start;
-                        } else {
-                                new->l_policy_data.l_flock.start =
-                                        lock->l_policy_data.l_flock.start;
-                        }
+                            lock->l_policy_data.l_flock.start)
+                                ldlm_flock_range_update(lock, START(new), LAST(lock));
+                        else
+                                ldlm_flock_range_update(new, START(lock), LAST(new));
 
                         if (new->l_policy_data.l_flock.end >
-                            lock->l_policy_data.l_flock.end) {
-                                lock->l_policy_data.l_flock.end =
-                                        new->l_policy_data.l_flock.end;
-                        } else {
-                                new->l_policy_data.l_flock.end =
-                                        lock->l_policy_data.l_flock.end;
-                        }
+                            lock->l_policy_data.l_flock.end)
+                                ldlm_flock_range_update(lock, START(lock), LAST(new));
+                        else
+                                ldlm_flock_range_update(new, START(new), LAST(lock));
 
                         if (added) {
                                 ldlm_flock_destroy(lock, mode, *flags);
@@ -548,8 +539,7 @@ reprocess:
                             lock->l_policy_data.l_flock.start) {
                                 if (new->l_policy_data.l_flock.end <
                                     lock->l_policy_data.l_flock.end) {
-                                        lock->l_policy_data.l_flock.start =
-                                                new->l_policy_data.l_flock.end + 1;
+                                        ldlm_flock_range_update(lock, LAST(new)+1, LAST(lock));
                                         break;
                                 }
                                 ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
@@ -557,9 +547,7 @@ reprocess:
                         }
                         if (new->l_policy_data.l_flock.end >=
                             lock->l_policy_data.l_flock.end) {
-                                lock->l_policy_data.l_flock.end =
-                                        new->l_policy_data.l_flock.start - 1;
-                                ldlm_flock_range_update(lock, req);
+                                ldlm_flock_range_update(lock, START(lock), START(new) - 1);
                                 continue;
                         }
 
@@ -603,8 +591,7 @@ reprocess:
                                 lock->l_policy_data.l_flock.start;
                         new2->l_policy_data.l_flock.end =
                                 new->l_policy_data.l_flock.start - 1;
-                        lock->l_policy_data.l_flock.start =
-                                new->l_policy_data.l_flock.end + 1;
+                        ldlm_flock_range_update(lock, LAST(new) + 1, LAST(lock));
                         new2->l_conn_export = lock->l_conn_export;
                         if (lock->l_export != NULL) {
                                 new2->l_export = class_export_lock_get(lock->l_export,
diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index 71e2dcc..88131aa 100644
--- a/lustre/ldlm/ldlm_internal.h
+++ b/lustre/ldlm/ldlm_internal.h
@@ -220,6 +220,8 @@ int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
                             enum ldlm_error *err, struct list_head *work_list);
 int ldlm_init_flock_export(struct obd_export *exp);
 void ldlm_destroy_flock_export(struct obd_export *exp);
+void ldlm_flock_add_lock(struct ldlm_resource *req, struct list_head *head,
+                         struct ldlm_lock *lock);
 void ldlm_flock_unlink_lock(struct ldlm_lock *lock);
 
 /* l_lock.c */
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index d28a653..57f7b03 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -481,6 +481,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
                 break;
         case LDLM_FLOCK:
                 INIT_HLIST_NODE(&lock->l_exp_flock_hash);
+                RB_CLEAR_NODE(&lock->l_fl_rb);
                 break;
         case LDLM_EXTENT:
                 RB_CLEAR_NODE(&lock->l_rb);
@@ -1172,7 +1173,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
                     ldlm_is_test_lock(lock) ||
                     ldlm_is_flock_deadlock(lock))
                         RETURN_EXIT;
-                ldlm_resource_add_lock(res, &res->lr_granted, lock);
+                ldlm_flock_add_lock(res, &res->lr_granted, lock);
         } else {
                 LBUG();
         }
diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c
index 420cc62..f6997f5 100644
--- a/lustre/ldlm/ldlm_resource.c
+++ b/lustre/ldlm/ldlm_resource.c
@@ -1419,7 +1419,7 @@ static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
 static bool ldlm_resource_flock_new(struct ldlm_resource *res)
 {
         res->lr_flock_node.lfn_needs_reprocess = false;
-        res->lr_flock_node.lfn_root = NULL;
+        res->lr_flock_node.lfn_root = INTERVAL_TREE_ROOT;
         atomic_set(&res->lr_flock_node.lfn_unlock_pending, 0);
 
         return true;
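
For reference, below is a minimal standalone sketch of the INTERVAL_TREE_DEFINE()
pattern the patch wires up above. The names here (struct flock_range, range_tree_*,
flock_range_resize, flock_range_unlink) are illustrative stand-ins, not Lustre
symbols; the patch itself attaches the tree to struct ldlm_lock via l_fl_rb and
l_fl_subtree_last and keeps the root in struct ldlm_flock_node through Lustre's
interval_tree_root compat type. The sketch assumes a kernel that provides
<linux/interval_tree_generic.h> and struct rb_root_cached.

/*
 * Illustrative sketch only: shows how INTERVAL_TREE_DEFINE() turns an
 * augmented rbtree into an interval tree keyed by inclusive [start, end].
 */
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/interval_tree_generic.h>

struct flock_range {
        struct rb_node  rb;             /* plays the role of l_fl_rb */
        u64             start;          /* like l_policy_data.l_flock.start */
        u64             end;            /* inclusive, like l_flock.end */
        u64             subtree_last;   /* like l_fl_subtree_last */
};

#define RANGE_START(n) ((n)->start)
#define RANGE_LAST(n)  ((n)->end)

/* Generates range_tree_insert/remove/iter_first/iter_next helpers. */
INTERVAL_TREE_DEFINE(struct flock_range, rb, u64, subtree_last,
                     RANGE_START, RANGE_LAST, static inline, range_tree);

/* Callers initialise nodes with RB_CLEAR_NODE(&node->rb), as
 * ldlm_lock_new() does for l_fl_rb in the patch above. */

/* Analogue of ldlm_flock_range_update(): the endpoints feed the augmented
 * subtree_last values, so they may only change while the node is out of
 * the tree; a node that was never inserted is left untouched. */
static inline void flock_range_resize(struct flock_range *node,
                                      struct rb_root_cached *root,
                                      u64 start, u64 end)
{
        bool in_tree = !RB_EMPTY_NODE(&node->rb);

        if (in_tree)
                range_tree_remove(node, root);
        node->start = start;
        node->end = end;
        if (in_tree)
                range_tree_insert(node, root);
}

/* Analogue of ldlm_flock_unlink_lock(): RB_CLEAR_NODE() marks the node as
 * "not in any tree" so a duplicate unlink is detected cheaply. */
static inline void flock_range_unlink(struct flock_range *node,
                                      struct rb_root_cached *root)
{
        if (RB_EMPTY_NODE(&node->rb))
                return;
        range_tree_remove(node, root);
        RB_CLEAR_NODE(&node->rb);
}

This remove/update/re-insert dance is exactly what the commit message describes:
because the linux interval tree caches the largest "last" value in each subtree,
a granted lock's range cannot be edited in place, so ldlm_flock_range_update()
takes the lock out of the resource tree, updates START()/LAST(), and re-inserts
it, while ungranted locks (still RB_EMPTY_NODE()) only get their range fields set.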