Whamcloud - gitweb
LU-17276 ldlm: add interval in flock 47/53447/11
authorYang Sheng <ys@whamcloud.com>
Wed, 13 Dec 2023 20:30:36 +0000 (04:30 +0800)
committerOleg Drokin <green@whamcloud.com>
Thu, 15 Feb 2024 07:08:04 +0000 (07:08 +0000)
Add the changes necessary to track flock locks in a per-resource interval tree.

Signed-off-by: Yang Sheng <ys@whamcloud.com>
Change-Id: I94c416b4215b863b54eccfe7025f2976fe40181a
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/53447
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Alex Zhuravlev <bzzz@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/lustre_dlm.h
lustre/ldlm/ldlm_extent.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_resource.c

index a5a3404..f4c78ad 100644 (file)
@@ -730,6 +730,7 @@ struct ldlm_ibits_node {
 struct ldlm_flock_node {
        atomic_t                lfn_unlock_pending;
        bool                    lfn_needs_reprocess;
+       struct interval_node   *lfn_root;
 };
 
 /** Whether to track references to exports by LDLM locks. */
index c734f18..9ecafb6 100644 (file)
@@ -958,7 +958,8 @@ static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
 
        ENTRY;
 
-       LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
+       LASSERT(lock->l_resource->lr_type == LDLM_EXTENT ||
+               lock->l_resource->lr_type == LDLM_FLOCK);
        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
        if (node == NULL)
                RETURN(NULL);
@@ -982,7 +983,8 @@ void ldlm_interval_attach(struct ldlm_interval *n,
                          struct ldlm_lock *l)
 {
        LASSERT(l->l_tree_node == NULL);
-       LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
+       LASSERT(l->l_resource->lr_type == LDLM_EXTENT ||
+               l->l_resource->lr_type == LDLM_FLOCK);
 
        list_add_tail(&l->l_sl_policy, &n->li_group);
        l->l_tree_node = n;
index c2da61a..a8a4e41 100644 (file)
@@ -116,6 +116,24 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
                             &req->l_exp_flock_hash);
 }
 
+/**
+ * Remove a cancelled flock lock from the resource's interval tree.
+ *
+ * Safe against duplicate calls: a lock whose interval node is missing
+ * or already out of the tree is silently ignored.
+ */
+void ldlm_flock_unlink_lock(struct ldlm_lock *lock)
+{
+       struct ldlm_resource *res = lock->l_resource;
+       struct ldlm_interval *node = lock->l_tree_node;
+
+       if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
+               return;
+
+       /* NOTE(review): presumably ldlm_interval_detach() returns the node
+        * only when no other lock still shares it — confirm against the
+        * ldlm_extent.c implementation; only then is it erased and freed. */
+       node = ldlm_interval_detach(lock);
+       if (node) {
+               struct interval_node **root = &res->lr_flock_node.lfn_root;
+
+               interval_erase(&node->li_node, root);
+               ldlm_interval_free(node);
+       }
+}
+
 static inline void
 ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
 {
@@ -137,6 +155,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
                 */
                ldlm_lock_decref_internal_nolock(lock, mode);
        }
+       ldlm_flock_unlink_lock(lock);
 
        ldlm_lock_destroy_nolock(lock);
        EXIT;
@@ -264,6 +283,64 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
 }
 #endif /* HAVE_SERVER_SUPPORT */
 
+/**
+ * Add a newly granted flock lock into the resource's interval tree and
+ * insert it into the resource lock list at position \a head.
+ *
+ * The lock must already be granted and must own an interval node that is
+ * not yet in the tree.  If an interval with the same [start, end] range
+ * already exists in the tree, the lock's own node is released and the
+ * lock is attached to the existing node instead.
+ */
+static void ldlm_flock_add_lock(struct ldlm_resource *res,
+                               struct list_head *head,
+                               struct ldlm_lock *lock)
+{
+       struct interval_node *found, **root;
+       struct ldlm_interval *node = lock->l_tree_node;
+       struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
+       int rc;
+
+       LASSERT(ldlm_is_granted(lock));
+
+       LASSERT(node != NULL);
+       LASSERT(!interval_is_intree(&node->li_node));
+
+       /* interval_set() fails only for an invalid range, which a granted
+        * lock must not have — hence the assertion rather than error
+        * handling. */
+       rc = interval_set(&node->li_node, extent->start, extent->end);
+       LASSERT(!rc);
+
+       root = &res->lr_flock_node.lfn_root;
+       found = interval_insert(&node->li_node, root);
+       if (found) { /* The same extent found. */
+               /* Share the pre-existing node: drop ours, join its group. */
+               struct ldlm_interval *tmp = ldlm_interval_detach(lock);
+
+               LASSERT(tmp != NULL);
+               ldlm_interval_free(tmp);
+               ldlm_interval_attach(to_ldlm_interval(found), lock);
+       }
+
+       /* Add the locks into list */
+       ldlm_resource_add_lock(res, head, lock);
+}
+
+/**
+ * Re-index \a lock in the resource interval tree after its extent range
+ * changed (e.g. the lock was shrunk during flock reprocessing).
+ *
+ * If \a lock shares no private node (its node is held jointly and detach
+ * yields nothing), the node of \a req is taken over instead; otherwise
+ * the lock's own node is erased from the tree before being re-inserted
+ * with the updated [start, end] range.
+ */
+static void
+ldlm_flock_range_update(struct ldlm_lock *lock, struct ldlm_lock *req)
+{
+       struct ldlm_resource *res = lock->l_resource;
+       struct interval_node *found, **root = &res->lr_flock_node.lfn_root;
+       struct ldlm_interval *node;
+       struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
+
+       node = ldlm_interval_detach(lock);
+       if (!node) {
+               /* Steal req's node; req is expected to own one here —
+                * NOTE(review): confirm req always has a detachable node
+                * on this path. */
+               node = ldlm_interval_detach(req);
+               LASSERT(node);
+       } else {
+               interval_erase(&node->li_node, root);
+       }
+       /* NOTE(review): interval_set() return value is checked via LASSERT
+        * in ldlm_flock_add_lock() but ignored here — confirm the range is
+        * guaranteed valid on this path. */
+       interval_set(&node->li_node, extent->start, extent->end);
+
+       found = interval_insert(&node->li_node, root);
+       if (found) { /* The policy group found. */
+               ldlm_interval_free(node);
+               node = to_ldlm_interval(found);
+       }
+       ldlm_interval_attach(node, lock);
+       /* NOTE(review): EXIT without a matching ENTRY — harmless debug
+        * macro, but inconsistent with the usual Lustre pairing. */
+       EXIT;
+}
+
 /**
  * Process a granting attempt for flock lock.
  * Must be called under ns lock held.
@@ -501,6 +578,7 @@ reprocess:
                    lock->l_policy_data.l_flock.end) {
                        lock->l_policy_data.l_flock.end =
                                new->l_policy_data.l_flock.start - 1;
+                       ldlm_flock_range_update(lock, req);
                        continue;
                }
 
@@ -561,7 +639,7 @@ reprocess:
                                                         lock->l_granted_mode);
 
                /* insert new2 at lock */
-               ldlm_resource_add_lock(res, &lock->l_res_link, new2);
+               ldlm_flock_add_lock(res, &lock->l_res_link, new2);
                LDLM_LOCK_RELEASE(new2);
                break;
        }
@@ -581,7 +659,7 @@ reprocess:
                 * lock for the next owner, or might not be a lock at
                 * all, but instead points at the head of the list
                 */
-               ldlm_resource_add_lock(res, &lock->l_res_link, req);
+               ldlm_flock_add_lock(res, &lock->l_res_link, req);
        }
 
        if (*flags != LDLM_FL_WAIT_NOREPROC) {
index dd90ea2..db03672 100644 (file)
@@ -215,6 +215,7 @@ int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
                            enum ldlm_error *err, struct list_head *work_list);
 int ldlm_init_flock_export(struct obd_export *exp);
 void ldlm_destroy_flock_export(struct obd_export *exp);
+void ldlm_flock_unlink_lock(struct ldlm_lock *lock);
 
 /* l_lock.c */
 void l_check_ns_lock(struct ldlm_namespace *ns);
index 7c0a486..8448819 100644 (file)
@@ -233,7 +233,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
                 if (lock->l_lvb_data != NULL)
                         OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);
 
-               if (res->lr_type == LDLM_EXTENT) {
+               if (res->lr_type == LDLM_EXTENT || res->lr_type == LDLM_FLOCK) {
                        ldlm_interval_free(ldlm_interval_detach(lock));
                } else if (res->lr_type == LDLM_IBITS) {
                        if (lock->l_ibits_node != NULL)
@@ -1707,6 +1707,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
 
        switch (type) {
        case LDLM_EXTENT:
+       case LDLM_FLOCK:
                rc = ldlm_extent_alloc_lock(lock);
                break;
        case LDLM_IBITS:
index 2d09fdd..09cdaab 100644 (file)
@@ -1415,6 +1415,7 @@ static bool ldlm_resource_inodebits_new(struct ldlm_resource *res)
 static bool ldlm_resource_flock_new(struct ldlm_resource *res)
 {
        res->lr_flock_node.lfn_needs_reprocess = false;
+       res->lr_flock_node.lfn_root = NULL;
        atomic_set(&res->lr_flock_node.lfn_unlock_pending, 0);
 
        return true;
@@ -1709,6 +1710,9 @@ void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
        case LDLM_IBITS:
                ldlm_inodebits_unlink_lock(lock);
                break;
+       case LDLM_FLOCK:
+               ldlm_flock_unlink_lock(lock);
+               break;
        }
        list_del_init(&lock->l_res_link);
 }