LU-16046 ldlm: group lock fix 38/48038/6
author Vitaly Fertman <c17818@cray.com>
Wed, 8 Jun 2022 20:05:45 +0000 (23:05 +0300)
committer Oleg Drokin <green@whamcloud.com>
Sat, 15 Oct 2022 05:58:38 +0000 (05:58 +0000)
The original LU-9964 fix had a problem: with many pages in memory,
grouplock unlock takes 10+ seconds just to discard them.

The current patch makes grouplock unlock asynchronous. It introduces
logic similar to the original one, but at the mdc/osc layer.
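
For context, an application takes a grouplock with the LL_IOC_GROUP_LOCK
ioctl and drops it with LL_IOC_GROUP_UNLOCK; it is this unlock path that
becomes asynchronous. A minimal userspace sketch, assuming a Lustre
client install (the mount path and gid 1234 are illustrative, matching
the OG1234...g1234 commands multiop runs in the tests below):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/lustre/lustre_user.h>   /* header path varies by release */

int main(void)
{
        int gid = 1234;
        int fd = open("/mnt/lustre/f0", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return 1;
        if (ioctl(fd, LL_IOC_GROUP_LOCK, gid))  /* enqueues LCK_GROUP */
                return 1;
        /* ... I/O under the grouplock ... */
        ioctl(fd, LL_IOC_GROUP_UNLOCK, gid);    /* the now-async unlock */
        close(fd);
        return 0;
}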

Add a new test similar to sanity test_244b, but for DOM layout files.

HPE-bug-id: LUS-10644, LUS-10906
Signed-off-by: Vitaly Fertman <vitaly.fertman@hpe.com>
Change-Id: Ib6d6a3a41baff5b0161468abfd959f52e2a1b497
Reviewed-on: https://es-gerrit.dev.cray.com/159856
Reviewed-by: Andriy Skulysh <andriy.skulysh@hpe.com>
Reviewed-by: Alexander Boyko <c17825@cray.com>
Tested-by: Elena Gryaznova <c17455@cray.com>
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/48038
Reviewed-by: Alexander Boyko <alexander.boyko@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
lustre/include/lustre_osc.h
lustre/mdc/mdc_dev.c
lustre/osc/osc_lock.c
lustre/osc/osc_object.c
lustre/tests/sanity.sh

diff --git a/lustre/include/lustre_osc.h b/lustre/include/lustre_osc.h
index 8b574b8..f98bc68 100644
@@ -311,6 +311,11 @@ struct osc_object {
 
        const struct osc_object_operations *oo_obj_ops;
        bool                    oo_initialized;
+
+       wait_queue_head_t       oo_group_waitq;
+       struct mutex            oo_group_mutex;
+       __u64                   oo_group_users;
+       unsigned long           oo_group_gid;
 };
 
 static inline void osc_build_res_name(struct osc_object *osc,
@@ -648,6 +653,16 @@ int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
 int osc_object_find_cbdata(const struct lu_env *env, struct cl_object *obj,
                           ldlm_iterator_t iter, void *data);
 int osc_object_prune(const struct lu_env *env, struct cl_object *obj);
+void osc_grouplock_inc_locked(struct osc_object *osc, struct ldlm_lock *lock);
+void osc_grouplock_dec(struct osc_object *osc, struct ldlm_lock *lock);
+int osc_grouplock_enqueue_init(const struct lu_env *env,
+                              struct osc_object *obj,
+                              struct osc_lock *oscl,
+                              struct lustre_handle *lh);
+void osc_grouplock_enqueue_fini(const struct lu_env *env,
+                               struct osc_object *obj,
+                               struct osc_lock *oscl,
+                               struct lustre_handle *lh);
 
 /* osc_request.c */
 void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd);
diff --git a/lustre/mdc/mdc_dev.c b/lustre/mdc/mdc_dev.c
index 4f9e3cc..8813ff7 100644
@@ -339,6 +339,7 @@ static int mdc_dlm_canceling(const struct lu_env *env,
         * the object has been destroyed. */
        if (obj != NULL) {
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+               void *data;
 
                /* Destroy pages covered by the extent of the DLM lock */
                result = mdc_lock_flush(env, cl2osc(obj), cl_index(obj, 0),
@@ -348,12 +349,17 @@ static int mdc_dlm_canceling(const struct lu_env *env,
                 */
                /* losing a lock, update kms */
                lock_res_and_lock(dlmlock);
+               data = dlmlock->l_ast_data;
                dlmlock->l_ast_data = NULL;
                cl_object_attr_lock(obj);
                attr->cat_kms = 0;
                cl_object_attr_update(env, obj, attr, CAT_KMS);
                cl_object_attr_unlock(obj);
                unlock_res_and_lock(dlmlock);
+
+               /* Skip dec in case mdc_object_ast_clear() did it */
+               if (data && dlmlock->l_req_mode == LCK_GROUP)
+                       osc_grouplock_dec(cl2osc(obj), dlmlock);
                cl_object_put(env, obj);
        }
        RETURN(result);
@@ -464,7 +470,7 @@ void mdc_lock_lvb_update(const struct lu_env *env, struct osc_object *osc,
 }
 
 static void mdc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
-                            struct lustre_handle *lockh)
+                            struct lustre_handle *lockh, int errcode)
 {
        struct osc_object *osc = cl2osc(oscl->ols_cl.cls_obj);
        struct ldlm_lock *dlmlock;
@@ -516,6 +522,9 @@ static void mdc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
 
        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
+
+       if (errcode != ELDLM_LOCK_MATCHED && dlmlock->l_req_mode == LCK_GROUP)
+               osc_grouplock_inc_locked(osc, dlmlock);
        EXIT;
 }
 
@@ -550,7 +559,7 @@ static int mdc_lock_upcall(void *cookie, struct lustre_handle *lockh,
 
        CDEBUG(D_INODE, "rc %d, err %d\n", rc, errcode);
        if (rc == 0)
-               mdc_lock_granted(env, oscl, lockh);
+               mdc_lock_granted(env, oscl, lockh, errcode);
 
        /* Error handling, some errors are tolerable. */
        if (oscl->ols_glimpse && rc == -ENAVAIL) {
@@ -844,9 +853,9 @@ int mdc_enqueue_send(const struct lu_env *env, struct obd_export *exp,
  *
  * This function does not wait for the network communication to complete.
  */
-static int mdc_lock_enqueue(const struct lu_env *env,
-                           const struct cl_lock_slice *slice,
-                           struct cl_io *unused, struct cl_sync_io *anchor)
+static int __mdc_lock_enqueue(const struct lu_env *env,
+                             const struct cl_lock_slice *slice,
+                             struct cl_io *unused, struct cl_sync_io *anchor)
 {
        struct osc_thread_info *info = osc_env_info(env);
        struct osc_io *oio = osc_env_io(env);
@@ -933,6 +942,28 @@ out:
        RETURN(result);
 }
 
+static int mdc_lock_enqueue(const struct lu_env *env,
+                           const struct cl_lock_slice *slice,
+                           struct cl_io *unused, struct cl_sync_io *anchor)
+{
+       struct osc_object *obj = cl2osc(slice->cls_obj);
+       struct osc_lock *oscl = cl2osc_lock(slice);
+       struct lustre_handle lh = { 0 };
+       int rc;
+
+       if (oscl->ols_cl.cls_lock->cll_descr.cld_mode == CLM_GROUP) {
+               rc = osc_grouplock_enqueue_init(env, obj, oscl, &lh);
+               if (rc < 0)
+                       return rc;
+       }
+
+       rc = __mdc_lock_enqueue(env, slice, unused, anchor);
+
+       if (oscl->ols_cl.cls_lock->cll_descr.cld_mode == CLM_GROUP)
+               osc_grouplock_enqueue_fini(env, obj, oscl, &lh);
+       return rc;
+}
+
 static const struct cl_lock_operations mdc_lock_lockless_ops = {
        .clo_fini = osc_lock_fini,
        .clo_enqueue = mdc_lock_enqueue,
@@ -973,8 +1004,6 @@ int mdc_lock_init(const struct lu_env *env, struct cl_object *obj,
 
        ols->ols_flags = flags;
        ols->ols_speculative = !!(enqflags & CEF_SPECULATIVE);
-       if (lock->cll_descr.cld_mode == CLM_GROUP)
-               ols->ols_flags |= LDLM_FL_ATOMIC_CB;
 
        if (ols->ols_flags & LDLM_FL_HAS_INTENT) {
                ols->ols_flags |= LDLM_FL_BLOCK_GRANTED;
@@ -1475,6 +1504,9 @@ static int mdc_object_ast_clear(struct ldlm_lock *lock, void *data)
                memcpy(lvb, &oinfo->loi_lvb, sizeof(oinfo->loi_lvb));
                cl_object_attr_unlock(&osc->oo_cl);
                ldlm_clear_lvb_cached(lock);
+
+               if (lock->l_req_mode == LCK_GROUP)
+                       osc_grouplock_dec(osc, lock);
        }
        RETURN(LDLM_ITER_CONTINUE);
 }
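
The guard above works because l_ast_data is read and cleared under the
resource lock: the blocking AST decrements the grouplock user count only
if it observed a non-NULL l_ast_data, so a decrement already performed
at object destruction time (mdc_object_ast_clear()) is not repeated. A
hypothetical stand-alone sketch of that observe-and-clear idiom, with a
pthread mutex standing in for lock_res_and_lock():

#include <pthread.h>
#include <stddef.h>

struct fake_lock {
        pthread_mutex_t res_lock;  /* stands in for lock_res_and_lock() */
        void *ast_data;            /* stands in for l_ast_data */
};

/* Returns 1 for exactly one caller, however many paths race. */
static int claim_group_dec(struct fake_lock *l)
{
        void *data;

        pthread_mutex_lock(&l->res_lock);
        data = l->ast_data;
        l->ast_data = NULL;        /* clear while the lock is held */
        pthread_mutex_unlock(&l->res_lock);

        return data != NULL;       /* only the claimant decrements */
}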
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index 6453b31..eda6084 100644
@@ -202,7 +202,7 @@ void osc_lock_lvb_update(const struct lu_env *env,
 }
 
 static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
-                            struct lustre_handle *lockh)
+                            struct lustre_handle *lockh, int errcode)
 {
        struct osc_object *osc = cl2osc(oscl->ols_cl.cls_obj);
        struct ldlm_lock *dlmlock;
@@ -255,7 +255,129 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
 
        LASSERT(oscl->ols_state != OLS_GRANTED);
        oscl->ols_state = OLS_GRANTED;
+
+       if (errcode != ELDLM_LOCK_MATCHED && dlmlock->l_req_mode == LCK_GROUP)
+               osc_grouplock_inc_locked(osc, dlmlock);
+}
+
+void osc_grouplock_inc_locked(struct osc_object *osc, struct ldlm_lock *lock)
+{
+       LASSERT(lock->l_req_mode == LCK_GROUP);
+
+       if (osc->oo_group_users == 0)
+               osc->oo_group_gid = lock->l_policy_data.l_extent.gid;
+       osc->oo_group_users++;
+
+       LDLM_DEBUG(lock, "users %llu gid %llu\n",
+                  osc->oo_group_users,
+                  lock->l_policy_data.l_extent.gid);
+}
+EXPORT_SYMBOL(osc_grouplock_inc_locked);
+
+void osc_grouplock_dec(struct osc_object *osc, struct ldlm_lock *lock)
+{
+       LASSERT(lock->l_req_mode == LCK_GROUP);
+
+       mutex_lock(&osc->oo_group_mutex);
+
+       LASSERT(osc->oo_group_users > 0);
+       osc->oo_group_users--;
+       if (osc->oo_group_users == 0) {
+               osc->oo_group_gid = 0;
+               wake_up_all(&osc->oo_group_waitq);
+       }
+       mutex_unlock(&osc->oo_group_mutex);
+
+       LDLM_DEBUG(lock, "users %llu gid %lu\n",
+                  osc->oo_group_users, osc->oo_group_gid);
 }
+EXPORT_SYMBOL(osc_grouplock_dec);
+
+int osc_grouplock_enqueue_init(const struct lu_env *env,
+                              struct osc_object *obj,
+                              struct osc_lock *oscl,
+                              struct lustre_handle *lh)
+{
+       struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
+       int rc = 0;
+       ENTRY;
+
+       LASSERT(need->cld_mode == CLM_GROUP);
+
+       while (true) {
+               bool check_gid = true;
+
+               if (oscl->ols_flags & LDLM_FL_BLOCK_NOWAIT) {
+                       if (!mutex_trylock(&obj->oo_group_mutex))
+                               RETURN(-EAGAIN);
+               } else {
+                       mutex_lock(&obj->oo_group_mutex);
+               }
+
+               /**
+                * If a grouplock of the same gid already exists, match it
+                * here in advance. Otherwise, if that lock is being cancelled
+                * there is a chance to get 2 grouplocks for the same file.
+                */
+               if (obj->oo_group_users &&
+                   obj->oo_group_gid == need->cld_gid) {
+                       struct osc_thread_info *info = osc_env_info(env);
+                       struct ldlm_res_id *resname = &info->oti_resname;
+                       union ldlm_policy_data *policy = &info->oti_policy;
+                       struct cl_lock *lock = oscl->ols_cl.cls_lock;
+                       __u64 flags = oscl->ols_flags | LDLM_FL_BLOCK_GRANTED;
+                       struct ldlm_namespace *ns;
+                       enum ldlm_mode mode;
+
+                       ns = osc_export(obj)->exp_obd->obd_namespace;
+                       ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
+                       osc_lock_build_policy(env, lock, policy);
+                       mode = ldlm_lock_match(ns, flags, resname,
+                                              oscl->ols_einfo.ei_type, policy,
+                                              oscl->ols_einfo.ei_mode, lh);
+                       if (mode)
+                               oscl->ols_flags |= LDLM_FL_MATCH_LOCK;
+                       else
+                               check_gid = false;
+               }
+
+               /**
+                * If a grouplock exists but cannot be matched, let it flush,
+                * and for now just wait for the user count to reach zero.
+                */
+               if (obj->oo_group_users == 0 ||
+                   (check_gid && obj->oo_group_gid == need->cld_gid))
+                       break;
+
+               mutex_unlock(&obj->oo_group_mutex);
+               if (oscl->ols_flags & LDLM_FL_BLOCK_NOWAIT)
+                       RETURN(-EAGAIN);
+
+               rc = l_wait_event_abortable(obj->oo_group_waitq,
+                                           !obj->oo_group_users);
+               if (rc)
+                       RETURN(rc);
+       }
+
+       RETURN(0);
+}
+EXPORT_SYMBOL(osc_grouplock_enqueue_init);
+
+void osc_grouplock_enqueue_fini(const struct lu_env *env,
+                               struct osc_object *obj,
+                               struct osc_lock *oscl,
+                               struct lustre_handle *lh)
+{
+       ENTRY;
+
+       LASSERT(oscl->ols_cl.cls_lock->cll_descr.cld_mode == CLM_GROUP);
+
+       /* If a user was added on enqueue_init, decref it */
+       if (lustre_handle_is_used(lh))
+               ldlm_lock_decref(lh, oscl->ols_einfo.ei_mode);
+       mutex_unlock(&obj->oo_group_mutex);
+}
+EXPORT_SYMBOL(osc_grouplock_enqueue_fini);
 
 /**
  * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
@@ -287,7 +409,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
        }
 
        if (rc == 0)
-               osc_lock_granted(env, oscl, lockh);
+               osc_lock_granted(env, oscl, lockh, errcode);
 
        /* Error handling, some errors are tolerable. */
        if (oscl->ols_glimpse && rc == -ENAVAIL) {
@@ -424,6 +546,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
                struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
                struct cl_attr *attr = &osc_env_info(env)->oti_attr;
                __u64 old_kms;
+               void *data;
 
                /* Destroy pages covered by the extent of the DLM lock */
                result = osc_lock_flush(cl2osc(obj),
@@ -435,6 +558,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
                lock_res_and_lock(dlmlock);
                /* clearing l_ast_data after flushing data,
                 * to let glimpse ast find the lock and the object */
+               data = dlmlock->l_ast_data;
                dlmlock->l_ast_data = NULL;
                cl_object_attr_lock(obj);
                /* Must get the value under the lock to avoid race. */
@@ -447,6 +571,9 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
                cl_object_attr_unlock(obj);
                unlock_res_and_lock(dlmlock);
 
+               /* Skip dec in case osc_object_ast_clear() did it */
+               if (data && dlmlock->l_req_mode == LCK_GROUP)
+                       osc_grouplock_dec(cl2osc(obj), dlmlock);
                cl_object_put(env, obj);
        }
        RETURN(result);
@@ -938,9 +1065,9 @@ EXPORT_SYMBOL(osc_lock_enqueue_wait);
  *
  * This function does not wait for the network communication to complete.
  */
-static int osc_lock_enqueue(const struct lu_env *env,
-                           const struct cl_lock_slice *slice,
-                           struct cl_io *unused, struct cl_sync_io *anchor)
+static int __osc_lock_enqueue(const struct lu_env *env,
+                             const struct cl_lock_slice *slice,
+                             struct cl_io *unused, struct cl_sync_io *anchor)
 {
        struct osc_thread_info          *info  = osc_env_info(env);
        struct osc_io                   *oio   = osc_env_io(env);
@@ -1060,6 +1187,29 @@ out:
        RETURN(result);
 }
 
+static int osc_lock_enqueue(const struct lu_env *env,
+                           const struct cl_lock_slice *slice,
+                           struct cl_io *unused, struct cl_sync_io *anchor)
+{
+       struct osc_object *obj = cl2osc(slice->cls_obj);
+       struct osc_lock *oscl = cl2osc_lock(slice);
+       struct lustre_handle lh = { 0 };
+       int rc;
+
+       if (oscl->ols_cl.cls_lock->cll_descr.cld_mode == CLM_GROUP) {
+               rc = osc_grouplock_enqueue_init(env, obj, oscl, &lh);
+               if (rc < 0)
+                       return rc;
+       }
+
+       rc = __osc_lock_enqueue(env, slice, unused, anchor);
+
+       if (oscl->ols_cl.cls_lock->cll_descr.cld_mode == CLM_GROUP)
+               osc_grouplock_enqueue_fini(env, obj, oscl, &lh);
+
+       return rc;
+}
+
 /**
  * Breaks a link between osc_lock and dlm_lock.
  */
diff --git a/lustre/osc/osc_object.c b/lustre/osc/osc_object.c
index ea1d290..1fcf879 100644
@@ -88,6 +88,10 @@ int osc_object_init(const struct lu_env *env, struct lu_object *obj,
 
        atomic_set(&osc->oo_nr_ios, 0);
        init_waitqueue_head(&osc->oo_io_waitq);
+       init_waitqueue_head(&osc->oo_group_waitq);
+       mutex_init(&osc->oo_group_mutex);
+       osc->oo_group_users = 0;
+       osc->oo_group_gid = 0;
 
        LASSERT(osc->oo_obj_ops != NULL);
 
@@ -115,6 +119,7 @@ void osc_object_free(const struct lu_env *env, struct lu_object *obj)
        LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
        LASSERT(list_empty(&osc->oo_ol_list));
        LASSERT(atomic_read(&osc->oo_nr_ios) == 0);
+       LASSERT(osc->oo_group_users == 0);
 
        lu_object_fini(obj);
        /* osc doesn't contain an lu_object_header, so we don't need call_rcu */
@@ -230,6 +235,17 @@ static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
                memcpy(lvb, &oinfo->loi_lvb, sizeof(oinfo->loi_lvb));
                cl_object_attr_unlock(&osc->oo_cl);
                ldlm_clear_lvb_cached(lock);
+
+               /**
+                * The object is being destroyed and gets unlinked from the
+                * lock; IO is finished and no cached data is left under the
+                * lock. As the grouplock is immediately marked CBPENDING, it
+                * is not reused. It will also not be possible to flush data
+                * later due to a NULL l_ast_data - enough conditions to let
+                * new grouplocks be enqueued even if the lock still exists
+                * on the client.
+                */
+               if (lock->l_req_mode == LCK_GROUP)
+                       osc_grouplock_dec(osc, lock);
        }
        RETURN(LDLM_ITER_CONTINUE);
 }
diff --git a/lustre/tests/sanity.sh b/lustre/tests/sanity.sh
index ae0ded9..437d86a 100755
@@ -43,7 +43,6 @@ always_except LU-6493  42b
 always_except LU-14541 277
 always_except LU-9054  312
 always_except LU-8411  407
-always_except LU-16046 244b
 
 if $SHARED_KEY; then
        always_except LU-14181 64e 64f
@@ -21345,14 +21344,13 @@ test_244a()
 }
 run_test 244a "sendfile with group lock tests"
 
-test_244b()
+test_grouplock_244()
 {
-       [ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+       [ $PARALLEL == "yes" ] && skip "skip parallel run"
 
        local threads=50
        local size=$((1024*1024))
 
-       test_mkdir $DIR/$tdir
        for i in $(seq 1 $threads); do
                local file=$DIR/$tdir/file_$((i / 10))
                $MULTIOP $file OG1234w$size_$((i % 3))w$size_$((i % 4))g1234c &
@@ -21361,9 +21359,25 @@ test_244b()
        for i in $(seq 1 $threads); do
                wait ${pids[$i]}
        done
+
+}
+
+test_244b()
+{
+       test_mkdir $DIR/$tdir
+       $LFS setstripe -E 10M -E -1 -c 1 $DIR/$tdir
+       test_grouplock_244
 }
 run_test 244b "multi-threaded write with group lock"
 
+test_244c()
+{
+       test_mkdir $DIR/$tdir
+       $LFS setstripe -E 1M -L mdt -E -1 -c 1 $DIR/$tdir
+       test_grouplock_244
+}
+run_test 244c "multi-threaded write with group lock on DOM file"
+
 test_245a() {
        local flagname="multi_mod_rpcs"
        local connect_data_name="max_mod_rpcs"