DDN-4630 sec: protect against concurrent mi_ginfo change
author Sebastien Buisson <sbuisson@ddn.com>
Thu, 22 Feb 2024 12:44:57 +0000 (13:44 +0100)
committer Andreas Dilger <adilger@whamcloud.com>
Sat, 24 Feb 2024 03:47:28 +0000 (03:47 +0000)
With the INTERNAL upcall mechanism, we store the groups received from
the client in the upcall cache, appending them to a list built from
previous requests.
An existing entry is never modified once it is marked as VALID; it is
replaced with a new one that has a larger groups list. However, the
group info associated with an entry can change when it is updated from
NEW to VALID. This means the number of groups can only grow from 0
(group info not set) to the current number of collected groups.
In case of a concurrent cache entry update, we need to re-check the
group info and start over adding the groups associated with the current
request.
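
For illustration, a minimal self-contained sketch of the snapshot /
allocate / re-check / restart pattern applied by this patch, assuming a
toy entry whose group list is published exactly once when it goes from
NEW to VALID (struct toy_entry, collect_groups() and the other names
are hypothetical stand-ins, not the Lustre upcall cache API):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for a cache entry: the group list is written once, then
 * ngroups is published, so ngroups only ever grows from 0.
 */
struct toy_entry {
	atomic_int ngroups;
	int groups[64];
};

/* Build a private array holding one extra gid followed by a copy of the
 * entry's group list, restarting if the list was published concurrently.
 */
static int *collect_groups(struct toy_entry *entry, int extra_gid, int *out_n)
{
	int snap, *arr;

restart:
	/* snapshot the group count: 0 means group info not set yet */
	snap = atomic_load(&entry->ngroups);

	arr = calloc(snap + 1, sizeof(*arr));
	if (arr == NULL)
		return NULL;

	arr[0] = extra_gid;
	/* groups[0..snap) are immutable once snap has been observed */
	memcpy(arr + 1, entry->groups, snap * sizeof(*arr));

	if (atomic_load(&entry->ngroups) != snap) {
		/* a concurrent NEW -> VALID update grew the list: the array
		 * sized from the stale snapshot is too small, start over
		 */
		free(arr);
		goto restart;
	}

	*out_n = snap + 1;
	return arr;
}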

Fixes: 4515e5365f ("LU-17015 build: rework upcall cache")
Signed-off-by: Sebastien Buisson <sbuisson@ddn.com>
Change-Id: Ie7088bdbfcae396602b59e2ab07fbfbbb14d96af
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/54146
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
lustre/mdt/mdt_identity.c
lustre/obdclass/upcall_cache_internal.c

diff --git a/lustre/mdt/mdt_identity.c b/lustre/mdt/mdt_identity.c
index e8a355b..a529db0 100644
 static void mdt_identity_entry_init(struct upcall_cache_entry *entry,
                                    void *unused)
 {
-       entry->u.identity.mi_uc_entry = entry;
+       struct md_identity *identity = &entry->u.identity;
+
+       memset(identity, 0, sizeof(*identity));
+       identity->mi_uc_entry = entry;
 }
 
 static void mdt_identity_entry_free(struct upcall_cache *cache,
diff --git a/lustre/obdclass/upcall_cache_internal.c b/lustre/obdclass/upcall_cache_internal.c
index 01dc37b..4690494 100644
@@ -83,7 +83,7 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
        struct md_identity *identity;
        bool supp_in_ginfo[2];
        gid_t *groups = NULL, *glist_p;
-       int groups_num, i, rc = 0;
+       int i, groups_num, ginfo_ngroups = 0, rc = 0;
 
        if (*grouplist || !uc || uc->uc_suppgids[0] == -1)
                /* grouplist already built, or no supp groups
@@ -91,22 +91,27 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
                 */
                goto out;
 
+restart:
+       groups_num = 0;
+       /* We just deal with NEW and VALID entries. Other states will
+        * be handled by the caller, no need to return an error.
+        */
+       if (!UC_CACHE_IS_NEW(entry) && !UC_CACHE_IS_VALID(entry))
+               goto out;
+
        identity = &entry->u.identity;
        *fsgid = uc->uc_fsgid;
        supp_in_ginfo[0] = false;
        supp_in_ginfo[1] = (uc->uc_suppgids[1] == -1);
-
-       if (!identity->mi_ginfo || !identity->mi_ginfo->ngroups)
-               groups_num = 0;
-       else
-               groups_num = identity->mi_ginfo->ngroups;
+       if (identity->mi_ginfo && identity->mi_ginfo->ngroups)
+               ginfo_ngroups = identity->mi_ginfo->ngroups;
 
        /* check if provided supp groups are already in cache */
        for (i = 0; i < 2 && uc->uc_suppgids[i] != -1; i++) {
                if (unlikely(uc->uc_suppgids[i] == uc->uc_fsuid)) {
                        /* Do not place user's group ID in group list */
                        supp_in_ginfo[i] = true;
-               } else if (groups_num) {
+               } else if (ginfo_ngroups) {
                        atomic_inc(&identity->mi_ginfo->usage);
                        supp_in_ginfo[i] =
                                lustre_groups_search(identity->mi_ginfo,
@@ -128,7 +133,7 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
                        groups_num++;
                if (!supp_in_ginfo[1])
                        groups_num++;
-               CFS_ALLOC_PTR_ARRAY(groups, groups_num);
+               CFS_ALLOC_PTR_ARRAY(groups, groups_num + ginfo_ngroups);
                if (groups == NULL)
                        GOTO(out, rc = -ENOMEM);
 
@@ -138,10 +143,24 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
                                *(glist_p++) = uc->uc_suppgids[i];
                }
 
-               if (identity->mi_ginfo && identity->mi_ginfo->ngroups) {
+               /* An existing entry is never modified once it is marked as
+                * VALID. But it can change when updated from NEW to VALID,
+                * for instance the mi_ginfo can be set. This means the number
+                * of groups can only grow from 0 (mi_ginfo not set) to
+                * mi_ginfo->ngroups.
+                * So only copy mi_ginfo to the groups array if necessary space
+                * was allocated for it.
+                * In case we detect a concurrent change in mi_ginfo->ngroups,
+                * just start over.
+                */
+               if (ginfo_ngroups) {
                        atomic_inc(&identity->mi_ginfo->usage);
                        lustre_list_from_groups(glist_p, identity->mi_ginfo);
                        atomic_dec(&identity->mi_ginfo->usage);
+               } else if (identity->mi_ginfo && identity->mi_ginfo->ngroups) {
+                       CFS_FREE_PTR_ARRAY(groups, groups_num + ginfo_ngroups);
+                       groups = NULL;
+                       goto restart;
                }
 
                if (!UC_CACHE_IS_NEW(entry)) {
@@ -160,7 +179,7 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
 out:
        if (groups) {
                *grouplist = groups;
-               *ngroups = groups_num;
+               *ngroups = groups_num + ginfo_ngroups;
        }
        return rc;
 }
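
A toy model of the array layout built above, showing why the caller
receives groups_num + ginfo_ngroups entries: the supplementary gids not
already present in the cached group info come first, then a copy of the
cached group info is appended (the arrays and names below are
illustrative only, not the Lustre helpers):

#include <stdio.h>
#include <string.h>

int main(void)
{
	int cached[] = { 1000, 1001, 1002 };	/* stand-in for mi_ginfo */
	int supp[] = { 2000, 2001 };		/* supp gids not in cached */
	int groups_num = 2, ginfo_ngroups = 3;
	int out[8], *glist_p = out;

	/* new supplementary gids go first ... */
	memcpy(glist_p, supp, groups_num * sizeof(*out));
	glist_p += groups_num;
	/* ... then a copy of the cached group list is appended */
	memcpy(glist_p, cached, ginfo_ngroups * sizeof(*out));

	/* the caller sees *ngroups = groups_num + ginfo_ngroups */
	for (int i = 0; i < groups_num + ginfo_ngroups; i++)
		printf("gid[%d] = %d\n", i, out[i]);

	return 0;
}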