From d9869f52e96985ad66e97c4058a4eb40650c9d34 Mon Sep 17 00:00:00 2001
From: Sebastien Buisson
Date: Thu, 22 Feb 2024 13:44:57 +0100
Subject: [PATCH] DDN-4630 sec: protect against concurrent mi_ginfo change

With the INTERNAL upcall mechanism, we put in the upcall cache the
groups received from the client, by appending them to a list built
from previous requests.
An existing entry is never modified once it is marked as VALID; it is
replaced with a new one, with a larger groups list. However, the group
info associated with an entry can change when updated from NEW to
VALID. This means the number of groups can only grow from 0 (group
info not set) to the current number of collected groups.
In case of concurrent cache entry update, we need to check the group
info and start over adding the groups associated with the current
request.

Fixes: 4515e5365f ("LU-17015 build: rework upcall cache")
Signed-off-by: Sebastien Buisson
Change-Id: Ie7088bdbfcae396602b59e2ab07fbfbbb14d96af
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/54146
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Oleg Drokin
Reviewed-by: Andreas Dilger
---
 lustre/mdt/mdt_identity.c               |  5 ++++-
 lustre/obdclass/upcall_cache_internal.c | 39 ++++++++++++++++++++++++---------
 2 files changed, 33 insertions(+), 11 deletions(-)

diff --git a/lustre/mdt/mdt_identity.c b/lustre/mdt/mdt_identity.c
index e8a355b..a529db0 100644
--- a/lustre/mdt/mdt_identity.c
+++ b/lustre/mdt/mdt_identity.c
@@ -42,7 +42,10 @@
 static void mdt_identity_entry_init(struct upcall_cache_entry *entry,
 				    void *unused)
 {
-	entry->u.identity.mi_uc_entry = entry;
+	struct md_identity *identity = &entry->u.identity;
+
+	memset(identity, 0, sizeof(*identity));
+	identity->mi_uc_entry = entry;
 }
 
 static void mdt_identity_entry_free(struct upcall_cache *cache,
diff --git a/lustre/obdclass/upcall_cache_internal.c b/lustre/obdclass/upcall_cache_internal.c
index 01dc37b..4690494 100644
--- a/lustre/obdclass/upcall_cache_internal.c
+++ b/lustre/obdclass/upcall_cache_internal.c
@@ -83,7 +83,7 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
 	struct md_identity *identity;
 	bool supp_in_ginfo[2];
 	gid_t *groups = NULL, *glist_p;
-	int groups_num, i, rc = 0;
+	int i, groups_num, ginfo_ngroups = 0, rc = 0;
 
 	if (*grouplist || !uc || uc->uc_suppgids[0] == -1)
 		/* grouplist already built, or no supp groups
@@ -91,22 +91,27 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
 		 */
 		goto out;
 
+restart:
+	groups_num = 0;
+	/* We just deal with NEW and VALID entries. Other states will
+	 * be handled by the caller, no need to return an error.
+	 */
+	if (!UC_CACHE_IS_NEW(entry) && !UC_CACHE_IS_VALID(entry))
+		goto out;
+
 	identity = &entry->u.identity;
 	*fsgid = uc->uc_fsgid;
 	supp_in_ginfo[0] = false;
 	supp_in_ginfo[1] = (uc->uc_suppgids[1] == -1);
-
-	if (!identity->mi_ginfo || !identity->mi_ginfo->ngroups)
-		groups_num = 0;
-	else
-		groups_num = identity->mi_ginfo->ngroups;
+	if (identity->mi_ginfo && identity->mi_ginfo->ngroups)
+		ginfo_ngroups = identity->mi_ginfo->ngroups;
 
 	/* check if provided supp groups are already in cache */
 	for (i = 0; i < 2 && uc->uc_suppgids[i] != -1; i++) {
 		if (unlikely(uc->uc_suppgids[i] == uc->uc_fsuid)) {
 			/* Do not place user's group ID in group list */
 			supp_in_ginfo[i] = true;
-		} else if (groups_num) {
+		} else if (ginfo_ngroups) {
 			atomic_inc(&identity->mi_ginfo->usage);
 			supp_in_ginfo[i] =
 				lustre_groups_search(identity->mi_ginfo,
@@ -128,7 +133,7 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
 		groups_num++;
 	if (!supp_in_ginfo[1])
 		groups_num++;
-	CFS_ALLOC_PTR_ARRAY(groups, groups_num);
+	CFS_ALLOC_PTR_ARRAY(groups, groups_num + ginfo_ngroups);
 	if (groups == NULL)
 		GOTO(out, rc = -ENOMEM);
 
@@ -138,10 +143,24 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
 			*(glist_p++) = uc->uc_suppgids[i];
 	}
 
-	if (identity->mi_ginfo && identity->mi_ginfo->ngroups) {
+	/* An existing entry is never modified once it is marked as
+	 * VALID. But it can change when updated from NEW to VALID,
+	 * for instance the mi_ginfo can be set. This means the number
+	 * of groups can only grow from 0 (mi_ginfo not set) to
+	 * mi_ginfo->ngroups.
+	 * So only copy mi_ginfo to the groups array if necessary space
+	 * was allocated for it.
+	 * In case we detect a concurrent change in mi_ginfo->ngroups,
+	 * just start over.
+	 */
+	if (ginfo_ngroups) {
 		atomic_inc(&identity->mi_ginfo->usage);
 		lustre_list_from_groups(glist_p, identity->mi_ginfo);
 		atomic_dec(&identity->mi_ginfo->usage);
+	} else if (identity->mi_ginfo && identity->mi_ginfo->ngroups) {
+		CFS_FREE_PTR_ARRAY(groups, groups_num + ginfo_ngroups);
+		groups = NULL;
+		goto restart;
 	}
 
 	if (!UC_CACHE_IS_NEW(entry)) {
@@ -160,7 +179,7 @@ int upcall_cache_get_entry_internal(struct upcall_cache *cache,
 out:
 	if (groups) {
 		*grouplist = groups;
-		*ngroups = groups_num;
+		*ngroups = groups_num + ginfo_ngroups;
 	}
 	return rc;
 }
-- 
1.8.3.1