LU-14739 quota: nodemap squashed root cannot bypass quota
author     Sebastien Buisson <sbuisson@ddn.com>
           Fri, 11 Jun 2021 14:49:47 +0000 (16:49 +0200)
committer  Andreas Dilger <adilger@whamcloud.com>
           Wed, 10 Nov 2021 15:58:59 +0000 (15:58 +0000)
When root on a client is squashed via a nodemap's squash_uid/squash_gid,
its IOs must not bypass quota enforcement the way they normally would
without squashing.
So on the client side, do not set OBD_BRW_FROM_GRANT for every page
used by root. And on the server side, check whether root is squashed
via a nodemap and, if so, remove OBD_BRW_NOQUOTA.
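
The server-side part boils down to the pattern in the minimal standalone
sketch below (not the actual Lustre code: the struct names, the helper
name and the flag value are simplified stand-ins for illustration): once
the write's FS uid has been mapped back through the export's nodemap, a
write whose mapped uid equals the nodemap's squash_uid gets the
"no quota" flag cleared on every bulk page, so the quota hooks treat it
like any ordinary user's write.

#include <stdio.h>

#define OBD_BRW_NOQUOTA 0x100	/* illustrative flag value */

struct niobuf {			/* stand-in for struct niobuf_local */
	unsigned int lnb_flags;
};

struct nodemap {		/* stand-in for struct lu_nodemap */
	unsigned int nm_squash_uid;
};

/* Clear NOQUOTA on all pages of a bulk write issued by squashed root. */
static void enforce_quota_for_squashed_root(struct nodemap *nodemap,
					    unsigned int mapped_uid,
					    struct niobuf *lnb, int npages)
{
	int i;

	if (mapped_uid != nodemap->nm_squash_uid)
		return;		/* not squashed: leave the flags alone */

	for (i = 0; i < npages; i++)
		lnb[i].lnb_flags &= ~OBD_BRW_NOQUOTA;
}

int main(void)
{
	struct nodemap nm = { .nm_squash_uid = 99 };
	struct niobuf pages[2] = {
		{ .lnb_flags = OBD_BRW_NOQUOTA },
		{ .lnb_flags = OBD_BRW_NOQUOTA },
	};

	enforce_quota_for_squashed_root(&nm, 99, pages, 2);
	printf("flags after: %#x %#x\n",
	       pages[0].lnb_flags, pages[1].lnb_flags);
	return 0;
}

The real patch (ofd_io.c hunk below) does this inside ofd_commitrw() and
reuses the already-mapped ids to convert o_uid/o_gid back to client
values, so the nodemap only needs to be looked up once per commit.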

Lustre-change: https://review.whamcloud.com/43988
Lustre-commit: a4fbe7341baf12c00c6048bb290f8aa26c05cbac

Signed-off-by: Sebastien Buisson <sbuisson@ddn.com>
Change-Id: I95b31277273589e363193cba8b84870f008bb07a
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-on: https://review.whamcloud.com/45485
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
lustre/ofd/ofd_io.c
lustre/osc/osc_cache.c
lustre/tests/sanity-quota.sh

diff --git a/lustre/ofd/ofd_io.c b/lustre/ofd/ofd_io.c
index a3c2ac3..85fea0b 100644
@@ -1456,6 +1456,26 @@ int ofd_commitrw(const struct lu_env *env, int cmd, struct obd_export *exp,
 
        if (cmd == OBD_BRW_WRITE) {
                struct lu_nodemap *nodemap;
+               __u32 mapped_uid, mapped_gid;
+
+               nodemap = nodemap_get_from_exp(exp);
+               mapped_uid = nodemap_map_id(nodemap, NODEMAP_UID,
+                                           NODEMAP_FS_TO_CLIENT,
+                                           oa->o_uid);
+               mapped_gid = nodemap_map_id(nodemap, NODEMAP_GID,
+                                           NODEMAP_FS_TO_CLIENT,
+                                           oa->o_gid);
+
+               if (!IS_ERR_OR_NULL(nodemap)) {
+                       /* do not bypass quota enforcement if squashed uid */
+                       if (unlikely(mapped_uid == nodemap->nm_squash_uid)) {
+                               int idx;
+
+                               for (idx = 0; idx < npages; idx++)
+                                       lnb[idx].lnb_flags &= ~OBD_BRW_NOQUOTA;
+                       }
+                       nodemap_putref(nodemap);
+               }
 
                valid = OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLPROJID |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;
@@ -1520,16 +1540,8 @@ int ofd_commitrw(const struct lu_env *env, int cmd, struct obd_export *exp,
                /* Convert back to client IDs. LU-9671.
                 * nodemap_get_from_exp() may fail due to nodemap deactivated,
                 * server ID will be returned back to client in that case. */
-               nodemap = nodemap_get_from_exp(exp);
-               if (nodemap != NULL && !IS_ERR(nodemap)) {
-                       oa->o_uid = nodemap_map_id(nodemap, NODEMAP_UID,
-                                                  NODEMAP_FS_TO_CLIENT,
-                                                  oa->o_uid);
-                       oa->o_gid = nodemap_map_id(nodemap, NODEMAP_GID,
-                                                  NODEMAP_FS_TO_CLIENT,
-                                                  oa->o_gid);
-                       nodemap_putref(nodemap);
-               }
+               oa->o_uid = mapped_uid;
+               oa->o_gid = mapped_gid;
        } else if (cmd == OBD_BRW_READ) {
                rc = ofd_commitrw_read(env, ofd, fid, objcount,
                                       npages, lnb);
diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c
index 696cb44..20290cb 100644
@@ -2336,7 +2336,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
        }
 
        /* check if the file's owner/group is over quota */
-       if (!(cmd & OBD_BRW_NOQUOTA)) {
+       if (!io->ci_noquota) {
                struct cl_object *obj;
                struct cl_attr   *attr;
                unsigned int qid[LL_MAXQUOTAS];
diff --git a/lustre/tests/sanity-quota.sh b/lustre/tests/sanity-quota.sh
index 93c6136..9256dc7 100755
@@ -4908,6 +4908,71 @@ function cleanup_quota_test_75()
        cleanup_quota_test
 }
 
+test_75()
+{
+       local limit=10 # MB
+       local testfile="$DIR/$tdir/$tfile-0"
+
+       setup_quota_test || error "setup quota failed with $?"
+       stack_trap cleanup_quota_test_75 EXIT
+
+       # enable ost quota
+       set_ost_qtype $QTYPE || error "enable ost quota failed"
+
+       # test for user
+       log "User $TSTUSR quota block hardlimit:$limit MB"
+       $LFS setquota -u $TSTID -b 0 -B ${limit}M -i 0 -I 0 $DIR ||
+               error "set user quota failed"
+
+       # make sure the system is clean
+       local used=$(getquota -u $TSTID global curspace)
+       [ $used -ne 0 ] && error "Used space ($used) for user $TSTUSR not 0."
+
+       chmod 777 $DIR/$tdir || error "chmod 777 $DIR/$tdir failed"
+
+       do_facet mgs $LCTL nodemap_activate 1
+       wait_nm_sync active
+       do_facet mgs $LCTL nodemap_modify --name default \
+               --property admin --value 0
+       do_facet mgs $LCTL nodemap_modify --name default \
+               --property trusted --value 0
+       do_facet mgs $LCTL nodemap_modify --name default \
+               --property deny_unknown --value 0
+       do_facet mgs $LCTL nodemap_modify --name default \
+               --property squash_uid --value $TSTID
+       do_facet mgs $LCTL nodemap_modify --name default \
+               --property squash_gid --value $TSTID
+       cancel_lru_locks mdc
+       wait_nm_sync default admin_nodemap
+       wait_nm_sync default trusted_nodemap
+       wait_nm_sync default squash_uid
+
+       log "Write..."
+       $DD of=$testfile bs=1M count=$((limit/2)) ||
+               quota_error u $TSTID \
+                       "root write failure, but expect success"
+
+       log "Write out of block quota ..."
+       # possibly a cache write, ignore failure
+       $DD of=$testfile bs=1M count=$((limit/2)) seek=$((limit/2)) || true
+       # flush cache, ensure noquota flag is set on client
+       cancel_lru_locks osc
+       sync; sync_all_data || true
+       # sync forced cache flush, but did not guarantee that slave
+       # got new edquot through glimpse, so wait to make sure
+       sleep 5
+       $DD of=$testfile bs=1M count=1 seek=$limit conv=fsync &&
+               quota_error u $TSTID \
+                       "user write success, but expect EDQUOT"
+       rm -f $testfile
+       wait_delete_completed || error "wait_delete_completed failed"
+       sync_all_data || true
+       used=$(getquota -u $TSTUSR global curspace)
+       [ $used -eq 0 ] || quota_error u $TSTID \
+               "user quota not released after deletion"
+}
+run_test 75 "nodemap squashed root respects quota enforcement"
+
 test_76() {
        ! is_project_quota_supported &&
                skip "skip project quota unsupported"