Whamcloud - gitweb
LU-17034 tests: memory corruption in PQ
authorSergey Cheremencev <scherementsev@ddn.com>
Thu, 31 Aug 2023 13:11:52 +0000 (17:11 +0400)
committerAndreas Dilger <adilger@whamcloud.com>
Fri, 1 Sep 2023 13:16:51 +0000 (13:16 +0000)
Add conf-sanity test_33c to verify that there is no
memory corruption in PQ. The test uses an OST
with index 0x7c6 to cause an access beyond
lqeg_arr, whose size is 64 by default.

Test-Parameters: trivial testlist=conf-sanity env=ONLY=33c
Signed-off-by: Sergey Cheremencev <scherementsev@ddn.com>
Change-Id: I401ce80b86701ff611df5f7078b6aecad147d6db
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/52198
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
lustre/tests/conf-sanity.sh
lustre/tests/test-framework.sh

index 0190e7d..79fa3b1 100644 (file)
@@ -2621,6 +2621,73 @@ test_33b() {     # was test_34
 }
 run_test 33b "Drop cancel during umount"
 
+test_33c() {
+       #local FSNAME2=test-$testnum
+       local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+       local tstid=${TSTID:-"$(id -u $TSTUSR)"}
+       local mkfsoptions
+       local qpool="qpool1"
+
+       [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
+
+       local fs2mdsdev=$(mdsdevname 1_2)
+       local fs2ostdev=$(ostdevname 1_2)
+       local fs2mdsvdev=$(mdsvdevname 1_2)
+       local fs2ostvdev=$(ostvdevname 1_2)
+
+       if [ "$mds1_FSTYPE" == ldiskfs ]; then
+               mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
+       fi
+
+       if combined_mgs_mds; then
+               local mgs_flag="--mgs"
+       fi
+
+       add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --fsname=${FSNAME} \
+               --reformat $mgs_flag $mkfsoptions $fs2mdsdev $fs2mdsvdev ||
+               exit 10
+       add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \
+               --fsname=${FSNAME} --index=0x7c6 --reformat $fs2ostdev \
+               $fs2ostvdev || exit 10
+
+
+       start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
+       start fs2ost $fs2ostdev $OST_MOUNT_OPTS
+
+       mount_client $MOUNT || error "client start failed"
+       mkdir_on_mdt0 $DIR/$tdir || "cannot create $DIR/$tdir"
+       chmod 0777 $DIR/$tdir || "chown failed"
+       if [[ $PERM_CMD == *"set_param -P"* ]]; then
+               do_facet mgs $PERM_CMD \
+                       osd-*.$FSNAME-OST*.quota_slave.enable=$QUOTA_TYPE
+       else
+               do_facet mgs $PERM_CMD $FSNAME.quota.ost=$QUOTA_TYPE ||
+                       error "set ost quota type failed"
+       fi
+
+       pool_add $qpool || error "pool_add failed"
+       pool_add_targets $qpool 0x7c6
+
+       $LFS setquota -u $tstid -B20M -b 0 $MOUNT
+       $LFS setquota -g $tstid -B20M -b 0 $MOUNT
+       $LFS setquota -u $tstid -B20M -b 0 --pool $qpool $MOUNT
+       $LFS setquota -g $tstid -B20M -b 0 --pool $qpool $MOUNT
+
+       for i in {1..10}; do
+               runas -u $tstid -g $tstid dd if=/dev/zero of=$DIR/$tdir/f1 \
+                       bs=1M count=30 oflag=direct
+               sleep 3
+               rm -f $DIR/$tdir/f1
+       done
+
+       destroy_pools
+       umount_client $MOUNT || error "client start failed"
+       stop fs2ost -f
+       stop fs2mds -f
+       cleanup_nocli || error "cleanup_nocli failed with $?"
+}
+run_test 33c "Mount ost with a large index number"
+
 test_34a() {
        setup
        do_facet client "bash runmultiop_bg_pause $DIR/file O_c"
index f67c2d3..f4ebddf 100755 (executable)
@@ -9422,6 +9422,7 @@ pool_add_targets() {
 
        if [ -z $last ]; then
                local list=$first
+               last=$first
        else
                local list=$(seq $first $step $last)
        fi