LU-9899 tests: mount client on MGS for tests with pools
Author:     James Nunez <james.a.nunez@intel.com>
AuthorDate: Thu, 5 Oct 2017 19:51:50 +0000 (13:51 -0600)
Commit:     Oleg Drokin <oleg.drokin@intel.com>
CommitDate: Wed, 22 Nov 2017 03:55:21 +0000 (03:55 +0000)
When a Lustre file system has the MGS and MDS on separate
nodes, the file system must also be mounted on the MGS node
for OST pool operations to work properly.

Several tests create OST pools, add targets to them, and
destroy them. Create functions in test-framework.sh to mount
and unmount the file system on the MGS, and call them where
needed in the following tests:
conf-sanity tests 82b and 103,
sanity tests 27D, 200, 220, 253 and 406, and
replay-single test 85b.
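
For reference, the affected tests all follow the same pattern,
sketched below using the new helpers (test_example and pool1 are
illustrative names, not part of this patch):

    test_example() {
            # Pool commands run on the MGS, so a standalone MGS
            # needs its own client mount of the file system.
            if ! combined_mgs_mds ; then
                    mount_mgs_client
            fi

            do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
                    error "pool_new $FSNAME.pool1 failed"
            # ... exercise the pool ...
            do_facet mgs $LCTL pool_destroy $FSNAME.pool1 ||
                    error "pool_destroy $FSNAME.pool1 failed"

            if ! combined_mgs_mds ; then
                    umount_mgs_client
            fi
    }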

Test-Parameters: combinedmdsmgs=false testlist=sanity,replay-single,conf-sanity

Signed-off-by: James Nunez <james.a.nunez@intel.com>
Change-Id: I4b3e331b8d1ea6c3f8c9ea8a571e26f66f4535f8
Reviewed-on: https://review.whamcloud.com/28806
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/tests/conf-sanity.sh
lustre/tests/replay-single.sh
lustre/tests/sanity.sh
lustre/tests/test-framework.sh

diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh
index 829f33a..6e272a1 100644
@@ -68,8 +68,8 @@ if ! combined_mgs_mds; then
        ALWAYS_EXCEPT="$ALWAYS_EXCEPT  43b     53b     54b"
        # bug number for skipped test: LU-9875 LU-9879 LU-9879 LU-9879 LU-9879
        ALWAYS_EXCEPT="$ALWAYS_EXCEPT  70e     80      84      87      100"
-       # bug number for skipped test: LU-8110 LU-9400 LU-9879 LU-9879 LU-9879
-       ALWAYS_EXCEPT="$ALWAYS_EXCEPT  102     103     104     105     107"
+       # bug number for skipped test: LU-8110 LU-9879 LU-9879 LU-9879
+       ALWAYS_EXCEPT="$ALWAYS_EXCEPT  102     104     105     107"
 fi
 
 # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
@@ -5921,6 +5921,9 @@ cleanup_82b() {
        # Remove OSTs from a pool and destroy the pool.
        destroy_pool $ost_pool || true
 
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
        restore_ostindex
 }
 
@@ -5960,6 +5963,10 @@ test_82b() { # LU-4665
        done
 
        mount_client $MOUNT || error "mount client $MOUNT failed"
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        wait_osts_up
        $LFS df $MOUNT || error "$LFS df $MOUNT failed"
        mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
@@ -7309,7 +7316,7 @@ test_renamefs() {
 
        echo "rename $FSNAME to $newname"
 
-       if [ ! combined_mgs_mds ]; then
+       if ! combined_mgs_mds ; then
                local facet=$(mgsdevname)
 
                do_facet mgs \
@@ -7381,6 +7388,9 @@ test_103() {
        cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
                error "(2) Fail to copy test-framework.sh"
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
                error "(3) Fail to create $FSNAME.pool1"
        # name the pool name as the fsname
@@ -7392,6 +7402,9 @@ test_103() {
        $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
                error "(6) Fail to setstripe on $DIR/$tdir/d0"
 
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
        KEEP_ZPOOL=true
        stopall
 
@@ -7401,6 +7414,9 @@ test_103() {
        FSNAME="mylustre"
        setupall
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        test_103_check_pool $save_fsname 7
 
        if [ $OSTCOUNT -ge 2 ]; then
@@ -7409,6 +7425,9 @@ test_103() {
 
        $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
                error "(16) Fail to setstripe on $DIR/$tdir/f0"
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 
        stopall
 
@@ -7417,8 +7436,14 @@ test_103() {
        FSNAME="tfs"
        setupall
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        test_103_check_pool $save_fsname 17
 
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
        stopall
 
        test_renamefs $save_fsname
diff --git a/lustre/tests/replay-single.sh b/lustre/tests/replay-single.sh
index 6cdcb39..c5602c4 100755
@@ -3078,6 +3078,10 @@ run_test 85a "check the cancellation of unused locks during recovery(IBITS)"
 test_85b() { #bug 16774
        lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        create_pool $FSNAME.$TESTNAME ||
                error "unable to create pool $TESTNAME"
        do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $FSNAME-OST0000 ||
@@ -3115,6 +3119,10 @@ test_85b() { #bug 16774
        do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
                error "unable to destroy the pool $TESTNAME"
 
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
+
        if [ $count2 -ge $count ]; then
                error "unused locks are not canceled"
        fi
diff --git a/lustre/tests/sanity.sh b/lustre/tests/sanity.sh
index 7c3285f..4859d9e 100755
@@ -2059,6 +2059,10 @@ test_27D() {
        local ost_list=$(seq $first_ost $ost_step $last_ost)
        local ost_range="$first_ost $last_ost $ost_step"
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        test_mkdir $DIR/$tdir
        pool_add $POOL || error "pool_add failed"
        pool_add_targets $POOL $ost_range || error "pool_add_targets failed"
@@ -2073,6 +2077,10 @@ test_27D() {
                error "llapi_layout_test failed"
 
        destroy_test_pools || error "destroy test pools failed"
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 27D "validate llapi_layout API"
 
@@ -12293,8 +12301,12 @@ test_200() {
        local test_path=$POOL_ROOT/$POOL_DIR_NAME
        local file_dir=$POOL_ROOT/file_tst
        local subdir=$test_path/subdir
-
        local rc=0
+
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        while : ; do
                # former test_200a test_200b
                pool_add $POOL                          || { rc=$? ; break; }
@@ -12314,7 +12326,7 @@ test_200() {
                pool_create_files $POOL $file_dir $files "$ost_list" \
                                                        || { rc=$? ; break; }
                # former test_200g test_200h
-               pool_lfs_df $POOL                       || { rc=$? ; break; }
+               pool_lfs_df $POOL                       || { rc=$? ; break; }
                pool_file_rel_path $POOL $test_path     || { rc=$? ; break; }
 
                # former test_201a test_201b test_201c
@@ -12322,11 +12334,15 @@ test_200() {
 
                local f=$test_path/$tfile
                pool_remove_all_targets $POOL $f        || { rc=$? ; break; }
-               pool_remove $POOL $f                    || { rc=$? ; break; }
+               pool_remove $POOL $f                    || { rc=$? ; break; }
                break
        done
 
        destroy_test_pools
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
        return $rc
 }
 run_test 200 "OST pools"
@@ -13004,6 +13020,10 @@ test_220() { #LU-325
 
        $LFS df -i
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_val=-1
        #define OBD_FAIL_OST_ENOINO              0x229
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_loc=0x229
@@ -13034,10 +13054,16 @@ test_220() { #LU-325
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_val=0
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_loc=0
 
-       do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $OST || return 4
-       do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME || return 5
+       do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $OST ||
+               error "$LCTL pool_remove $FSNAME.$TESTNAME $OST failed"
+       do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
+               error "$LCTL pool_destroy $FSNAME.$TESTNAME failed"
        echo "unlink $MDSOBJS files @$next_id..."
-       unlinkmany $DIR/$tdir/f $MDSOBJS || return 6
+       unlinkmany $DIR/$tdir/f $MDSOBJS || error "unlinkmany failed"
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 220 "preallocated MDS objects still used if ENOSPC from OST"
 
@@ -14559,6 +14585,9 @@ test_253() {
                        osp.$mdtosc_proc1.reserved_mb_low)
        echo "prev high watermark $last_wm_h, prev low watermark $last_wm_l"
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        create_pool $FSNAME.$TESTNAME || error "Pool creation failed"
        do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $ost_name ||
                error "Adding $ost_name to pool failed"
@@ -14623,6 +14652,10 @@ test_253() {
                error "Remove $ost_name from pool failed"
        do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
                error "Pool destroy fialed"
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 253 "Check object allocation limit"
 
@@ -16762,8 +16795,11 @@ test_406() {
        local def_stripe_size=$($GETSTRIPE -S $MOUNT)
        local def_stripe_offset=$($GETSTRIPE -i $MOUNT)
        local def_pool=$($GETSTRIPE -p $MOUNT)
-
        local test_pool=$TESTNAME
+
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        pool_add $test_pool || error "pool_add failed"
        pool_add_targets $test_pool 0 $(($OSTCOUNT - 1)) 1 ||
                error "pool_add_targets failed"
@@ -16825,6 +16861,10 @@ test_406() {
        local f=$DIR/$tdir/$tfile
        pool_remove_all_targets $test_pool $f
        pool_remove $test_pool $f
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 406 "DNE support fs default striping"
 
diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh
index 34d8580..be82fec 100755
@@ -1864,6 +1864,19 @@ zconf_umount() {
     fi
 }
 
+# Mount the file system on the MGS
+mount_mgs_client() {
+       do_facet mgs "mkdir -p $MOUNT"
+       zconf_mount $mgs_HOST $MOUNT $MOUNT_OPTS ||
+               error "unable to mount $MOUNT on MGS"
+}
+
+# Unmount the file system on the MGS
+umount_mgs_client() {
+       zconf_umount $mgs_HOST $MOUNT
+       do_facet mgs "rm -rf $MOUNT"
+}
+
 # nodes is comma list
 sanity_mount_check_nodes () {
     local nodes=$1