LU-9899 tests: mount client on MGS for tests with pools
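All hunks in this patch apply the same guard around pool operations: when the
MGS is not combined with an MDS, a client is mounted on the MGS node before a
pool is created and unmounted again after the test pools are destroyed,
presumably so pool state can be checked from the MGS node. A minimal sketch of
the pattern, assuming the test-framework.sh helpers combined_mgs_mds,
mount_mgs_client and umount_mgs_client behave as their names suggest:

	# Sketch only; hypothetical test body, not part of the patch itself.
	some_pool_test() {
		# Separate MGS node: mount a client there so pool changes
		# made via "lctl pool_*" can be verified.
		if ! combined_mgs_mds; then
			mount_mgs_client
		fi

		pool_add $POOL || error "pool_add failed"
		# ... existing test body using the pool ...
		destroy_test_pools

		# Drop the extra client mount on the MGS node again.
		if ! combined_mgs_mds; then
			umount_mgs_client
		fi
	}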
[fs/lustre-release.git] lustre/tests/sanity.sh
index c9101d4..4859d9e 100755
@@ -2059,6 +2059,10 @@ test_27D() {
        local ost_list=$(seq $first_ost $ost_step $last_ost)
        local ost_range="$first_ost $last_ost $ost_step"
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        test_mkdir $DIR/$tdir
        pool_add $POOL || error "pool_add failed"
        pool_add_targets $POOL $ost_range || error "pool_add_targets failed"
@@ -2073,6 +2077,10 @@ test_27D() {
                error "llapi_layout_test failed"
 
        destroy_test_pools || error "destroy test pools failed"
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 27D "validate llapi_layout API"
 
@@ -12293,8 +12301,12 @@ test_200() {
        local test_path=$POOL_ROOT/$POOL_DIR_NAME
        local file_dir=$POOL_ROOT/file_tst
        local subdir=$test_path/subdir
-
        local rc=0
+
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        while : ; do
                # former test_200a test_200b
                pool_add $POOL                          || { rc=$? ; break; }
@@ -12314,7 +12326,7 @@ test_200() {
                pool_create_files $POOL $file_dir $files "$ost_list" \
                                                        || { rc=$? ; break; }
                # former test_200g test_200h
-               pool_lfs_df $POOL                       || { rc=$? ; break; }
+               pool_lfs_df $POOL                       || { rc=$? ; break; }
                pool_file_rel_path $POOL $test_path     || { rc=$? ; break; }
 
                # former test_201a test_201b test_201c
@@ -12322,11 +12334,15 @@ test_200() {
 
                local f=$test_path/$tfile
                pool_remove_all_targets $POOL $f        || { rc=$? ; break; }
-               pool_remove $POOL $f                    || { rc=$? ; break; }
+               pool_remove $POOL $f                    || { rc=$? ; break; }
                break
        done
 
        destroy_test_pools
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
        return $rc
 }
 run_test 200 "OST pools"
@@ -13004,6 +13020,10 @@ test_220() { #LU-325
 
        $LFS df -i
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
+
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_val=-1
        #define OBD_FAIL_OST_ENOINO              0x229
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_loc=0x229
@@ -13034,10 +13054,16 @@ test_220() { #LU-325
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_val=0
        do_facet ost$((OSTIDX + 1)) lctl set_param fail_loc=0
 
-       do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $OST || return 4
-       do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME || return 5
+       do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $OST ||
+               error "$LCTL pool_remove $FSNAME.$TESTNAME $OST failed"
+       do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
+               error "$LCTL pool_destroy $FSNAME.$TESTNAME failed"
        echo "unlink $MDSOBJS files @$next_id..."
-       unlinkmany $DIR/$tdir/f $MDSOBJS || return 6
+       unlinkmany $DIR/$tdir/f $MDSOBJS || error "unlinkmany failed"
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 220 "preallocated MDS objects still used if ENOSPC from OST"
 
@@ -13209,8 +13235,6 @@ test_225b () {
              skip_env "Need to mount OST to test" && return
        fi
 
-       [ $MDSCOUNT -ge 2 ] &&
-               skip "skipping now for more than one MDT" && return
        local mds=$(facet_host $SINGLEMDS)
        local target=$(do_nodes $mds 'lctl dl' | \
                       awk "{if (\$2 == \"UP\" && \$3 == \"mdt\") {print \$4}}")
@@ -14561,6 +14585,9 @@ test_253() {
                        osp.$mdtosc_proc1.reserved_mb_low)
        echo "prev high watermark $last_wm_h, prev low watermark $last_wm_l"
 
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        create_pool $FSNAME.$TESTNAME || error "Pool creation failed"
        do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $ost_name ||
                error "Adding $ost_name to pool failed"
@@ -14625,6 +14652,10 @@ test_253() {
                error "Remove $ost_name from pool failed"
        do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
                error "Pool destroy fialed"
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 253 "Check object allocation limit"
 
@@ -16764,8 +16795,11 @@ test_406() {
        local def_stripe_size=$($GETSTRIPE -S $MOUNT)
        local def_stripe_offset=$($GETSTRIPE -i $MOUNT)
        local def_pool=$($GETSTRIPE -p $MOUNT)
-
        local test_pool=$TESTNAME
+
+       if ! combined_mgs_mds ; then
+               mount_mgs_client
+       fi
        pool_add $test_pool || error "pool_add failed"
        pool_add_targets $test_pool 0 $(($OSTCOUNT - 1)) 1 ||
                error "pool_add_targets failed"
@@ -16827,6 +16861,10 @@ test_406() {
        local f=$DIR/$tdir/$tfile
        pool_remove_all_targets $test_pool $f
        pool_remove $test_pool $f
+
+       if ! combined_mgs_mds ; then
+               umount_mgs_client
+       fi
 }
 run_test 406 "DNE support fs default striping"