X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=inline;f=lustre%2Ftests%2Fconf-sanity.sh;h=7e0785968525f87670ed1cb5953f57f4d3cf70b4;hb=b3d388e60a5bc2b7b091814e1338f9f0bebfa416;hp=59fa2944f6ea8725c708df8b21b09e1e4338b586;hpb=73be3386684f35ad401f0d57be90cea61327aee8;p=fs%2Flustre-release.git diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh index 59fa294..7e07859 100644 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -49,13 +49,14 @@ MDSDEV1_2=$fs2mds_DEV OSTDEV1_2=$fs2ost_DEV OSTDEV2_2=$fs3ost_DEV +# bug number for skipped test: LU-11915 +ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110" + if ! combined_mgs_mds; then - # bug number for skipped test: LU-9860 LU-9860 LU-9860 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 43b 53b 54b" - # bug number for skipped test: LU-9875 LU-9879 LU-9879 LU-9879 LU-9879 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 70e 80 84 87 100" - # bug number for skipped test: LU-8110 LU-9879 LU-9879 LU-9879 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 102 104 105 107" + # bug number for skipped test: LU-11991 LU-11990 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 32a 32b 32c 32d 32e 66" + # bug number for skipped test: LU-9897 LU-12032 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 84 123F" fi # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time @@ -63,19 +64,14 @@ if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init) fi -[ $(facet_fstype $SINGLEMDS) = "zfs" ] && -# bug number for skipped test: - ALWAYS_EXCEPT="$ALWAYS_EXCEPT" -# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! 
- init_logging # require_dsh_mds || exit 0 require_dsh_ost || exit 0 -# 8 22 (min)" -[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69" +# 8 22 40 165 (min) +[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69 106 111" assert_DIR @@ -275,6 +271,10 @@ check_mount2() { echo "setup double mount lustre success" } +generate_name() { + cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $1 | head -n 1 +} + build_test_filter if [ "$ONLY" == "setup" ]; then @@ -475,10 +475,7 @@ test_5e() { run_test 5e "delayed connect, don't crash (bug 10268)" test_5f() { - if combined_mgs_mds ; then - skip "needs separate mgs and mds" - return 0 - fi + combined_mgs_mds && skip "needs separate mgs and mds" grep " $MOUNT " /etc/mtab && error false "unexpected entry in mtab before mount" && return 10 @@ -514,7 +511,7 @@ run_test 5f "mds down, cleanup after failed mount (bug 2712)" test_5g() { modprobe lustre [ $(lustre_version_code client) -lt $(version_code 2.9.53) ] && - { skip "automount of debugfs missing before 2.9.53" && return 0; } + skip "automount of debugfs missing before 2.9.53" umount /sys/kernel/debug $LCTL get_param -n devices | egrep -v "error" && \ error "lctl can't access debugfs data" @@ -522,6 +519,32 @@ test_5g() { } run_test 5g "handle missing debugfs" +test_5h() { + setup + + stop mds1 + #define OBD_FAIL_MDS_FS_SETUP 0x135 + do_facet mds1 "$LCTL set_param fail_loc=0x80000135" + start_mdt 1 && error "start mdt should fail" + start_mdt 1 || error "start mdt failed" + client_up || error "client_up failed" + cleanup +} +run_test 5h "start mdt failure at mdt_fs_setup()" + +test_5i() { + setup + + stop mds1 + #define OBD_FAIL_QUOTA_INIT 0xA05 + do_facet mds1 "$LCTL set_param fail_loc=0x80000A05" + start_mdt 1 && error "start mdt should fail" + start_mdt 1 || error "start mdt failed" + client_up || error "client_up failed" + cleanup +} +run_test 5i "start mdt failure at mdt_quota_init()" + test_6() { setup manual_umount_client @@ -582,7 +605,6 @@ run_test 9 "test ptldebug and subsystem for mkfs" 
test_17() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi setup @@ -611,7 +633,6 @@ run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should ret test_18() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) @@ -650,7 +671,7 @@ test_18() { log "use file $MDSDEV with MIN=$MIN" fi - [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" && return + [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" echo "mount mds with large journal..." @@ -741,12 +762,10 @@ test_21c() { run_test 21c "start mds between two osts, stop mds last" test_21d() { - if combined_mgs_mds ; then - skip "need separate mgs device" && return 0 - fi - stopall + combined_mgs_mds && skip "need separate mgs device" - reformat + stopall + reformat start_mgs || error "unable to start MGS" start_ost || error "unable to start OST1" @@ -774,7 +793,7 @@ cleanup_21e() { test_21e() { # LU-5863 if [[ -z "$fs3ost_DEV" || -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && - skip_env "mixed loopback and real device not working" && return + skip_env "mixed loopback and real device not working" fi local fs2mdsdev=$(mdsdevname 1_2) @@ -920,7 +939,7 @@ test_24a() { if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then is_blkdev $SINGLEMDS $MDSDEV && - skip_env "mixed loopback and real device not working" && return + skip_env "mixed loopback and real device not working" fi [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST @@ -984,12 +1003,14 @@ run_test 24a "Multiple MDTs on a single node" test_24b() { local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) + combined_mgs_mds || + skip "needs combined MGT and MDT device" if [ -z "$fs2mds_DEV" ]; then local dev=${SINGLEMDS}_dev local MDSDEV=${!dev} is_blkdev $SINGLEMDS $MDSDEV && - skip_env "mixed loopback and real device not working" && return + skip_env "mixed loopback and 
real device not working" fi local fs2mdsdev=$(mdsdevname 1_2) @@ -998,15 +1019,17 @@ test_24b() { add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --mgs --fsname=${FSNAME}2 \ --reformat $fs2mdsdev $fs2mdsvdev || exit 10 setup - start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && + start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && { + cleanup error "start MDS should fail" + } stop fs2mds -f cleanup || error "cleanup failed with rc $?" } run_test 24b "Multiple MGSs on a single node (should return err)" test_25() { - setup + setup_noconfig check_mount || error "check_mount failed" local MODULES=$($LCTL modules | awk '{ print $2 }') rmmod $MODULES 2>/dev/null || true @@ -1032,6 +1055,7 @@ test_26() { run_test 26 "MDT startup failure cleans LOV (should return errs)" test_27a() { + cleanup start_ost || error "Unable to start OST1" start_mds || error "Unable to start MDS" echo "Requeue thread should have started: " @@ -1045,7 +1069,7 @@ run_test 27a "Reacquire MGS lock if OST started first" test_27b() { # FIXME. ~grev - setup + setup_noconfig local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }') @@ -1062,32 +1086,45 @@ test_27b() { run_test 27b "Reacquire MGS lock after failover" test_28A() { # was test_28 - setup - TEST="llite.$FSNAME-*.max_read_ahead_whole_mb" - PARAM="$FSNAME.llite.max_read_ahead_whole_mb" - ORIG=$($LCTL get_param -n $TEST) - FINAL=$(($ORIG + 1)) - set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL - FINAL=$(($FINAL + 1)) - set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL + setup_noconfig + + local TEST="llite.$FSNAME-*.max_read_ahead_whole_mb" + local PARAM="$FSNAME.llite.max_read_ahead_whole_mb" + local orig=$($LCTL get_param -n $TEST) + local max=$($LCTL get_param -n \ + llite.$FSNAME-*.max_read_ahead_per_file_mb) + + orig=${orig%%.[0-9]*} + max=${max%%.[0-9]*} + echo "ORIG:$orig MAX:$max" + [[ $max -le $orig ]] && orig=$((max - 3)) + echo "ORIG:$orig MAX:$max" + + local final=$((orig 
+ 1)) + + set_persistent_param_and_check client "$TEST" "$PARAM" $final + final=$((final + 1)) + set_persistent_param_and_check client "$TEST" "$PARAM" $final umount_client $MOUNT || error "umount_client $MOUNT failed" mount_client $MOUNT || error "mount_client $MOUNT failed" - RESULT=$($LCTL get_param -n $TEST) - if [ $RESULT -ne $FINAL ]; then - error "New config not seen: wanted $FINAL got $RESULT" + + local result=$($LCTL get_param -n $TEST) + + if [ $result -ne $final ]; then + error "New config not seen: wanted $final got $result" else - echo "New config success: got $RESULT" + echo "New config success: got $result" fi - set_persistent_param_and_check client "$TEST" "$PARAM" $ORIG + set_persistent_param_and_check client "$TEST" "$PARAM" $orig cleanup || error "cleanup failed with rc $?" } run_test 28A "permanent parameter setting" test_28a() { # LU-4221 [[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] || - { skip "Need OST version at least 2.5.52" && return 0; } + skip "Need OST version at least 2.5.52" [ "$(facet_fstype ost1)" = "zfs" ] && - skip "LU-4221: no such proc params for ZFS OSTs" && return + skip "LU-4221: no such proc params for ZFS OSTs" local name local param @@ -1096,7 +1133,7 @@ test_28a() { # LU-4221 local new local device="$FSNAME-OST0000" - setup + setup_noconfig # In this test we will set three kinds of proc parameters with # lctl set_param -P or lctl conf_param: @@ -1132,21 +1169,21 @@ test_28a() { # LU-4221 run_test 28a "set symlink parameters permanently with lctl" test_29() { - [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return - setup > /dev/null 2>&1 + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" + setup_noconfig > /dev/null 2>&1 start_ost2 || error "Unable to start OST2" sleep 10 local PARAM="$FSNAME-OST0001.osc.active" # With lctl set_param -P the value $PROC_ACT will be sent to - # all nodes. The [^M] filter out the ability to set active + # all nodes. 
The [!M] filter out the ability to set active # on the MDS servers which is tested with wait_osp_* below. # For ost_server_uuid that only exist on client so filtering # is safe. - local PROC_ACT="osc.$FSNAME-OST0001-osc-*.active" - local PROC_UUID="osc.$FSNAME-OST0001-osc-[!M]*.ost_server_uuid" + local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active" + local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid" - ACTV=$($LCTL get_param -n $PROC_ACT) + ACTV=$($LCTL get_param -n $PROC_ACT) DEAC=$((1 - $ACTV)) set_persistent_param_and_check client $PROC_ACT $PARAM $DEAC # also check ost_server_uuid status @@ -1164,12 +1201,15 @@ test_29() { # test new client starts deactivated umount_client $MOUNT || error "umount_client $MOUNT failed" mount_client $MOUNT || error "mount_client $MOUNT failed" - RESULT=$($LCTL get_param -n $PROC_UUID | grep DEACTIV | grep NEW) - if [ -z "$RESULT" ]; then - error "New client start active: $(lctl get_param -n $PROC_UUID)" - else - echo "New client success: got $RESULT" - fi + + # the 2nd and 3rd field of ost_server_uuid do not update at the same + # time when using lctl set_param -P + wait_update_facet client \ + "$LCTL get_param -n $PROC_UUID | awk '{print \\\$3 }'" \ + "DEACTIVATED" || + error "New client start active: $($LCTL get_param -n $PROC_UUID)" + + echo "New client success: got '$($LCTL get_param -n $PROC_UUID)'" # make sure it reactivates set_persistent_param_and_check client $PROC_ACT $PARAM $ACTV @@ -1183,7 +1223,7 @@ test_29() { run_test 29 "permanently remove an OST" test_30a() { - setup + setup_noconfig echo Big config llog TEST="llite.$FSNAME-*.max_read_ahead_whole_mb" @@ -1201,7 +1241,7 @@ test_30a() { pass echo Erase parameter setting - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs "$PERM_CMD -d $TEST" || error "Erase param $TEST failed" else @@ -1213,6 +1253,8 @@ test_30a() { mount_client $MOUNT || error "mount_client $MOUNT failed" FINAL=$($LCTL get_param 
-n $TEST) echo "deleted (default) value=$FINAL, orig=$ORIG" + ORIG=${ORIG%%.[0-9]*} + FINAL=${FINAL%%.[0-9]*} # assumes this parameter started at the default value [ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG" @@ -1221,7 +1263,7 @@ test_30a() { run_test 30a "Big config llog and permanent parameter deletion" test_30b() { - setup + setup_noconfig local orignids=$($LCTL get_param -n \ osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids) @@ -1240,7 +1282,7 @@ test_30b() { local TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'" - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then PARAM="osc.$FSNAME-OST0000-osc-[^M]*.import" echo "Setting $PARAM from $TEST to $NEW" do_facet mgs "$PERM_CMD $PARAM='connection=$NEW'" || @@ -1262,7 +1304,7 @@ test_30b() { [ $NIDCOUNT -eq $((orignidcount + 1)) ] || error "Failover nid not added" - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs "$PERM_CMD -d osc.$FSNAME-OST0000-osc-*.import" else do_facet mgs "$PERM_CMD -d $FSNAME-OST0000.failover.node" || @@ -1435,28 +1477,19 @@ test_32newtarball() { # variable "tarballs". # t32_check() { + [ "$CLIENTONLY" ] && skip "Client-only testing" + local node=$(facet_active_host $SINGLEMDS) local r="do_node $node" - if [ "$CLIENTONLY" ]; then - skip "Client-only testing" - exit 0 - fi - - if ! $r which $TUNEFS; then - skip_env "tunefs.lustre required on $node" - exit 0 - fi + ! 
$r which "$TUNEFS" && skip_env "tunefs.lustre required on $node" local IMGTYPE=$(facet_fstype $SINGLEMDS) tarballs=$($r find $RLUSTRE/tests -maxdepth 1 \ -name \'disk*-$IMGTYPE.tar.bz2\') - if [ -z "$tarballs" ]; then - skip "No applicable tarballs found" - exit 0 - fi + [ -z "$tarballs" ] && skip "No applicable tarballs found" } t32_test_cleanup() { @@ -1778,8 +1811,8 @@ t32_test() { else if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] || - { skip "LU-2200: Cannot run over Inifiniband w/o lctl replace_nids " - "(Need MGS version at least 2.3.59)"; return 0; } + skip "LU-2200: Cannot run over IB w/o lctl replace_nids " + "(Need MGS version at least 2.3.59)" local osthost=$(facet_active_host ost1) local ostnid=$(do_node $osthost $LCTL list_nids | head -1) @@ -1901,7 +1934,7 @@ t32_test() { return 1 fi - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then $r $PERM_CMD osc.$fsname-OST0000*.import=connection=$nid || { error_noexit "Setting OST \"failover.node\"" return 1 @@ -1985,7 +2018,7 @@ t32_test() { fi if [ "$dne_upgrade" != "no" ]; then - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then $r $PERM_CMD mdc.$fsname-MDT0001*.import=connection=$nid || { error_noexit "Setting MDT1 \"failover.node\"" return 1 @@ -2395,7 +2428,7 @@ test_32c() { local tarball local rc=0 - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" t32_check for tarball in $tarballs; do # Do not support 1_8 and 2_1 direct upgrade to DNE2 anymore */ @@ -2423,7 +2456,7 @@ run_test 32d "convert ff test" test_32e() { [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.56) ]] || - { skip "Need MDS version at least 2.10.56"; return 0; } + skip "Need MDS version at least 2.10.56" local tarballs local tarball @@ -2450,8 +2483,7 @@ test_33a() { # bug 12333, was test_33 local 
dev=${SINGLEMDS}_dev local MDSDEV=${!dev} is_blkdev $SINGLEMDS $MDSDEV && - skip_env "mixed loopback and real device not working" && - return + skip_env "mixed loopback and real device not working" fi local fs2mdsdev=$(mdsdevname 1_2) @@ -2477,7 +2509,7 @@ test_33a() { # bug 12333, was test_33 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT start fs2ost $fs2ostdev $OST_MOUNT_OPTS - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs "$PERM_CMD timeout=200" || error "$PERM_CMD timeout=200 failed" else @@ -2489,7 +2521,8 @@ test_33a() { # bug 12333, was test_33 echo "ok." cp /etc/hosts $MOUNT2/ || error "copy /etc/hosts $MOUNT2/ failed" - $GETSTRIPE $MOUNT2/hosts || error "$GETSTRIPE $MOUNT2/hosts failed" + $LFS getstripe $MOUNT2/hosts || + error "$LFS getstripe $MOUNT2/hosts failed" umount $MOUNT2 stop fs2ost -f @@ -2558,7 +2591,7 @@ test_35a() { # bug 12459 local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs "$PERM_CMD \ mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" || error "Setting mdc.*${device}*.import=connection=\ @@ -2609,7 +2642,7 @@ test_35a() { # bug 12459 run_test 35a "Reconnect to the last active server first" test_35b() { # bug 18674 - remote_mds || { skip "local MDS" && return 0; } + remote_mds || skip "local MDS" setup debugsave @@ -2623,7 +2656,7 @@ test_35b() { # bug 18674 local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs "$PERM_CMD \ mdc.*${device}*.import=connection=$(h2nettype $FAKENID)" || error "Set mdc.*${device}*.import=connection=\ @@ -2699,10 +2732,10 @@ test_35b() { # bug 18674 run_test 35b "Continue 
reconnection retries, if the active server is busy" test_36() { # 12743 - [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return + [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || - { skip "remote OST" && return 0; } + skip "remote OST" local rc=0 local FSNAME2=test1234 @@ -2712,7 +2745,7 @@ test_36() { # 12743 if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then is_blkdev $SINGLEMDS $MDSDEV && - skip_env "mixed loopback and real device not working" && return + skip_env "mixed loopback and real device not working" fi local fs2mdsdev=$(mdsdevname 1_2) @@ -2722,6 +2755,7 @@ test_36() { # 12743 local fs2ostvdev=$(ostvdevname 1_2) local fs3ostvdev=$(ostvdevname 2_2) + load_modules add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \ --reformat $fs2mdsdev $fs2mdsvdev || exit 10 # XXX after we support non 4K disk blocksize in ldiskfs, specify a @@ -2789,7 +2823,6 @@ test_37() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi echo "MDS : $mdsdev" @@ -2803,6 +2836,8 @@ test_37() { if ! do_facet $SINGLEMDS test -b $mdsdev; then opts=$(csa_add "$opts" -o loop) fi + + load_modules mount_op=$(do_facet $SINGLEMDS mount -v -t lustre $opts \ $mdsdev_sym $mntpt 2>&1) rc=${PIPESTATUS[0]} @@ -2918,11 +2953,9 @@ test_41a() { #bug 14134 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] && ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then skip "Loop devices does not work with nosvc option" - return fi - combined_mgs_mds || - { skip "needs combined MGT and MDT device" && return 0; } + combined_mgs_mds || skip "needs combined MGT and MDT device" start_mdt 1 -o nosvc -n if [ $MDSCOUNT -ge 2 ]; then @@ -2949,10 +2982,9 @@ test_41b() { if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] && ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then skip "Loop devices does not work with nosvc option" - return fi - ! 
combined_mgs_mds && skip "needs combined mgs device" && return 0 + ! combined_mgs_mds && skip "needs combined mgs device" stopall reformat @@ -2988,7 +3020,7 @@ test_41c() { $server_version -lt $(version_code 2.5.50) ]] || [[ $server_version -ge $(version_code 2.5.4) && $server_version -lt $(version_code 2.5.11) ]] || - { skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; } + skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+" # ensure mds1 ost1 have been created even if running sub-test standalone cleanup @@ -3130,7 +3162,7 @@ test_42() { #bug 14693 setup check_mount || error "client was not mounted" - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then PARAM="llite.$FSNAME-*.some_wrong_param" else PARAM="$FSNAME.llite.some_wrong_param" @@ -3152,13 +3184,12 @@ run_test 42 "allow client/server mount/unmount with invalid config param" test_43a() { [[ $(lustre_version_code mgs) -ge $(version_code 2.5.58) ]] || - { skip "Need MDS version at least 2.5.58" && return 0; } + skip "Need MDS version at least 2.5.58" [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root" ID1=${ID1:-501} USER1=$(getent passwd | grep :$ID1:$ID1: | cut -d: -f1) - [ -z "$USER1" ] && skip_env "missing user with uid=$ID1 gid=$ID1" && - return + [ -z "$USER1" ] && skip_env "missing user with uid=$ID1 gid=$ID1" setup chmod ugo+x $DIR || error "chmod 0 failed" @@ -3304,11 +3335,11 @@ run_test 43a "check root_squash and nosquash_nids" test_43b() { # LU-5690 [[ $(lustre_version_code mgs) -ge $(version_code 2.7.62) ]] || - { skip "Need MGS version 2.7.62+"; return; } + skip "Need MGS version 2.7.62+" if [[ -z "$fs2mds_DEV" ]]; then is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && - skip_env "mixed loopback and real device not working" && return + skip_env "mixed loopback and real device not working" fi local fs2mdsdev=$(mdsdevname 1_2) @@ -3416,12 +3447,13 @@ test_46a() { #second client see all ost's mount_client $MOUNT2 || error 
"mount_client failed" - $SETSTRIPE -c -1 $MOUNT2 || error "$SETSTRIPE -c -1 $MOUNT2 failed" - $GETSTRIPE $MOUNT2 || error "$GETSTRIPE $MOUNT2 failed" + $LFS setstripe -c -1 $MOUNT2 || + error "$LFS setstripe -c -1 $MOUNT2 failed" + $LFS getstripe $MOUNT2 || error "$LFS getstripe $MOUNT2 failed" echo "ok" > $MOUNT2/widestripe - $GETSTRIPE $MOUNT2/widestripe || - error "$GETSTRIPE $MOUNT2/widestripe failed" + $LFS getstripe $MOUNT2/widestripe || + error "$LFS getstripe $MOUNT2/widestripe failed" # fill acl buffer for avoid expand lsm to them awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do @@ -3487,12 +3519,13 @@ test_48() { # bz-17636 LU-7473 setup_noconfig check_mount || error "check_mount failed" - $SETSTRIPE -c -1 $MOUNT || error "$SETSTRIPE -c -1 $MOUNT failed" - $GETSTRIPE $MOUNT || error "$GETSTRIPE $MOUNT failed" + $LFS setstripe -c -1 $MOUNT || + error "$LFS setstripe -c -1 $MOUNT failed" + $LFS getstripe $MOUNT || error "$LFS getstripe $MOUNT failed" echo "ok" > $MOUNT/widestripe - $GETSTRIPE $MOUNT/widestripe || - error "$GETSTRIPE $MOUNT/widestripe failed" + $LFS getstripe $MOUNT/widestripe || + error "$LFS getstripe $MOUNT/widestripe failed" # In the future, we may introduce more EAs, such as selinux, enlarged # LOV EA, and so on. These EA will use some EA space that is shared by @@ -3502,11 +3535,11 @@ test_48() { # bz-17636 LU-7473 then count=28 # hard coded of RPC protocol elif [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - count=4000 # max_num 4091 max_ea_size = 32768 + count=4000 # max_num 4091 max_ea_size = ~65536 elif ! 
large_xattr_enabled; then count=450 # max_num 497 max_ea_size = 4012 else - count=4500 # max_num 8187 max_ea_size = 1048492 + count=4500 # max_num 8187 max_ea_size = 65452 # not create too much (>5000) to save test time fi @@ -3775,19 +3808,19 @@ test_50f() { run_test 50f "normal statfs one server in down" test_50g() { - [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" setup start_ost2 || error "Unable to start OST2" - wait_osc_import_state mds ost2 FULL + wait_osc_import_state mds ost2 FULL wait_osc_import_ready client ost2 - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then local PARAM="osc.${FSNAME}-OST0001*.active" else local PARAM="${FSNAME}-OST0001.osc.active" fi - $SETSTRIPE -c -1 $DIR/$tfile || error "$SETSTRIPE failed" + $LFS setstripe -c -1 $DIR/$tfile || error "$LFS setstripe failed" do_facet mgs $PERM_CMD $PARAM=0 || error "Unable to deactivate OST" umount_client $MOUNT || error "Unable to unmount client" @@ -3809,7 +3842,7 @@ run_test 50g "deactivated OST should not cause panic" # LU-642 test_50h() { # prepare MDT/OST, make OSC inactive for OST1 - [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" || @@ -3827,17 +3860,18 @@ test_50h() { "${FSNAME}-OST0000.osc.active" 1 mkdir $DIR/$tdir/2 || error "mkdir $DIR/$tdir/2 failed" - $SETSTRIPE -c -1 -i 0 $DIR/$tdir/2 || - error "$SETSTRIPE $DIR/$tdir/2 failed" + $LFS setstripe -c -1 -i 0 $DIR/$tdir/2 || + error "$LFS setstripe $DIR/$tdir/2 failed" sleep 1 && echo "create a file after OST1 is activated" - # create some file - createmany -o $DIR/$tdir/2/$tfile-%d 1 + # doing some io, shouldn't crash + dd if=/dev/zero of=$DIR/$tdir/2/$tfile-io bs=1M count=10 # check OSC import is working stat $DIR/$tdir/2/* >/dev/null 2>&1 || 
error "some OSC imports are still not connected" # cleanup + rm -rf $DIR/$tdir umount_client $MOUNT || error "Unable to umount client" stop_ost2 || error "Unable to stop OST2" cleanup_nocli || error "cleanup_nocli failed with $?" @@ -3846,7 +3880,7 @@ run_test 50h "LU-642: activate deactivated OST" test_50i() { # prepare MDT/OST, make OSC inactive for OST1 - [ "$MDSCOUNT" -lt "2" ] && skip_env "needs >= 2 MDTs" && return + [ "$MDSCOUNT" -lt "2" ] && skip "needs >= 2 MDTs" load_modules [ $(facet_fstype mds2) == zfs ] && import_zpool mds2 @@ -3859,7 +3893,7 @@ test_50i() { mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then $PERM_CMD mdc.${FSNAME}-MDT0001-mdc-*.active=0 && error "deactive MDC0 succeeds" else @@ -3916,8 +3950,8 @@ test_51() { check_mount || error "check_mount failed" mkdir $MOUNT/$tdir || error "mkdir $MOUNT/$tdir failed" - $SETSTRIPE -c -1 $MOUNT/$tdir || - error "$SETSTRIPE -c -1 $MOUNT/$tdir failed" + $LFS setstripe -c -1 $MOUNT/$tdir || + error "$LFS setstripe -c -1 $MOUNT/$tdir failed" #define OBD_FAIL_MDS_REINT_DELAY 0x142 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x142" touch $MOUNT/$tdir/$tfile & @@ -3984,7 +4018,6 @@ diff_files_xattrs() test_52() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi start_mds || error "Unable to start MDS" @@ -4005,7 +4038,7 @@ test_52() { error "Unable to create temporary file" sleep 1 - $SETSTRIPE -c -1 -S 1M $DIR/$tdir || error "$SETSTRIPE failed" + $LFS setstripe -c -1 -S 1M $DIR/$tdir || error "$LFS setstripe failed" for (( i=0; i < nrfiles; i++ )); do multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c || @@ -4124,8 +4157,7 @@ thread_sanity() { fi [ $tmin -eq $tmax -a $tmin -eq $tstarted ] && - skip_env "module parameter forced $facet thread count" && - tmin=3 && tmax=$((3 * tmax)) + skip_env "module parameter forced $facet thread count" # Check that we can change 
min/max do_facet $facet "$LCTL set_param \ @@ -4209,7 +4241,6 @@ run_test 53b "check MDS thread count params" test_54a() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1) -p || @@ -4221,7 +4252,6 @@ run_test 54a "test llverdev and partial verify of device" test_54b() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi setup @@ -4239,7 +4269,6 @@ lov_objid_size() test_55() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi local mdsdev=$(mdsdevname 1) @@ -4271,7 +4300,7 @@ test_55() { stopall done - reformat + reformat_and_config } run_test 55 "check lov_objid size" @@ -4300,7 +4329,7 @@ test_56a() { $server_version -lt $(version_code 2.5.11) ]]; then wait_osc_import_state mds ost1 FULL wait_osc_import_state mds ost2 FULL - $SETSTRIPE --stripe-count=-1 $DIR/$tfile || + $LFS setstripe --stripe-count=-1 $DIR/$tfile || error "Unable to setstripe $DIR/$tfile" n=$($LFS getstripe --stripe-count $DIR/$tfile) [ "$n" -eq 2 ] || error "Stripe count not two: $n" @@ -4309,7 +4338,7 @@ test_56a() { stopall MDSJOURNALSIZE=$mds_journal_size_orig - reformat + reformat_and_config } run_test 56a "check big OST indexes and out-of-index-order start" @@ -4321,11 +4350,11 @@ cleanup_56b() { stop mds2 stop mds3 stopall - reformat + reformat_and_config } test_56b() { - [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return + [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" trap cleanup_56b EXIT RETURN ERR stopall @@ -4420,7 +4449,7 @@ test_57a() { # bug 22656 error "tunefs failed" start_mgsmds start_ost && error "OST registration from failnode should fail" - reformat + cleanup } run_test 57a "initial registration from failnode should fail (should return errs)" @@ -4434,7 +4463,7 @@ test_57b() { error "tunefs failed" start_mgsmds start_ost || error "OST registration from servicenode should not fail" - reformat + 
cleanup } run_test 57b "initial registration from servicenode should not fail" @@ -4462,7 +4491,7 @@ test_58() { # bug 22658 # restart MDS with missing llog files start_mds || error "unable to start MDS" do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" - reformat + cleanup } run_test 58 "missing llog files must not prevent MDT from mounting" @@ -4500,13 +4529,12 @@ test_59() { run_test 59 "writeconf mount option" test_60() { # LU-471 - local num - if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi + local num + for num in $(seq $MDSCOUNT); do add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \ --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' \ @@ -4523,15 +4551,16 @@ test_60() { # LU-471 echo $dump | grep uninit_bg > /dev/null && error "uninit_bg is set" # we set stride extended options echo $dump | grep stride > /dev/null || error "stride is not set" - reformat + stop_mds + reformat_and_config } run_test 60 "check mkfs.lustre --mkfsoptions -E -O options setting" test_61() { # LU-80 - local lxattr=false + local lxattr=$(large_xattr_enabled) [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] || - { skip "Need MDS version at least 2.1.53"; return 0; } + skip "Need MDS version at least 2.1.53" if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] && ! 
large_xattr_enabled; then @@ -4544,8 +4573,7 @@ test_61() { # LU-80 done fi - combined_mgs_mds || stop_mgs || error "stopping MGS service failed" - setup_noconfig || error "setting up the filesystem failed" + setup || error "setting up the filesystem failed" client_up || error "starting client failed" local file=$DIR/$tfile @@ -4588,7 +4616,7 @@ test_61() { # LU-80 log "remove large xattr $name from $file" setfattr -x $name $file || error "removing $name from $file failed" - if $lxattr; then + if $lxattr && [ $(facet_fstype $SINGLEMDS) == ldiskfs ]; then stopall || error "stopping for e2fsck run" for num in $(seq $MDSCOUNT); do run_e2fsck $(facet_active_host mds$num) \ @@ -4600,23 +4628,21 @@ test_61() { # LU-80 # need to delete this file to avoid problems in other tests rm -f $file - stopall || error "stopping systems failed" + cleanup || error "stopping systems failed" } run_test 61 "large xattr" test_62() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] || + skip "Need MDS version at least 2.2.51" # MRP-118 local mdsdev=$(mdsdevname 1) local ostdev=$(ostdevname 1) - [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] || - { skip "Need MDS version at least 2.2.51"; return 0; } - echo "disable journal for mds" do_facet mds1 $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed" start_mds && error "MDT start should fail" @@ -4631,7 +4657,6 @@ run_test 62 "start with disabled journal" test_63() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "ldiskfs only test" - return fi do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs @@ -4639,7 +4664,6 @@ test_63() { awk '/ldiskfs_inode_cache/ { print $5 / $6 }') if [ -z "$inode_slab" ]; then skip "ldiskfs module has not been loaded" - return fi echo "$inode_slab ldiskfs inodes per page" @@ -4648,7 +4672,7 @@ test_63() { # If kmalloc-128 is also 1 per page - this is a debug 
kernel # and so this is not an error. local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" | - awk '/^(kmalloc|size)-128 / { print $5 / $6 }') + awk '/^(dma-kmalloc|size)-128 / { print $5 / $6 }') # 32 128-byte chunks in 4k [ "${kmalloc128%.*}" -lt "32" ] || error "ldiskfs inode too big, only $inode_slab objs/page, " \ @@ -4674,7 +4698,7 @@ run_test 64 "check lfs df --lazy " test_65() { # LU-2237 # Currently, the test is only valid for ldiskfs backend [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && - skip "ldiskfs only test" && return + skip "ldiskfs only test" local devname=$(mdsdevname ${SINGLEMDS//mds/}) local brpt=$(facet_mntpt brpt) @@ -4713,7 +4737,7 @@ run_test 65 "re-create the lost last_rcvd file when server mount" test_66() { [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] || - { skip "Need MGS version at least 2.3.59"; return 0; } + skip "Need MGS version at least 2.3.59" setup local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1) @@ -4758,6 +4782,8 @@ test_66() { echo "wrong nids list should not destroy the system" do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" && error "wrong parse" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "asdfasdf, asdfadf" && + error "wrong parse" echo "replace OST nid" do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID || @@ -4771,6 +4797,14 @@ test_66() { do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" && error "wrong parse" + local FAKE_NIDS="192.168.0.112@tcp1,192.168.0.112@tcp2" + local FAKE_FAILOVER="192.168.0.113@tcp1,192.168.0.113@tcp2" + local NIDS_AND_FAILOVER="$MDS_NID,$FAKE_NIDS:$FAKE_FAILOVER" + echo "set NIDs with failover" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $NIDS_AND_FAILOVER || + error "replace nids failed" + + echo "replace MDS nid" do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID || error "replace nids failed" @@ -4791,7 +4825,7 @@ test_66() { check_mount || error "error after nid replace" cleanup || error "cleanup 
failed" - reformat + reformat_and_config } run_test 66 "replace nids" @@ -4860,16 +4894,12 @@ test_68() { local END [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.53) ] || - { skip "Need MDS version at least 2.4.53"; return 0; } + skip "Need MDS version at least 2.4.53" umount_client $MOUNT || error "umount client failed" - if ! combined_mgs_mds; then - start_mgs || error "start mgs failed" - fi - - start_mdt 1 || error "MDT start failed" - start_ost || error "Unable to start OST1" + start_mgsmds + start_ost # START-END - the sequences we'll be reserving START=$(do_facet $SINGLEMDS \ @@ -4911,11 +4941,11 @@ test_69() { local server_version=$(lustre_version_code $SINGLEMDS) [[ $server_version -lt $(version_code 2.4.2) ]] && - skip "Need MDS version at least 2.4.2" && return + skip "Need MDS version at least 2.4.2" [[ $server_version -ge $(version_code 2.4.50) ]] && [[ $server_version -lt $(version_code 2.5.0) ]] && - skip "Need MDS version at least 2.5.0" && return + skip "Need MDS version at least 2.5.0" setup mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" @@ -4942,8 +4972,8 @@ test_69() { local ifree=$($LFS df -i $MOUNT | awk '/OST0000/ { print $4 }') log "On OST0, $ifree inodes available. Want $num_create." 
- $SETSTRIPE -i 0 $DIR/$tdir || - error "$SETSTRIPE -i 0 $DIR/$tdir failed" + $LFS setstripe -i 0 $DIR/$tdir || + error "$LFS setstripe -i 0 $DIR/$tdir failed" if [ $ifree -lt 10000 ]; then files=$(( ifree - 50 )) else @@ -4973,7 +5003,7 @@ test_69() { mount_client $MOUNT || error "mount client failed" touch $DIR/$tdir/$tfile-last || error "create file after reformat" - local idx=$($GETSTRIPE -i $DIR/$tdir/$tfile-last) + local idx=$($LFS getstripe -i $DIR/$tdir/$tfile-last) [ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true local iused=$($LFS df -i $MOUNT | awk '/OST0000/ { print $3 }') @@ -4985,7 +5015,7 @@ test_69() { run_test 69 "replace an OST with the same index" test_70a() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" local MDTIDX=1 cleanup || error "cleanup failed with $?" @@ -5010,7 +5040,7 @@ test_70a() { run_test 70a "start MDT0, then OST, then MDT1" test_70b() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" local MDTIDX=1 start_ost || error "OST0 start fail" @@ -5031,7 +5061,7 @@ test_70b() { run_test 70b "start OST, MDT1, MDT0" test_70c() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" local MDTIDX=1 start_mds || error "MDS start fail" @@ -5055,7 +5085,7 @@ test_70c() { run_test 70c "stop MDT0, mkdir fail, create remote dir fail" test_70d() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" local MDTIDX=1 start_mds || error "MDS start fail" @@ -5082,12 +5112,14 @@ test_70d() { run_test 70d "stop MDT1, mkdir succeed, create remote dir fail" test_70e() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.62) ] || - { skip "Need MDS version at least 2.7.62"; return 0; } + skip "Need MDS 
version at least 2.7.62" - cleanup || error "cleanup failed with $?" + reformat || error "reformat failed with $?" + + load_modules local mdsdev=$(mdsdevname 1) local ostdev=$(ostdevname 1) @@ -5096,6 +5128,10 @@ test_70e() { local opts_mds="$(mkfs_opts mds1 $mdsdev) --reformat $mdsdev $mdsvdev" local opts_ost="$(mkfs_opts ost1 $ostdev) --reformat $ostdev $ostvdev" + if ! combined_mgs_mds ; then + start_mgs + fi + add mds1 $opts_mds || error "add mds1 failed" start_mdt 1 || error "start mdt1 failed" add ost1 $opts_ost || error "add ost1 failed" @@ -5136,9 +5172,9 @@ test_70e() { run_test 70e "Sync-on-Cancel will be enabled by default on DNE" test_71a() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" if combined_mgs_mds; then - skip "needs separate MGS/MDT" && return + skip "needs separate MGS/MDT" fi local MDTIDX=1 @@ -5167,9 +5203,9 @@ test_71a() { run_test 71a "start MDT0 OST0, MDT1, OST1" test_71b() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" if combined_mgs_mds; then - skip "needs separate MGS/MDT" && return + skip "needs separate MGS/MDT" fi local MDTIDX=1 @@ -5197,10 +5233,9 @@ test_71b() { run_test 71b "start MDT1, OST0, MDT0, OST1" test_71c() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return - if combined_mgs_mds; then - skip "needs separate MGS/MDT" && return - fi + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" + combined_mgs_mds && skip "needs separate MGS/MDT" + local MDTIDX=1 start_ost || error "OST0 start fail" @@ -5228,10 +5263,9 @@ test_71c() { run_test 71c "start OST0, OST1, MDT1, MDT0" test_71d() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return - if combined_mgs_mds; then - skip "needs separate MGS/MDT" && return - fi + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" + combined_mgs_mds && skip "needs separate MGS/MDT" + local MDTIDX=1 start_ost || error "OST0 start fail" @@ -5259,10 +5293,9 @@ test_71d() { 
run_test 71d "start OST0, MDT1, MDT0, OST1" test_71e() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return - if combined_mgs_mds; then - skip "needs separate MGS/MDT" && return - fi + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" + combined_mgs_mds && skip "needs separate MGS/MDT" + local MDTIDX=1 start_ost || error "OST0 start fail" @@ -5290,14 +5323,17 @@ test_71e() { run_test 71e "start OST0, MDT1, OST1, MDT0" test_72() { #LU-2634 + [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && + skip "ldiskfs only test" + local mdsdev=$(mdsdevname 1) local ostdev=$(ostdevname 1) local cmd="$E2FSCK -fnvd $mdsdev" local fn=3 local add_options - [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && - skip "ldiskfs only test" && return + cleanup + load_modules if combined_mgs_mds; then add_options='--reformat' @@ -5350,20 +5386,20 @@ test_73() { #LU-3006 $LCTL get_param -n osc.*OST0000-osc-[^M]*.import | grep failover_nids | grep 1.2.3.4@$NETTYPE || error "failover nids haven't changed" umount_client $MOUNT || error "umount client failed" - stopall - reformat + stop_ost + stop_mds } run_test 73 "failnode to update from mountdata properly" test_75() { # LU-2374 [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.1) ]] && - skip "Need MDS version at least 2.4.1" && return + skip "Need MDS version at least 2.4.1" local index=0 local opts_mds="$(mkfs_opts mds1 $(mdsdevname 1)) \ - --reformat $(mdsdevname 1) $(mdsvdevname 1)" + --replace --reformat $(mdsdevname 1) $(mdsvdevname 1)" local opts_ost="$(mkfs_opts ost1 $(ostdevname 1)) \ - --reformat $(ostdevname 1) $(ostvdevname 1)" + --replace --reformat $(ostdevname 1) $(ostvdevname 1)" #check with default parameters add mds1 $opts_mds || error "add mds1 failed for default params" @@ -5378,21 +5414,16 @@ test_75() { # LU-2374 add mds1 $opts_mds || error "add mds1 failed for new params" add ost1 $opts_ost || error "add ost1 failed for new params" - if ! 
combined_mgs_mds; then - stop_mgs || error "stop mgs failed" - fi - reformat + + reformat_and_config return 0 } run_test 75 "The order of --index should be irrelevant" test_76a() { [[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] || - { skip "Need MDS version at least 2.4.52" && return 0; } + skip "Need MDS version at least 2.4.52" - if ! combined_mgs_mds; then - start_mgs || error "start mgs failed" - fi setup local MDMB_PARAM="osc.*.max_dirty_mb" echo "Change MGS params" @@ -5451,7 +5482,7 @@ run_test 76a "set permanent params with lctl across mounts" test_76b() { # LU-4783 [[ $(lustre_version_code mgs) -ge $(version_code 2.5.57) ]] || - { skip "Need MGS version at least 2.5.57" && return 0; } + skip "Need MGS version at least 2.5.57" stopall setupall do_facet mgs $LCTL get_param mgs.MGS.live.params || @@ -5462,7 +5493,7 @@ run_test 76b "verify params log setup correctly" test_76c() { [[ $(lustre_version_code mgs) -ge $(version_code 2.8.54) ]] || - { skip "Need MDS version at least 2.4.52" && return 0; } + skip "Need MDS version at least 2.4.52" setupall local MASK_PARAM="mdd.*.changelog_mask" echo "Change changelog_mask" @@ -5513,11 +5544,11 @@ run_test 76d "verify llite.*.xattr_cache can be set by 'lctl set_param -P' corre test_77() { # LU-3445 local server_version=$(lustre_version_code $SINGLEMDS) [[ $server_version -ge $(version_code 2.8.55) ]] || - { skip "Need MDS version 2.8.55+ "; return; } + skip "Need MDS version 2.8.55+ " if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && - skip_env "mixed loopback and real device not working" && return + skip_env "mixed loopback and real device not working" fi local fs2mdsdev=$(mdsdevname 1_2) @@ -5554,7 +5585,7 @@ run_test 77 "comma-separated MGS NIDs and failover node NIDs" test_78() { [[ $(facet_fstype $SINGLEMDS) != ldiskfs || $(facet_fstype ost1) != ldiskfs ]] && - skip "ldiskfs only test" && return + skip "ldiskfs only test" # reformat the 
Lustre filesystem with a smaller size local saved_MDSCOUNT=$MDSCOUNT @@ -5686,7 +5717,8 @@ test_78() { # check whether the MDT or OST filesystem was shrunk or not if ! $shrunk; then combined_mgs_mds || stop_mgs || error "(9) stop mgs failed" - reformat || error "(10) reformat Lustre filesystem failed" + reformat_and_config || + error "(10) reformat Lustre filesystem failed" return 0 fi @@ -5711,13 +5743,13 @@ test_78() { MDSCOUNT=$saved_MDSCOUNT OSTCOUNT=$saved_OSTCOUNT - reformat || error "(14) reformat Lustre filesystem failed" + reformat_and_config || error "(14) reformat Lustre filesystem failed" } run_test 78 "run resize2fs on MDT and OST filesystems" test_79() { # LU-4227 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.5.59) ]] || - { skip "Need MDS version at least 2.5.59"; return 0; } + skip "Need MDS version at least 2.5.59" local mdsdev1=$(mdsdevname 1) local mdsvdev1=$(mdsvdevname 1) @@ -5744,7 +5776,7 @@ test_79() { # LU-4227 if ( $i ~ "--mgsnode" ) { print $i; break } }') [ -n $mgsnode_opt ] && opts_ost1=$(echo $opts_ost1 | sed -e "s/$mgsnode_opt//") - + load_modules # -MGS, format a mdt without --mgs option add mds1 $opts_mds1 $mdsdev1 $mdsvdev1 && error "Must specify --mgs when formatting mdt combined with mgs" @@ -5757,7 +5789,7 @@ test_79() { # LU-4227 add ost1 $opts_ost1 $ostdev1 $ostvdev2 && error "Must specify --mgsnode when formatting an ost" - reformat + reformat_and_config } run_test 79 "format MDT/OST without mgs option (should return errors)" @@ -5773,7 +5805,9 @@ test_80() { start_ost2 || error "Failed to start OST2" do_facet ost1 "$LCTL set_param fail_loc=0" - stopall + stop_ost2 + stop_ost + stop_mds } run_test 80 "mgc import reconnect race" @@ -5804,7 +5838,10 @@ restore_ostindex() { done OSTCOUNT=$saved_ostcount - formatall + reformat + if ! 
combined_mgs_mds ; then + start_mgs + fi } # The main purpose of this test is to ensure the OST_INDEX_LIST functions as @@ -5812,8 +5849,8 @@ restore_ostindex() { # assigned index and ensures we can mount such a formatted file system test_81() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || - { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; } + skip "Need MDS version at least 2.6.54" + [[ $OSTCOUNT -ge 3 ]] || skip_env "needs >= 3 OSTs" stopall @@ -5851,8 +5888,23 @@ test_81() { # LU-4665 # Check max_easize. local max_easize=$($LCTL get_param -n llite.*.max_easize) - [[ $max_easize -eq 128 ]] || - error "max_easize is $max_easize, should be 128 bytes" + if [ $MDS1_VERSION -lt $(version_code 2.12.51) ] + then + [[ $max_easize -eq 128 ]] || + error "max_easize is $max_easize, should be 128 bytes" + else + # LU-11868 + # 4012 is 4096 - ldiskfs ea overhead + [[ $max_easize -ge 4012 ]] || + error "max_easize is $max_easize, should be at least 4012 bytes" + + # 65452 is XATTR_SIZE_MAX - ldiskfs ea overhead + if large_xattr_enabled; + then + [[ $max_easize -ge 65452 ]] || + error "max_easize is $max_easize, should be at least 65452 bytes" + fi + fi restore_ostindex } @@ -5874,8 +5926,8 @@ run_test 81 "sparse OST indexing" # 5. Lastly ensure this functionality fails with directories. 
test_82a() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || - { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; } + skip "Need MDS version at least 2.6.54" + [[ $OSTCOUNT -ge 3 ]] || skip_env "needs >= 3 OSTs" stopall @@ -5892,7 +5944,7 @@ test_82a() { # LU-4665 done ost_indices=$(comma_list $ost_indices) - trap "restore_ostindex" EXIT + stack_trap "restore_ostindex" EXIT echo -e "\nFormat $OSTCOUNT OSTs with sparse indices $ost_indices" OST_INDEX_LIST=[$ost_indices] formatall @@ -5903,16 +5955,23 @@ test_82a() { # LU-4665 error "start ost$i failed" done + # Collect debug information - start of test + do_nodes $(comma_list $(mdts_nodes)) \ + $LCTL get_param osc.*.prealloc_*_id + mount_client $MOUNT || error "mount client $MOUNT failed" wait_osts_up $LFS df $MOUNT || error "$LFS df $MOUNT failed" mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" + stack_trap "do_nodes $(comma_list $(mdts_nodes)) \ + $LCTL get_param osc.*.prealloc_*_id" EXIT + # 1. If the file does not exist, new file will be created # with specified OSTs. local file=$DIR/$tdir/$tfile-1 - local cmd="$SETSTRIPE -o $ost_indices $file" + local cmd="$LFS setstripe -o $ost_indices $file" echo -e "\n$cmd" eval $cmd || error "$cmd failed" check_stripe_count $file $OSTCOUNT @@ -5924,7 +5983,7 @@ test_82a() { # LU-4665 # will be attached with specified layout. file=$DIR/$tdir/$tfile-2 mcreate $file || error "mcreate $file failed" - cmd="$SETSTRIPE -o $ost_indices $file" + cmd="$LFS setstripe -o $ost_indices $file" echo -e "\n$cmd" eval $cmd || error "$cmd failed" dd if=/dev/urandom of=$file count=1 bs=1M > /dev/null 2>&1 || @@ -5941,7 +6000,7 @@ test_82a() { # LU-4665 # be in the OST indices list. 
local start_ost_idx=${ost_indices##*,} file=$DIR/$tdir/$tfile-3 - cmd="$SETSTRIPE -o $ost_indices -i $start_ost_idx $file" + cmd="$LFS setstripe -o $ost_indices -i $start_ost_idx $file" echo -e "\n$cmd" eval $cmd || error "$cmd failed" check_stripe_count $file $OSTCOUNT @@ -5949,7 +6008,7 @@ test_82a() { # LU-4665 check_start_ost_idx $file $start_ost_idx file=$DIR/$tdir/$tfile-4 - cmd="$SETSTRIPE" + cmd="$LFS setstripe" cmd+=" -o $(exclude_items_from_list $ost_indices $start_ost_idx)" cmd+=" -i $start_ost_idx $file" echo -e "\n$cmd" @@ -5958,7 +6017,7 @@ test_82a() { # LU-4665 # 5. Specifying OST indices for directory should succeed. local dir=$DIR/$tdir/$tdir mkdir $dir || error "mkdir $dir failed" - cmd="$SETSTRIPE -o $ost_indices $dir" + cmd="$LFS setstripe -o $ost_indices $dir" if [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.11.53) && $(lustre_version_code client -gt $(version_code 2.11.53)) ]]; then echo -e "\n$cmd" @@ -5989,8 +6048,8 @@ cleanup_82b() { # supplied pool. test_82b() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || - { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 4 ]] || { skip_env "needs >= 4 OSTs" && return; } + skip "Need MDS version at least 2.6.54" + [[ $OSTCOUNT -ge 4 ]] || skip_env "needs >= 4 OSTs" stopall @@ -6059,17 +6118,17 @@ test_82b() { # LU-4665 # If [--pool|-p ] is set with [--ost-list|-o ], # then the OSTs must be the members of the pool. local file=$DIR/$tdir/$tfile - cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file" + cmd="$LFS setstripe -p $ost_pool -o $ost_idx_in_list $file" echo -e "\n$cmd" eval $cmd && error "OST with index $ost_idx_in_list should be" \ "in OST pool $ost_pool" # Only select OST $ost_idx_in_list from $ost_pool for file. 
ost_idx_in_list=${ost_idx_in_pool#*,} - cmd="$SETSTRIPE -p $ost_pool -o $ost_idx_in_list $file" + cmd="$LFS setstripe -p $ost_pool -o $ost_idx_in_list $file" echo -e "\n$cmd" eval $cmd || error "$cmd failed" - cmd="$GETSTRIPE $file" + cmd="$LFS getstripe $file" echo -e "\n$cmd" eval $cmd || error "$cmd failed" check_stripe_count $file 2 @@ -6083,10 +6142,9 @@ run_test 82b "specify OSTs for file with --pool and --ost-list options" test_83() { [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] || - { skip "Need OST version at least 2.6.91" && return 0; } + skip "Need OST version at least 2.6.91" if [ $(facet_fstype ost1) != ldiskfs ]; then skip "ldiskfs only test" - return fi local dev @@ -6130,7 +6188,7 @@ test_83() { err=$(do_facet ost1 dmesg | grep "VFS: Busy inodes after unmount of") echo "string err $err" [ -z "$err" ] || error $err - reformat + reformat_and_config } run_test 83 "ENOSPACE on OST doesn't cause message VFS: \ Busy inodes after unmount ..." @@ -6201,7 +6259,7 @@ run_test 84 "check recovery_hard_time" test_85() { [[ $(lustre_version_code ost1) -ge $(version_code 2.7.55) ]] || - { skip "Need OST version at least 2.7.55" && return 0; } + skip "Need OST version at least 2.7.55" ##define OBD_FAIL_OSD_OST_EA_FID_SET 0x197 do_facet ost1 "lctl set_param fail_loc=0x197" start_ost @@ -6221,9 +6279,9 @@ cleanup_86() { test_86() { local server_version=$(lustre_version_code $SINGLEMDS) [ "$(facet_fstype ost1)" = "zfs" ] && - skip "LU-6442: no such mkfs params for ZFS OSTs" && return + skip "LU-6442: no such mkfs params for ZFS OSTs" [[ $server_version -ge $(version_code 2.7.56) ]] || - { skip "Need server version newer than 2.7.55"; return 0; } + skip "Need server version newer than 2.7.55" local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \ --reformat $(ostdevname 1) $(ostvdevname 1)" @@ -6261,11 +6319,11 @@ run_test 86 "Replacing mkfs.lustre -G option" test_87() { #LU-6544 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] || - { 
skip "Need MDS version at least 2.9.51" && return; } + skip "Need MDS version at least 2.9.51" [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] && - { skip "ldiskfs only test" && return; } + skip "ldiskfs only test" [[ $OSTCOUNT -gt 59 ]] && - { skip "Ignore wide striping situation" && return; } + skip "Ignore wide striping situation" local mdsdev=$(mdsdevname 1) local mdsvdev=$(mdsvdevname 1) @@ -6301,6 +6359,10 @@ test_87() { #LU-6544 unload_modules reformat + if ! combined_mgs_mds ; then + start_mgs + fi + add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$stripe_cnt \ --reformat $mdsdev $mdsvdev || error "add mds1 failed" start_mdt 1 > /dev/null || error "start mdt1 failed" @@ -6312,10 +6374,10 @@ test_87() { #LU-6544 check_mount || error "check client $MOUNT failed" #set xattr - $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file || + $LFS setstripe -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file || error "Create file with 3 components failed" $TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed" - i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed" + i=$($LFS getstripe -I3 -c $file) || error "get 3rd stripe count failed" if [ $i -ne $OSTCOUNT ]; then left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24) echo -n "Since only $i out $OSTCOUNT OSTs are used, " @@ -6340,13 +6402,17 @@ test_87() { #LU-6544 more than $left_size-byte space left in inode." echo "Verified: at most $left_size-byte space left in inode." 
- stopall + umount_ldiskfs $SINGLEMDS + + for i in $(seq $OSTCOUNT); do + stop ost$i -f || error "stop ost$i failed" + done } run_test 87 "check if MDT inode can hold EAs with N stripes properly" test_88() { [ "$(facet_fstype mds1)" == "zfs" ] && - skip "LU-6662: no implementation for ZFS" && return + skip "LU-6662: no implementation for ZFS" load_modules @@ -6369,7 +6435,7 @@ run_test 88 "check the default mount options can be overridden" test_89() { # LU-7131 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.54) ]] || - { skip "Need MDT version at least 2.9.54" && return 0; } + skip "Need MDT version at least 2.9.54" local key=failover.node local val1=192.0.2.254@tcp0 # Reserved IPs, see RFC 5735 @@ -6438,7 +6504,7 @@ test_89() { # LU-7131 [ $(echo $params | tr ' ' '\n') == "$key=$val1" ] || error "on-disk param not added correctly with --erase-params" - reformat + reformat_and_config } run_test 89 "check tunefs --param and --erase-param{s} options" @@ -6527,15 +6593,11 @@ check_max_mod_rpcs_in_flight() { } test_90a() { - reformat - if ! combined_mgs_mds ; then - start_mgs - fi setup [[ $($LCTL get_param mdc.*.import | grep "connect_flags:.*multi_mod_rpc") ]] || - { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + skip "Need MDC with 'multi_mod_rpcs' feature" # check default value $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed" @@ -6555,7 +6617,7 @@ test_90b() { [[ $($LCTL get_param mdc.*.import | grep "connect_flags:.*multi_mod_rpc") ]] || - { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + skip "Need MDC with 'multi_mod_rpcs' feature" ### test 1. 
# update max_mod_rpcs_in_flight @@ -6567,9 +6629,8 @@ test_90b() { tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import | grep -c "multi_mod_rpcs") if [ "$tmp" -ne $MDSCOUNT ]; then - echo "Client not able to send multiple modify RPCs in parallel" cleanup - return + skip "Client not able to send multiple modify RPCs in parallel" fi # update max_mod_rpcs_in_flight @@ -6611,15 +6672,14 @@ test_90c() { [[ $($LCTL get_param mdc.*.import | grep "connect_flags:.*multi_mod_rpc") ]] || - { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + skip "Need MDC with 'multi_mod_rpcs' feature" # check client is able to send multiple modify RPCs in paralell tmp=$($LCTL get_param -n mdc.$FSNAME-MDT*-mdc-*.import | grep -c "multi_mod_rpcs") if [ "$tmp" -ne $MDSCOUNT ]; then - skip "Client not able to send multiple modify RPCs in parallel" cleanup - return + skip "Client not able to send multiple modify RPCs in parallel" fi # get max_rpcs_in_flight value @@ -6673,7 +6733,7 @@ test_90d() { [[ $($LCTL get_param mdc.*.import | grep "connect_flags:.*multi_mod_rpc") ]] || - { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; } + skip "Need MDC with 'multi_mod_rpcs' feature" $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed" idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir)) @@ -6683,9 +6743,8 @@ test_90d() { tmp=$($LCTL get_param -N \ mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight) if [ -z "$tmp" ]; then - skip "Client does not support multiple modify RPCs in flight" cleanup - return + skip "Client does not support multiple modify RPCs in flight" fi # get current value of max_mod_rcps_in_flight @@ -6747,9 +6806,9 @@ test_91() { local found [[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] || - { skip "Need OST version at least 2.7.63" && return 0; } + skip "Need OST version at least 2.7.63" [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] || - { skip "Need MDT version at least 2.7.63" && return 0; } + skip "Need MDT version at 
least 2.7.63" start_mds || error "MDS start failed" start_ost || error "unable to start OST" @@ -6942,7 +7001,7 @@ test_92() { run_test 92 "ldev returns MGS NID correctly in command substitution" test_93() { - [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return + [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" reformat #start mgs or mgs/mdt0 @@ -6956,14 +7015,14 @@ test_93() { start_ost || error "OST0 start fail" #define OBD_FAIL_MGS_WRITE_TARGET_DELAY 0x90e - do_facet mgs "$LCTL set_param fail_val = 10 fail_loc=0x8000090e" + do_facet mgs "$LCTL set_param fail_val=10 fail_loc=0x8000090e" for num in $(seq 2 $MDSCOUNT); do start_mdt $num & done mount_client $MOUNT || error "mount client fails" wait_osc_import_state mds ost FULL - wait_osc_import_state client ost FULL + wait_osc_import_ready client ost check_mount || error "check_mount failed" cleanup || error "cleanup failed with $?" @@ -7247,14 +7306,14 @@ run_test 98 "Buffer-overflow check while parsing mount_opts" test_99() { [[ $(facet_fstype ost1) != ldiskfs ]] && - { skip "ldiskfs only test" && return; } + skip "ldiskfs only test" [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] || - { skip "Need OST version at least 2.8.57" && return 0; } + skip "Need OST version at least 2.8.57" local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \ --reformat $(ostdevname 1) $(ostvdevname 1)" do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" && - skip "meta_bg already set" && return + skip "meta_bg already set" local opts=ost_opts if [[ ${!opts} != *mkfsoptions* ]]; then @@ -7268,18 +7327,18 @@ test_99() echo "params: $opts" + load_modules add ost1 $opts || error "add ost1 failed with new params" do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" || error "meta_bg is not set" - - reformat } run_test 99 "Adding meta_bg option" test_100() { reformat - start_mds || error "MDS start failed" + + start_mgsmds || error "MDS start failed" start_ost || error "unable to start OST" 
mount_client $MOUNT || error "client start failed" check_mount || error "check_mount failed" @@ -7336,6 +7395,10 @@ run_test 101 "Race MDT->OST reconnection with create" test_102() { [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.53) ]] || skip "Need server version greater than 2.9.53" + [[ "$(mdsdevname 1)" != "$(mgsdevname)" ]] && + [[ "$(facet_host mds1)" = "$(facet_host mgs)" ]] && + skip "MGS must be on different node or combined" + cleanup || error "cleanup failed with $?" local mds1dev=$(mdsdevname 1) @@ -7460,7 +7523,7 @@ test_103() { test_103_set_pool $FSNAME OST0000 - $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 || + $LFS setstripe -p $FSNAME $DIR/$tdir/d0 || error "(6) Fail to setstripe on $DIR/$tdir/d0" if ! combined_mgs_mds ; then @@ -7484,7 +7547,7 @@ test_103() { test_103_set_pool $save_fsname OST0001 fi - $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 || + $LFS setstripe -p $save_fsname $DIR/$tdir/f0 || error "(16) Fail to setstripe on $DIR/$tdir/f0" if ! combined_mgs_mds ; then umount_mgs_client @@ -7523,7 +7586,7 @@ test_104() { # LU-6952 awk '{ print $2 }') [[ $(version_code $lctl_ver) -lt $(version_code 2.9.55) ]] && - { skip "this test needs utils above 2.9.55" && return 0; } + skip "this test needs utils above 2.9.55" # specify "acl" in mount options used by mkfs.lustre if [ -z "$MDS_MOUNT_FS_OPTS" ]; then @@ -7537,6 +7600,10 @@ test_104() { # LU-6952 #reformat/remount the MDT to apply the MDT_MOUNT_FS_OPT options formatall + if ! 
combined_mgs_mds ; then + start_mgs + fi + if [ -z "$MDS_MOUNT_OPTS" ]; then MDS_MOUNT_OPTS="-o noacl" else @@ -7560,9 +7627,6 @@ test_104() { # LU-6952 MDS_MOUNT_OPTS=$mds_mountopts OST_MOUNT_OPTS=$ost_mountopts MDS_MOUNT_FS_OPTS=$mds_mountfsopts - - formatall - setupall } run_test 104 "Make sure user defined options are reflected in mount" @@ -7575,7 +7639,7 @@ error_and_umount() { test_105() { cleanup -f reformat - setup + setup_noconfig mkdir -p $TMP/$tdir mount --bind $DIR $TMP/$tdir || error "mount bind mnt pt failed" rm -f $TMP/$tdir/$tfile @@ -7608,7 +7672,7 @@ test_106() { local repeat=5 reformat - setupall + setup_noconfig mkdir -p $DIR/$tdir || error "create $tdir failed" lfs setstripe -c 1 -i 0 $DIR/$tdir #define OBD_FAIL_CAT_RECORDS 0x1312 @@ -7626,20 +7690,20 @@ test_106() { #shows that osp code is buggy do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0 - stopall + cleanup } run_test 106 "check osp llog processing when catalog is wrapped" test_107() { [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] || - { skip "Need MDS version > 2.10.50"; return; } + skip "Need MDS version > 2.10.50" local cmd start_mgsmds || error "start_mgsmds failed" start_ost || error "unable to start OST" # add unknown configuration parameter. 
- if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then cmd="$PERM_CMD ost.$FSNAME-OST0000*.unknown_param" else cmd="$PERM_CMD $FSNAME-OST0000*.ost.unknown_param" @@ -7740,13 +7804,10 @@ t_108_cleanup() { } test_108a() { - [ "$CLIENTONLY" ] && skip "Client-only testing" && return - - [ $(facet_fstype $SINGLEMDS) != "zfs" ] && - skip "zfs only test" && return - + [ "$CLIENTONLY" ] && skip "Client-only testing" + [ $(facet_fstype $SINGLEMDS) != "zfs" ] && skip "zfs only test" [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] && - skip "Need server version at least 2.10.58" && return + skip "Need server version at least 2.10.58" stopall load_modules @@ -7807,13 +7868,10 @@ test_108a() { run_test 108a "migrate from ldiskfs to ZFS" test_108b() { - [ "$CLIENTONLY" ] && skip "Client-only testing" && return - - [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && - skip "ldiskfs only test" && return - + [ "$CLIENTONLY" ] && skip "Client-only testing" + [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && skip "ldiskfs only test" [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] && - skip "Need server version at least 2.10.58" && return + skip "Need server version at least 2.10.58" stopall load_modules @@ -7935,7 +7993,7 @@ test_109_clear_conf() local mgsdev if ! combined_mgs_mds ; then - mgsdev=$MGSDEV + mgsdev=$(mgsdevname) stop_mgs || error "stop_mgs failed" start_mgs "-o nosvc" || error "start_mgs nosvc failed" else @@ -7974,18 +8032,27 @@ test_109_file_shortened() { test_109a() { + [ $MDS1_VERSION -lt $(version_code 2.10.59) ] && + skip "Needs MDS version 2.10.59 or later." [ "$(facet_fstype mgs)" == "zfs" ] && - skip "LU-8727: no implementation for ZFS" && return + skip "LU-8727: no implementation for ZFS" + stopall reformat setup_noconfig client_up || error "client_up failed" + #pool commands requires a client on MGS for procfs interfaces + if ! 
combined_mgs_mds ; then + mount_mgs_client + stack_trap umount_mgs_client EXIT + fi # # set number of permanent parameters # test_109_set_params $FSNAME + combined_mgs_mds || umount_mgs_client umount_client $MOUNT || error "umount_client failed" stop_ost || error "stop_ost failed" stop_mds || error "stop_mds failed" @@ -8000,6 +8067,7 @@ test_109a() error "failed to clear client config" setup_noconfig + combined_mgs_mds || mount_mgs_client # # check that configurations are intact @@ -8011,24 +8079,34 @@ test_109a() # destroy_test_pools || error "destroy test pools failed" + combined_mgs_mds || umount_mgs_client cleanup } run_test 109a "test lctl clear_conf fsname" test_109b() { + [ $MDS1_VERSION -lt $(version_code 2.10.59) ] && + skip "Needs MDS version 2.10.59 or later." [ "$(facet_fstype mgs)" == "zfs" ] && - skip "LU-8727: no implementation for ZFS" && return + skip "LU-8727: no implementation for ZFS" + stopall reformat setup_noconfig client_up || error "client_up failed" + #pool commands requires a client on MGS for procfs interfaces + if ! 
combined_mgs_mds ; then + mount_mgs_client + stack_trap umount_mgs_client EXIT + fi # # set number of permanent parameters # test_109_set_params $FSNAME + combined_mgs_mds || umount_mgs_client umount_client $MOUNT || error "umount_client failed" stop_ost || error "stop_ost failed" stop_mds || error "stop_mds failed" @@ -8043,7 +8121,7 @@ test_109b() error "failed to clear client config" setup_noconfig - + combined_mgs_mds || mount_mgs_client # # check that configurations are intact # @@ -8054,26 +8132,245 @@ test_109b() # destroy_test_pools || error "destroy test pools failed" + combined_mgs_mds || umount_mgs_client cleanup } run_test 109b "test lctl clear_conf one config" +test_110() +{ + [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] && + skip "Only applicable to ldiskfs-based MDTs" + + do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir || + skip "large_dir option is not supported on MDS" + do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir || + skip "large_dir option is not supported on OSS" + + stopall # stop all targets before modifying the target counts + local old_mdscount=$MDSCOUNT + local old_ostcount=$OSTCOUNT + local replace="" + stack_trap "MDSCOUNT=$old_mdscount OSTCOUNT=$old_ostcount" EXIT + MDSCOUNT=1 + OSTCOUNT=1 + + # ext4_dir_entry_2 struct size:264 + # dx_root struct size:8 + # dx_node struct size:8 + # dx_entry struct size:8 + # For 1024 bytes block size. + # First level directory entries: 126 + # Second level directory entries: 127 + # Entries in leaf: 3 + # For 2 levels limit: 48006 + # For 3 levels limit : 6096762 + # Create 80000 files to safely exceed 2-level htree limit. 
+ CONF_SANITY_110_LINKS=${CONF_SANITY_110_LINKS:-80000} + + # can fit at most 3 filenames per 1KB leaf block, but each + # leaf/index block will only be 3/4 full before split at each level + (( MDSSIZE < CONF_SANITY_110_LINKS / 3 * 4/3 * 4/3 )) && + CONF_SANITY_110_LINKS=$((MDSSIZE * 3 * 3/4 * 3/4)) + + combined_mgs_mds || replace=" --replace " + local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \ + $replace --reformat $(mdsdevname 1) $(mdsvdevname 1)" + if [[ $opts != *mkfsoptions* ]]; then + opts+=" --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536\\\"" + else + opts="${opts//--mkfsoptions=\\\"/ \ + --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536 }" + fi + echo "MDT params: $opts" + load_modules + combined_mgs_mds || start_mgs + add mds1 $opts || error "add mds1 failed with new params" + start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS + + opts="$(mkfs_opts ost1 $(ostdevname 1)) \ + $replace --reformat $(ostdevname 1) $(ostvdevname 1)" + + if [[ $opts != *mkfsoptions* ]]; then + opts+=" --mkfsoptions=\\\"-O large_dir\\\" " + else + opts="${opts//--mkfsoptions=\\\"/ \ + --mkfsoptions=\\\"-O large_dir }" + fi + echo "OST params: $opts" + add ost1 $opts || error "add ost1 failed with new params" + start ost1 $(ostdevname 1) $OST_MOUNT_OPTS + + MOUNT_2=yes mountcli || error "mount clients failed" + + mkdir -v $DIR/$tdir || error "cannot create $DIR/$tdir" + local pids count=0 group=0 + + echo "creating $CONF_SANITY_110_LINKS in total" + while (( count < CONF_SANITY_110_LINKS )); do + local len=$((253 - $(wc -c <<<"$tfile-$group-40000-"))) + local dir=DIR$((group % 2 + 1)) + local target=${!dir}/$tdir/$tfile-$group + local long=$target-$(generate_name $len)- + local create=$((CONF_SANITY_110_LINKS - count)) + + (( create > 40000 )) && create=40000 + touch $target || error "creating $target failed" + echo "creating $create hard links to $target" + createmany -l $target $long $create & + pids+=" $!" 
+ + count=$((count + create)) + group=$((group + 1)) + done + echo "waiting for PIDs$pids to complete" + wait $pids || error "createmany failed after $group groups" + + umount_client $MOUNT2 -f + cleanup + + run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n + MDSCOUNT=$old_mdscount + OSTCOUNT=$old_ostcount +} +run_test 110 "Adding large_dir with 3-level htree" + +test_111() { + [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] && + skip "Only applicable to ldiskfs-based MDTs" + + is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) && + skip "This test can not be executed on flakey dev" + + do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir || + skip "large_dir option is not supported on MDS" + + do_facet ost1 $DEBUGFS -w -R supported_features | grep large_dir || + skip "large_dir option is not supported on OSS" + + # cleanup before changing target counts + cleanup + local old_mdscount=$MDSCOUNT + local old_ostcount=$OSTCOUNT + local old_mdssize=$MDSSIZE + local replace="" + stack_trap "MDSSIZE=$MDSSIZE MDSCOUNT=$MDSCOUNT OSTCOUNT=$OSTCOUNT" EXIT + MDSCOUNT=1 + OSTCOUNT=1 + (( MDSSIZE < 2400000 )) && MDSSIZE=2400000 # need at least 2.4GB + + local mdsdev=$(mdsdevname 1) + combined_mgs_mds || replace=" --replace " + local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \ + $replace --reformat $(mdsdevname 1) $(mdsvdevname 1)" + if [[ $opts != *mkfsoptions* ]]; then + opts+=" --mkfsoptions=\\\"-O large_dir -i 1048576 \\\" " + else + opts="${opts//--mkfsoptions=\\\"/ \ + --mkfsoptions=\\\"-O large_dir -i 1048576 }" + fi + echo "MDT params: $opts" + load_modules + combined_mgs_mds || start_mgs + __touch_device mds 1 + add mds1 $opts || error "add mds1 failed with new params" + start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS + + opts="$(mkfs_opts ost1 $(ostdevname 1)) \ + $replace --reformat $(ostdevname 1) $(ostvdevname 1)" + if [[ $opts != *mkfsoptions* ]]; then + opts+=" --mkfsoptions=\\\"-O large_dir \\\"" + else + opts="${opts//--mkfsoptions=\\\"/ --mkfsoptions=\\\"-O 
large_dir }" + fi + echo "OST params: $opts" + __touch_device ost 1 + add ost1 $opts || error "add ost1 failed with new params" + start ost1 $(ostdevname 1) $OST_MOUNT_OPTS + + MOUNT_2=yes mountcli + mkdir $DIR/$tdir || error "cannot create $DIR/$tdir" + lfs df $DIR/$tdir + lfs df -i $DIR/$tdir + + local group=0 + + local start=$SECONDS + local dirsize=0 + local dirmax=$((2 << 30)) + local needskip=0 + local taken=0 + local rate=0 + local left=0 + local num=0 + while (( !needskip & dirsize < dirmax )); do + local pids="" + + for cli in ${CLIENTS//,/ }; do + local len=$((253 - $(wc -c <<<"$cli-$group-60000-"))) + local target=$cli-$group + local long=$DIR/$tdir/$target-$(generate_name $len)- + + RPWD=$DIR/$tdir do_node $cli touch $target || + error "creating $target failed" + echo "creating 60000 hardlinks to $target" + RPWD=$DIR/$tdir do_node $cli createmany -l $target $long 60000 & + pids+=" $!" + + group=$((group + 1)) + target=$cli-$group + long=$DIR2/$tdir/$target-$(generate_name $len)- + + RPWD=$DIR2/$tdir do_node $cli touch $target || + error "creating $target failed" + echo "creating 60000 hardlinks to $target" + RPWD=$DIR2/$tdir do_node $cli createmany -l $target $long 60000 & + pids+=" $!" + + group=$((group + 1)) + done + echo "waiting for PIDs$pids to complete" + wait $pids || error "createmany failed after $group groups" + dirsize=$(stat -c %s $DIR/$tdir) + taken=$((SECONDS - start)) + rate=$((dirsize / taken)) + left=$(((dirmax - dirsize) / rate)) + num=$((group * 60000)) + echo "estimate ${left}s left after $num files / ${taken}s" + # if the estimated time remaining is too large (it may change + # over time as the create rate is not constant) then exit + # without declaring a failure. 
+ (( left > 1200 )) && needskip=1 + done + + umount_client $MOUNT2 -f + cleanup + + (( $needskip )) && skip "ETA ${left}s after $num files / ${taken}s is too long" + + run_e2fsck $(facet_active_host mds1) $(mdsdevname 1) -n + MDSCOUNT=$old_mdscount + OSTCOUNT=$old_ostcount + MDSSIZE=$old_mdssize +} +run_test 111 "Adding large_dir with over 2GB directory" + + cleanup_115() { trap 0 stopall rm -f $TMP/$tdir/lustre-mdt - formatall + reformat_and_config } test_115() { - IMAGESIZE=$((3072 << 30)) # 3072 GiB - if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "Only applicable to ldiskfs-based MDTs" - return fi + IMAGESIZE=$((3072 << 30)) # 3072 GiB + stopall # We need MDT size 3072GB, because it is smallest # partition that can store 2B inodes @@ -8083,7 +8380,7 @@ test_115() { do_facet $SINGLEMDS "touch $mdsimgname" trap cleanup_115 RETURN EXIT do_facet $SINGLEMDS "$TRUNCATE $mdsimgname $IMAGESIZE" || - { skip "Backend FS doesn't support sparse files"; return 0; } + skip "Backend FS doesn't support sparse files" local mdsdev=$(do_facet $SINGLEMDS "losetup -f") do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname" @@ -8091,7 +8388,7 @@ test_115() { --mkfsoptions='-O lazy_itable_init,ea_inode,^resize_inode,meta_bg \ -i 1024'" add mds1 $mds_opts --mgs --reformat $mdsdev || - { skip_env "format large MDT failed"; return 0; } + skip_env "format large MDT failed" add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \ --reformat $(ostdevname 1) $(ostvdevname 1) @@ -8126,16 +8423,11 @@ test_115() { run_test 115 "Access large xattr with inodes number over 2TB" test_116() { - [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && - skip "ldiskfs only test" && return - + [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && skip "ldiskfs only test" [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] && - skip "Need server version at least 2.10.59" && return - - do_facet $SINGLEMDS which mkfs.xfs || { + skip "Need server version at least 2.10.59" + do_facet $SINGLEMDS which 
mkfs.xfs || skip_env "No mkfs.xfs installed" - return - } stopall load_modules @@ -8168,6 +8460,7 @@ test_116() { do_facet $SINGLEMDS $TUNE2FS -l $tmpmnt/$mdtimg | grep -qw 'features.*extent' || error "extent should be enabled" + reformat_and_config } run_test 116 "big size MDT support" @@ -8206,33 +8499,179 @@ test_120() { # LU-11130 run_test 120 "cross-target rename should not create bad symlinks" test_122() { - [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" [[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] || - { skip "Need OST version at least 2.11.53" && return 0; } - + skip "Need OST version at least 2.11.53" reformat LOAD_MODULES_REMOTE=true load_modules #define OBD_FAIL_OFD_SET_OID 0x1e0 do_facet ost1 $LCTL set_param fail_loc=0x00001e0 - setupall + setup_noconfig $LFS mkdir -i1 -c1 $DIR/$tdir $LFS setstripe -i0 -c1 $DIR/$tdir do_facet ost1 $LCTL set_param fail_loc=0 createmany -o $DIR/$tdir/file_ 1000 || error "Fail to create a new sequence" - reformat + cleanup } run_test 122 "Check OST sequence update" -test_123() { - setupall +test_123aa() { + remote_mgs_nodsh && skip "remote MGS with nodsh" + [ -d $MOUNT/.lustre ] || setup + + # test old logid format until removal from llog_ioctl.c::str2logid() + if [ $MGS_VERSION -lt $(version_code 3.1.53) ]; then + do_facet mgs $LCTL dl | grep MGS + do_facet mgs "$LCTL --device %MGS llog_print \ + \\\\\\\$$FSNAME-client 1 10" || + error "old llog_print failed" + fi + + # test new logid format + if [ $MGS_VERSION -ge $(version_code 2.9.53) ]; then + do_facet mgs "$LCTL --device MGS llog_print $FSNAME-client" || + error "new llog_print failed" + fi +} +run_test 123aa "llog_print works with FIDs and simple names" + +test_123ab() { + remote_mgs_nodsh && skip "remote MGS with nodsh" + [[ $MGS_VERSION -gt $(version_code 2.11.51) ]] || + skip "Need server with working llog_print support" + + [ -d $MOUNT/.lustre ] || setup + + local yaml + local orig_val + + 
orig_val=$(do_facet mgs $LCTL get_param jobid_name) + do_facet mgs $LCTL set_param -P jobid_name="testname" + + yaml=$(do_facet mgs $LCTL --device MGS llog_print params | + grep jobid_name | tail -n 1) + + local param=$(awk '{ print $10 }' <<< "$yaml") + local val=$(awk '{ print $12 }' <<< "$yaml") + #return to the default + do_facet mgs $LCTL set_param -P jobid_name=$orig_val + [ $val = "testname" ] || error "bad value: $val" + [ $param = "jobid_name," ] || error "Bad param: $param" +} +run_test 123ab "llog_print params output values from set_param -P" + +test_123ac() { # LU-11566 + remote_mgs_nodsh && skip "remote MGS with nodsh" + do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start || + skip "Need 'lctl llog_print --start' on MGS" + + local start=10 + local end=50 + + [ -d $MOUNT/.lustre ] || setup + + # - { index: 10, event: add_uuid, nid: 192.168.20.1@tcp(0x20000c0a81401, + # node: 192.168.20.1@tcp } + do_facet mgs $LCTL --device MGS \ + llog_print --start $start --end $end $FSNAME-client | tr -d , | + while read DASH BRACE INDEX idx EVENT BLAH BLAH BLAH; do + (( idx >= start )) || error "llog_print index $idx < $start" + (( idx <= end )) || error "llog_print index $idx > $end" + done +} +run_test 123ac "llog_print with --start and --end" + +test_123ad() { # LU-11566 + remote_mgs_nodsh && skip "remote MGS with nodsh" + # older versions of lctl may not print all records properly + do_facet mgs "$LCTL help llog_print" 2>&1 | grep -q -- --start || + skip "Need 'lctl llog_print --start' on MGS" + + [ -d $MOUNT/.lustre ] || setup + + # append a new record, to avoid issues if last record was cancelled + local old=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1) + do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$old + + # logid: [0x3:0xa:0x0]:0 + # flags: 4 (plain) + # records_count: 72 + # last_index: 72 + local num=$(do_facet mgs $LCTL --device MGS llog_info $FSNAME-client | + awk '/last_index:/ { print $2 - 1 }') + + # - { 
index: 71, event: set_timeout, num: 0x14, param: sys.timeout=20 } + local last=$(do_facet mgs $LCTL --device MGS llog_print $FSNAME-client | + tail -1 | awk '{ print $4 }' | tr -d , ) + (( last == num )) || error "llog_print only showed $last/$num records" +} +run_test 123ad "llog_print shows all records" + +test_123ae() { # LU-11566 + remote_mgs_nodsh && skip "remote MGS with nodsh" + [ -d $MOUNT/.lustre ] || setupall + + local max=$($LCTL get_param -n osc.*-OST0000-*.max_dirty_mb | head -1) + + if do_facet mgs "$LCTL help llog_cancel" 2>&1| grep -q -- --log_id; then + # save one set_param -P record in case none exist + do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max + + local log=params + local orig=$(do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | awk '{ print $4 }' | tr -d , ) + do_facet mgs $LCTL set_param -P osc.*.max_dirty_mb=$max + do_facet mgs $LCTL --device MGS llog_print $log | tail -1 | + grep "parameter: osc.*.max_dirty_mb" || + error "new set_param -P wasn't stored in params log" + + # - { index: 71, event: set_param, device: general, + # param: osc.*.max_dirty_mb, value: 256 } + local id=$(do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | awk '{ print $4 }' | tr -d , ) + + do_facet mgs $LCTL --device MGS llog_cancel $log --log_idx=$id + local new=$(do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | awk '{ print $4 }' | tr -d , ) + (( new == orig )) || + error "new llog_cancel now $new, not at $orig records" + fi + + # test old positional parameters for a while still + if [ $(lustre_version_code mgs) -le $(version_code 3.1.53) ]; then + log=$FSNAME-client + orig=$(do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | awk '{ print $4 }' | tr -d , ) + do_facet mgs $LCTL conf_param $FSNAME-OST0000.osc.max_dirty_mb=$max + do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | grep "parameter: osc.max_dirty_mb" || + error "old conf_param wasn't stored in params log" + + # - { index: 71, event: 
conf_param, device: testfs-OST0000-osc, + # param: osc.max_dirty_mb=256 } + id=$(do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | awk '{ print $4 }' | tr -d , ) + do_facet mgs $LCTL --device MGS llog_cancel $log $id + new=$(do_facet mgs $LCTL --device MGS llog_print $log | + tail -1 | awk '{ print $4 }' | tr -d , ) + (( new == orig )) || + error "old llog_cancel now $new, not at $orig records" + fi +} +run_test 123ae "llog_cancel can cancel requested record" + +test_123F() { + remote_mgs_nodsh && skip "remote MGS with nodsh" + + [ -d $MOUNT/.lustre ] || setup local yaml_file="$TMP/$tfile.yaml" do_facet mgs rm "$yaml_file" - local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist |"\ - " sed 's/config_log://'") + local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist" | + sed 's/config_log://') # set jobid_var to a different value for test local orig_val=$(do_facet mgs $LCTL get_param jobid_var) @@ -8263,8 +8702,182 @@ test_123() { error "$set_val is not testname" do_facet mgs rm "$yaml_file" + cleanup +} +run_test 123F "clear and reset all parameters using set_param -F" + +test_124() +{ + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" + [ -z $mds2failover_HOST ] && skip "needs MDT failover setup" + + setup + cleanup + + load_modules + if combined_mgs_mds; then + start_mdt 1 "-o nosvc" || + error "starting mds with nosvc option failed" + fi + local nid=$(do_facet mds2 $LCTL list_nids | head -1) + local failover_nid=$(do_node $mds2failover_HOST $LCTL list_nids | head -1) + do_facet mgs $LCTL replace_nids $FSNAME-MDT0001 $nid:$failover_nid || + error "replace_nids execution error" + + if combined_mgs_mds; then + stop_mdt 1 + fi + + setup + fail mds2 + echo "lfs setdirstripe" + $LFS setdirstripe -i 1 $MOUNT/$tdir || error "setdirstirpe error" + echo ok +} +run_test 124 "check failover after replace_nids" + +get_max_sectors_kb() { + local facet="$1" + local device="$2" + local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}")) + 
local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb" + + do_facet ${facet} "[[ -e ${max_sectors_path} ]] && + cat ${max_sectors_path}" +} + +get_max_hw_sectors_kb() { + local facet="$1" + local device="$2" + local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}")) + local max_hw_path="/sys/block/${dev_base}/queue/max_hw_sectors_kb" + + do_facet ${facet} "[[ -e ${max_hw_path} ]] && cat ${max_hw_path}" +} + +set_max_sectors_kb() { + local facet="$1" + local device="$2" + local value="$3" + local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}")) + local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb" + + do_facet ${facet} "[[ -e ${max_sectors_path} ]] && + echo ${value} > ${max_sectors_path}" + rc=$? + + [[ $rc -ne 0 ]] && echo "Failed to set ${max_sectors_path} to ${value}" + + return $rc +} + +# Return 0 if all slave devices have max_sectors_kb == max_hw_sectors_kb +# Otherwise return > 0 +check_slaves_max_sectors_kb() +{ + local facet="$1" + local device="$2" + local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}")) + local slaves_dir=/sys/block/${dev_base}/slaves + local slave_devices=$(do_facet ${facet} "ls ${slaves_dir} 2>/dev/null") + [[ -z ${slave_devices} ]] && return 0 + + local slave max_sectors new_max_sectors max_hw_sectors path + local rc=0 + for slave in ${slave_devices}; do + path="/dev/${slave}" + ! 
is_blkdev ${facet} ${path} && continue + max_sectors=$(get_max_sectors_kb ${facet} ${path}) + max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${path}) + new_max_sectors=${max_hw_sectors} + [[ ${new_max_sectors} -gt ${RQ_SIZE_LIMIT} ]] && + new_max_sectors=${RQ_SIZE_LIMIT} + + if [[ ${max_sectors} -ne ${new_max_sectors} ]]; then + echo "${path} ${max_sectors} ${new_max_sectors}" + ((rc++)) + fi + check_slaves_max_sectors_kb ${facet} ${path} + ((rc + $?)) + done + + return $rc +} + +test_125() +{ + local facet_list="mgs mds1 ost1" + combined_mgs_mds && facet_list="mgs ost1" + + local facet + for facet in ${facet_list}; do + [[ $(facet_fstype ${facet}) != ldiskfs ]] && + skip "ldiskfs only test" && + return 0 + ! is_blkdev ${facet} $(facet_device ${facet}) && + skip "requires all real devices" && + return 0 + done + + local rc=0 + # We don't increase IO request size limit past 16MB. See comments in + # lustre/utils/libmount_utils_ldiskfs.c:tune_max_sectors_kb() + RQ_SIZE_LIMIT=$((16 * 1024)) + local device old_max_sectors new_max_sectors max_hw_sectors + for facet in ${facet_list}; do + device=$(facet_device ${facet}) + old_max_sectors=$(get_max_sectors_kb ${facet} ${device}) + max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${device}) + + # The expected value after l_tunedisk is executed + new_max_sectors=$old_max_sectors + [[ ${new_max_sectors_kb} -gt ${RQ_SIZE_LIMIT} ]] && + new_max_sectors_kb=${RQ_SIZE_LIMIT} + + # Ensure the current value of max_sectors_kb does not equal + # max_hw_sectors_kb, so we can tell whether l_tunedisk did + # anything + set_max_sectors_kb ${facet} ${device} $((new_max_sectors - 1)) + + # Value before l_tunedisk + local pre_max_sectors=$(get_max_sectors_kb ${facet} ${device}) + if [[ ${pre_max_sectors} -ne $((new_max_sectors - 1)) ]]; then + echo "unable to satsify test pre-condition:" + echo "${pre_max_sectors} != $((new_max_sectors - 1))" + ((rc++)) + continue + fi + + echo "Before: ${facet} ${device} ${pre_max_sectors} 
${max_hw_sectors}" + + do_facet ${facet} "libtool execute l_tunedisk ${device}" + + # Value after l_tunedisk + local post_max_sectors=$(get_max_sectors_kb ${facet} ${device}) + + echo "After: ${facet} ${device} ${post_max_sectors} ${max_hw_sectors}" + + if [[ ${facet} != ost1 ]]; then + if [[ ${post_max_sectors} -ne ${pre_max_sectors} ]]; then + echo "l_tunedisk modified max_sectors_kb of ${facet}" + ((rc++)) + fi + + set_max_sectors_kb ${facet} ${device} ${old_max_sectors} + else + if [[ ${post_max_sectors} -eq ${pre_max_sectors} ]]; then + echo "l_tunedisk failed to modify max_sectors_kb of ${facet}" + ((rc++)) + fi + + check_slaves_max_sectors_kb ${facet} ${device} || + ((rc++)) + fi + done + + return $rc } -run_test 123 "clear and reset all parameters using set_param -F" +run_test 125 "check l_tunedisk only tunes OSTs and their slave devices" if ! combined_mgs_mds ; then stop mgs