X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=6e272a18d4cb70c9f4e762e73ba72b118839911a;hp=000be7a91e03800ede72dab89a4514a52abc155b;hb=c7ed812d6a87f0ffa3c0d4c1a9289497f680ccec;hpb=364bd1d974bb5b08319fbe73c8eabc5736b9d76b diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh old mode 100755 new mode 100644 index 000be7a..6e272a1 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -4,8 +4,8 @@ set -e ONLY=${ONLY:-"$*"} -# bug number for skipped test: LU-7428 -ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 84" +# bug number for skipped test: LU-8972 +ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101" # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! is_sles11() # LU-2181 @@ -23,10 +23,6 @@ is_sles11() # LU-2181 return 1 } -if is_sles11; then # LU-2181 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b" -fi - if [ "$FAILURE_MODE" = "HARD" ]; then CONFIG_EXCEPTIONS="24a " && echo "Except the tests: $CONFIG_EXCEPTIONS for " \ @@ -68,8 +64,12 @@ OSTDEV1_2=$fs2ost_DEV OSTDEV2_2=$fs3ost_DEV if ! combined_mgs_mds; then - # bug number for skipped test: 23954 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b" + # bug number for skipped test: LU-9860 LU-9860 LU-9860 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 43b 53b 54b" + # bug number for skipped test: LU-9875 LU-9879 LU-9879 LU-9879 LU-9879 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 70e 80 84 87 100" + # bug number for skipped test: LU-8110 LU-9879 LU-9879 LU-9879 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 102 104 105 107" fi # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time @@ -78,8 +78,8 @@ if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then fi [ $(facet_fstype $SINGLEMDS) = "zfs" ] && -# bug number for skipped test: LU-4444 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 69" +# bug number for skipped test: + ALWAYS_EXCEPT="$ALWAYS_EXCEPT" init_logging @@ -255,7 +255,9 @@ cleanup_nocli() { } cleanup() { - umount_client $MOUNT || return 200 + local force="" + [ "x$1" != "x" ] && force='-f' + umount_client $MOUNT $force|| return 200 cleanup_nocli || return $? } @@ -455,17 +457,16 @@ run_test 5c "cleanup after failed mount (bug 2712) (should return errs)" test_5d() { grep " $MOUNT " /etc/mtab && - error false "unexpected entry in mtab before mount" && return 10 + error "unexpected entry in mtab before mount" start_ost || error "OST start failed" start_mds || error "MDS start failed" - stop_ost || error "Unable to stop OST1" + stop_ost -f || error "Unable to stop OST1" mount_client $MOUNT || error "mount_client $MOUNT failed" umount_client $MOUNT -f || error "umount_client $MOUNT failed" cleanup_nocli || error "cleanup_nocli failed with $?" - grep " $MOUNT " /etc/mtab && + ! 
grep " $MOUNT " /etc/mtab || error "$MOUNT entry in mtab after unmount" - pass } run_test 5d "mount with ost down" @@ -488,7 +489,7 @@ run_test 5e "delayed connect, don't crash (bug 10268)" test_5f() { if combined_mgs_mds ; then - skip "combined mgs and mds" + skip "needs separate mgs and mds" return 0 fi @@ -523,6 +524,17 @@ test_5f() { } run_test 5f "mds down, cleanup after failed mount (bug 2712)" +test_5g() { + modprobe lustre + [ $(lustre_version_code client) -lt $(version_code 2.9.53) ] && + { skip "automount of debugfs missing before 2.9.53" && return 0; } + umount /sys/kernel/debug + $LCTL get_param -n devices | egrep -v "error" && \ + error "lctl can't access debugfs data" + grep " debugfs " /etc/mtab || error "debugfs failed to remount" +} +run_test 5g "handle missing debugfs" + test_6() { setup manual_umount_client @@ -598,7 +610,7 @@ is_blkdev () { test_17() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -627,7 +639,7 @@ run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should ret test_18() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -702,7 +714,7 @@ run_test 19a "start/stop MDS without OSTs" test_19b() { start_ost || error "Unable to start OST1" - stop_ost || error "Unable to stop OST1" + stop_ost -f || error "Unable to stop OST1" } run_test 19b "start/stop OSTs without MDS" @@ -950,9 +962,10 @@ test_24a() { local fs2ostdev=$(ostdevname 1_2) local fs2mdsvdev=$(mdsvdevname 1_2) local fs2ostvdev=$(ostvdevname 1_2) + local cl_user - # test 8-char fsname as well - local FSNAME2=test1234 + # LU-9733 test fsname started with numbers as well + local FSNAME2=969362ae add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --nomgs --mgsnode=$MGSNID \ --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10 @@ -965,6 +978,15 @@ test_24a() { start fs2ost $fs2ostdev $OST_MOUNT_OPTS mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed" $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed" + + # LU-9733 test fsname started with numbers + cl_user=$(do_facet $SINGLEMDS lctl --device $FSNAME2-MDT0000 \ + changelog_register -n) || + error "register changelog failed" + + do_facet $SINGLEMDS lctl --device $FSNAME2-MDT0000 \ + changelog_deregister $cl_user || + error "deregister changelog failed" # 1 still works check_mount || error "check_mount failed" # files written on 1 should not show up on 2 @@ -1173,7 +1195,7 @@ test_28a() { # LU-4221 run_test 28a "set symlink parameters permanently with conf_param" test_29() { - [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return setup > /dev/null 2>&1 start_ost2 || error "Unable to start OST2" sleep 10 @@ -1197,27 +1219,8 @@ test_29() { fi # check MDTs too - for num in $(seq $MDSCOUNT); do - local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001) - local MPROC="osc.$mdtosc.active" - local MAX=30 - local WAIT=0 - while [ 1 ]; do - sleep 5 - RESULT=$(do_facet mds${num} "$LCTL get_param -n $MPROC") - [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC" - if [ $RESULT -eq $DEAC ]; then - echo -n "MDT deactivated also after" - echo "$WAIT sec (got $RESULT)" - break - fi - WAIT=$((WAIT + 5)) - if [ $WAIT -eq $MAX ]; then - error "MDT active: wanted $DEAC got $RESULT" - fi - echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated" - done - done + 
wait_osp_active ost ${FSNAME}-OST0001 1 0 + # test new client starts deactivated umount_client $MOUNT || error "umount_client $MOUNT failed" mount_client $MOUNT || error "mount_client $MOUNT failed" @@ -1376,7 +1379,7 @@ test_32newtarball() { chown $T32_QID.$T32_QID $tmp/src/t32_qf_old # format ost with comma-separated NIDs to verify LU-4460 - local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)" + local failnid="$(h2nettype 1.2.3.4),$(h2nettype 4.3.2.1)" MGSNID="$MGSNID,$MGSNID" OSTOPT="--failnode=$failnid" formatall setupall @@ -1492,7 +1495,8 @@ t32_check() { local IMGTYPE=$(facet_fstype $SINGLEMDS) - tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\') + tarballs=$($r find $RLUSTRE/tests -maxdepth 1 \ + -name \'disk*-$IMGTYPE.tar.bz2\') if [ -z "$tarballs" ]; then skip "No applicable tarballs found" @@ -1531,6 +1535,7 @@ t32_test_cleanup() { destroy_zpool $facet $poolname done fi + combined_mgs_mds || start_mgs || rc=$? return $rc } @@ -1595,6 +1600,19 @@ t32_verify_quota() { local qval local cmd + # LU-2435: if the underlying zfs doesn't support userobj_accounting, + # lustre will estimate the object count usage. This fails quota + # verification in 32b. The object quota usage should be accurate after + # zfs-0.7.0 is released. + [ $fstype == "zfs" ] && { + local zfs_version=$(do_node $node cat /sys/module/zfs/version) + + [ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && { + echo "Skip quota verify for zfs: $zfs_version" + return 0 + } + } + $LFS quota -u $T32_QID -v $mnt qval=$($LFS quota -v -u $T32_QID $mnt | @@ -1671,6 +1689,7 @@ t32_test() { local tarball=$1 local writeconf=$2 local dne_upgrade=${dne_upgrade:-"no"} + local dom_upgrade=${dom_upgrade:-"no"} local ff_convert=${ff_convert:-"no"} local shall_cleanup_mdt=false local shall_cleanup_mdt1=false @@ -1679,7 +1698,6 @@ t32_test() { local mdt2_is_available=false local node=$(facet_active_host $SINGLEMDS) local r="do_node $node" - local node2=$(facet_active_host mds2) local tmp=$TMP/t32 local img_commit local img_kernel @@ -1689,7 +1707,7 @@ t32_test() { local img_blimit local img_ilimit local fsname=t32fs - local nid=$($r $LCTL list_nids | head -1) + local nid local mopts local uuid local nrpcs_orig @@ -1703,9 +1721,12 @@ t32_test() { local stripe_count local dir + combined_mgs_mds || stop_mgs || error "Unable to stop MGS" trap 'trap - RETURN; t32_test_cleanup' RETURN load_modules + nid=$($r $LCTL list_nids | head -1) + mkdir -p $tmp/mnt/lustre || error "mkdir $tmp/mnt/lustre failed" $r mkdir -p $tmp/mnt/{mdt,mdt1,ost} $r tar xjvf $tarball -S -C $tmp || { @@ -1749,10 +1770,15 @@ t32_test() { ! $mdt2_is_available || poolname_list+=" t32fs-mdt2" for poolname in $poolname_list; do - $r "$ZPOOL list -H $poolname >/dev/null 2>&1 || + $r "modprobe zfs; + $ZPOOL list -H $poolname >/dev/null 2>&1 || $ZPOOL import -f -d $tmp $poolname" done + # upgrade zpool to latest supported features, including + # dnode quota accounting in 0.7.0 + $r "$ZPOOL upgrade -a" + mdt_dev=t32fs-mdt1/mdt1 ost_dev=t32fs-ost1/ost1 ! 
$mdt2_is_available || mdt2_dev=t32fs-mdt2/mdt2 @@ -1852,13 +1878,13 @@ t32_test() { mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" fi - add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \ + add $SINGLEMDS $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \ $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || { error_noexit "Mkfs new MDT failed" return 1 } - [[ $(facet_fstype mds1) != zfs ]] || import_zpool fs2mds + [[ $(facet_fstype mds1) != zfs ]] || import_zpool mds1 $r $TUNEFS --dryrun $fs2mdsdev || { error_noexit "tunefs.lustre before mounting the MDT" @@ -1906,7 +1932,8 @@ t32_test() { mopts="loop,$mopts" fi fi - $r $MOUNT_CMD -o $mopts $ost_dev $tmp/mnt/ost || { + + $r $MOUNT_CMD -onomgs -o$mopts $ost_dev $tmp/mnt/ost || { error_noexit "Mounting the OST" return 1 } @@ -1999,11 +2026,6 @@ t32_test() { shall_cleanup_lustre=true $r $LCTL set_param debug="$PTLDEBUG" - t32_verify_quota $node $fsname $tmp/mnt/lustre || { - error_noexit "verify quota failed" - return 1 - } - if $r test -f $tmp/list; then # # There is not a Test Framework API to copy files to or @@ -2055,6 +2077,43 @@ t32_test() { echo "list verification skipped" fi + if [ "$dom_upgrade" != "no" ]; then + echo "Check DoM file can be created" + $LFS setstripe -E 1M -L mdt -E EOF $tmp/mnt/lustre/dom || { + error_noexit "Verify DoM creation" + return 1 + } + [ $($LFS getstripe -L $tmp/mnt/lustre/dom) == 100 ] || { + error_noexit "Verify a DoM file" + return 1 + } + dd if=/dev/urandom of=$tmp/mnt/lustre/dom bs=4096 \ + count=1 conv=fsync || { + error_noexit "Cannot write to DoM file" + return 1 + } + [ $(stat -c%s $tmp/mnt/lustre/dom) == 4096 ] || { + error_noexit "DoM: bad size after write" + return 1 + } + rm $tmp/mnt/lustre/dom + + $r $LCTL get_param -n lod.*MDT0000*.dom_stripesize || { + error_noexit "Getting \"dom_stripesize\"" + return 1 + } + $r $LCTL conf_param \ + $fsname-MDT0000.lod.dom_stripesize=0 || { + error_noexit "Changing \"dom_stripesize\"" + return 1 + } + wait_update $(facet_host mds) "$LCTL get_param \ + -n lod.*MDT0000*.dom_stripesize" 0 || { + error_noexit "Verifying \"dom_stripesize\"" + return 1 + } + fi + if [ "$dne_upgrade" != "no" ]; then $LFS mkdir -i 1 -c2 $tmp/mnt/lustre/striped_dir || { error_noexit "set striped dir failed" @@ -2274,8 +2333,13 @@ t32_test() { if [[ $fstype == zfs ]]; then local poolname=t32fs-mdt1 - $r "$ZPOOL list -H $poolname >/dev/null 2>&1 || + $r "modprobe zfs; + $ZPOOL list -H $poolname >/dev/null 2>&1 || $ZPOOL import -f -d $tmp $poolname" + + # upgrade zpool to latest supported features, + # including dnode quota accounting in 0.7.0 + $r "$ZPOOL upgrade $poolname" fi # mount a second time to make sure we didnt leave upgrade flag on @@ -2354,6 +2418,21 @@ test_32d() { } run_test 32d "convert ff test" +test_32e() { + local tarballs + local tarball + local rc=0 + + t32_check + for tarball in $tarballs; do + echo $tarball | grep "2_9" || continue + #load_modules + dom_upgrade=yes t32_test $tarball writeconf || let "rc += $?" + done + return $rc +} +run_test 32e "dom upgrade test" + test_33a() { # bug 12333, was test_33 local FSNAME2=test-123 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) @@ -2378,15 +2457,20 @@ test_33a() { # bug 12333, was test_33 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931. 
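	# Note: the \\\" sequences above are intentional; the string is
	# expanded once in this assignment and once more by the remote
	# shell that do_facet spawns, so mkfs.lustre should ultimately
	# receive the single argument --mkfsoptions="-J size=8".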
fi - add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \ - --reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10 + if combined_mgs_mds; then + local mgs_flag="--mgs" + fi + + add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --fsname=${FSNAME2} \ + --reformat $mgs_flag $mkfsoptions $fs2mdsdev $fs2mdsvdev || + exit 10 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \ --fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \ $fs2ostvdev || exit 10 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT start fs2ost $fs2ostdev $OST_MOUNT_OPTS - do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || + do_facet mgs "$LCTL conf_param $FSNAME2.sys.timeout=200" || error "$LCTL conf_param $FSNAME2.sys.timeout=200 failed" mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed" $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed" @@ -2462,9 +2546,9 @@ test_35a() { # bug 12459 local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) do_facet mgs "$LCTL conf_param \ - ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || + ${device}.failover.node=$(h2nettype $FAKENID)" || error "Setting ${device}.failover.node=\ - $(h2$NETTYPE $FAKENID) failed." + $(h2nettype $FAKENID) failed." log "Wait for RECONNECT_INTERVAL seconds (10s)" sleep 10 @@ -2520,9 +2604,9 @@ test_35b() { # bug 18674 local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) do_facet mgs "$LCTL conf_param \ - ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || + ${device}.failover.node=$(h2nettype $FAKENID)" || error "Set ${device}.failover.node=\ - $(h2$NETTYPE $FAKENID) failed" + $(h2nettype $FAKENID) failed" local at_max_saved=0 # adaptive timeouts may prevent seeing the issue @@ -2588,7 +2672,7 @@ test_35b() { # bug 18674 run_test 35b "Continue reconnection retries, if the active server is busy" test_36() { # 12743 - [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return + [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || { skip "remote OST" && return 0; } @@ -2677,7 +2761,7 @@ test_37() { local rc=0 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Currently only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -2810,7 +2894,8 @@ test_41a() { #bug 14134 return fi - local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) + combined_mgs_mds || + { skip "needs combined MGT and MDT device" && return 0; } start_mdt 1 -o nosvc -n if [ $MDSCOUNT -ge 2 ]; then @@ -2860,7 +2945,7 @@ test_41b() { echo "blah blah" > $MOUNT/$tfile cat $MOUNT/$tfile || error "cat $MOUNT/$tfile failed" - umount_client $MOUNT || error "umount_client $MOUNT failed" + umount_client $MOUNT -f || error "umount_client $MOUNT failed" stop_ost || error "Unable to stop OST1" stop_mds || error "Unable to stop MDS" stop_mds || error "Unable to stop MDS on second try" @@ -2869,6 +2954,7 @@ run_test 41b "mount mds with --nosvc and --nomgs on first mount" test_41c() { local server_version=$(lustre_version_code $SINGLEMDS) + local oss_list=$(comma_list $(osts_nodes)) [[ $server_version -ge $(version_code 2.6.52) ]] || [[ $server_version -ge $(version_code 2.5.26) && @@ -2877,19 +2963,45 @@ test_41c() { $server_version -lt $(version_code 2.5.11) ]] || { skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; } + # ensure mds1 ost1 have been created even if running sub-test 
standalone cleanup + setup + cleanup || error "cleanup failed" + + # using directly mount command instead of start() function to avoid + # any side effect of // with others/externals tools/features + # ("zpool import", ...) + # MDT concurrent start + + LOAD_MODULES_REMOTE=true load_modules + do_facet $SINGLEMDS "lsmod | grep -q libcfs" || + error "MDT concurrent start: libcfs module not loaded" + + local mds1dev=$(mdsdevname 1) + local mds1mnt=$(facet_mntpt mds1) + local mds1fstype=$(facet_fstype mds1) + local mds1opts=$MDS_MOUNT_OPTS + + if [ $mds1fstype == ldiskfs ] && + ! do_facet mds1 test -b $mds1dev; then + mds1opts=$(csa_add "$mds1opts" -o loop) + fi + if [[ $mds1fstype == zfs ]]; then + import_zpool mds1 || return ${PIPESTATUS[0]} + fi + #define OBD_FAIL_TGT_MOUNT_RACE 0x716 - do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x716" - start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS & + do_facet mds1 "$LCTL set_param fail_loc=0x80000716" + + do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts & local pid=$! - start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS & - do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x0" - local pid2=$! - wait $pid2 + + do_facet mds1 mount -t lustre $mds1dev $mds1mnt $mds1opts local rc2=$? wait $pid local rc=$? + do_facet mds1 "$LCTL set_param fail_loc=0x0" if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then echo "1st MDT start succeed" echo "2nd MDT start failed with $rc2" @@ -2909,17 +3021,33 @@ test_41c() { # OST concurrent start + do_rpc_nodes $oss_list "lsmod | grep -q libcfs" || + error "OST concurrent start: libcfs module not loaded" + + local ost1dev=$(ostdevname 1) + local ost1mnt=$(facet_mntpt ost1) + local ost1fstype=$(facet_fstype ost1) + local ost1opts=$OST_MOUNT_OPTS + + if [ $ost1fstype == ldiskfs ] && + ! do_facet ost1 test -b $ost1dev; then + ost1opts=$(csa_add "$ost1opts" -o loop) + fi + if [[ $ost1fstype == zfs ]]; then + import_zpool ost1 || return ${PIPESTATUS[0]} + fi + #define OBD_FAIL_TGT_MOUNT_RACE 0x716 - do_facet ost1 "$LCTL set_param fail_loc=0x716" - start ost1 $(ostdevname 1) $OST_MOUNT_OPTS & + do_facet ost1 "$LCTL set_param fail_loc=0x80000716" + + do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts & pid=$! - start ost1 $(ostdevname 1) $OST_MOUNT_OPTS & - do_facet ost1 "$LCTL set_param fail_loc=0x0" - pid2=$! - wait $pid2 + + do_facet ost1 mount -t lustre $ost1dev $ost1mnt $ost1opts rc2=$? wait $pid rc=$? + do_facet ost1 "$LCTL set_param fail_loc=0x0" if [ $rc -eq 0 ] && [ $rc2 -ne 0 ]; then echo "1st OST start succeed" echo "2nd OST start failed with $rc2" @@ -2999,7 +3127,7 @@ test_43a() { setup chmod ugo+x $DIR || error "chmod 0 failed" - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \ "$FSNAME.mdt.root_squash" \ "0:0" @@ -3007,7 +3135,7 @@ test_43a() { "$LCTL get_param -n llite.${FSNAME}*.root_squash" \ "0:0" || error "check llite root_squash failed!" 
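	# Background for the checks below: root_squash remaps root's
	# uid:gid on the MDS (once it is set to $RUNAS_ID:$RUNAS_ID further
	# down, root should only be able to reach files owned by $RUNAS_ID),
	# while nosquash_nids lists client NIDs exempted from squashing.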
- set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \ "$FSNAME.mdt.nosquash_nids" \ "NONE" @@ -3039,7 +3167,7 @@ test_43a() { # set root squash UID:GID to RUNAS_ID # root should be able to access only files owned by RUNAS_ID # - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.root_squash" \ "$FSNAME.mdt.root_squash" \ "$RUNAS_ID:$RUNAS_ID" @@ -3109,7 +3237,7 @@ test_43a() { local NIDLIST=$($LCTL list_nids all | tr '\n' ' ') NIDLIST="2@gni $NIDLIST 192.168.0.[2,10]@tcp" NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ') - set_conf_param_and_check mds \ + set_conf_param_and_check mds1 \ "$LCTL get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \ "$FSNAME-MDTall.mdt.nosquash_nids" \ "$NIDLIST" @@ -3162,7 +3290,7 @@ test_43b() { # LU-5690 local client_ip=$(host_nids_address $HOSTNAME $NETTYPE) local host=${client_ip//*./} local net=${client_ip/%$host/} - local nosquash_nids=$(h2$NETTYPE $net[$host,$host,$host]) + local nosquash_nids=$(h2nettype $net[$host,$host,$host]) add $fs2mgs $(mkfs_opts mgs $fs2mgsdev) --fsname=$fsname \ --param mdt.root_squash=$RUNAS_ID:$RUNAS_ID \ @@ -3199,8 +3327,8 @@ test_45() { #17310 df -h $MOUNT & log "sleep 60 sec" sleep 60 -#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f - do_facet client "$LCTL set_param fail_loc=0x50f fail_val=0" + #define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f + do_facet client "$LCTL set_param fail_loc=0x8000050f" log "sleep 10 sec" sleep 10 manual_umount_client --force || error "manual_umount_client failed" @@ -3320,8 +3448,9 @@ cleanup_48() { reformat_and_config } -test_48() { # bug 17636 - reformat +test_48() { # bz-17636 LU-7473 + local count + setup_noconfig check_mount || error "check_mount failed" @@ -3332,14 +3461,36 @@ test_48() { # bug 17636 $GETSTRIPE $MOUNT/widestripe || error "$GETSTRIPE $MOUNT/widestripe failed" - trap cleanup_48 EXIT ERR + # In the future, we may introduce more EAs, such as selinux, enlarged + # LOV EA, and so on. These EA will use some EA space that is shared by + # ACL entries. So here we only check some reasonable ACL entries count, + # instead of the max number that is calculated from the max_ea_size. + if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.8.57) ]; + then + count=28 # hard coded of RPC protocol + elif [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + count=4000 # max_num 4091 max_ea_size = 32768 + elif ! 
large_xattr_enabled; then + count=450 # max_num 497 max_ea_size = 4012 + else + count=4500 # max_num 8187 max_ea_size = 1048492 + # not create too much (>5000) to save test time + fi - # fill acl buffer for avoid expand lsm to them - getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do - setfacl -m $acl $MOUNT/widestripe + echo "It is expected to hold at least $count ACL entries" + trap cleanup_48 EXIT ERR + for ((i = 0; i < $count; i++)) do + setfacl -m u:$((i + 100)):rw $MOUNT/widestripe || + error "Fail to setfacl for $MOUNT/widestripe at $i" done + cancel_lru_locks mdc stat $MOUNT/widestripe || error "stat $MOUNT/widestripe failed" + local r_count=$(getfacl $MOUNT/widestripe | grep "user:" | wc -l) + count=$((count + 1)) # for the entry "user::rw-" + + [ $count -eq $r_count ] || + error "Expected ACL entries $count, but got $r_count" cleanup_48 } @@ -3588,7 +3739,7 @@ test_50f() { run_test 50f "normal statfs one server in down" test_50g() { - [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return setup start_ost2 || error "Unable to start OST2" wait_osc_import_state mds ost2 FULL @@ -3619,7 +3770,7 @@ run_test 50g "deactivated OST should not cause panic" # LU-642 test_50h() { # prepare MDT/OST, make OSC inactive for OST1 - [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" || @@ -3657,9 +3808,10 @@ run_test 50h "LU-642: activate deactivated OST" test_50i() { # prepare MDT/OST, make OSC inactive for OST1 - [ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return + [ "$MDSCOUNT" -lt "2" ] && skip_env "needs >= 2 MDTs" && return - [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 + load_modules + [ $(facet_fstype mds2) == zfs ] && import_zpool mds2 do_facet mds2 "$TUNEFS --param mdc.active=0 $(mdsdevname 2)" || error "tunefs MDT2 failed" start_mds || error "Unable to start MDT" @@ -3694,9 +3846,18 @@ test_50i() { "$TEST" "${FSNAME}-MDT0001.mdc.active" 0 || error "Unable to deactivate MDT2" + wait_osp_active mds ${FSNAME}-MDT0001 1 0 + $LFS mkdir -i1 $DIR/$tdir/2 && error "mkdir $DIR/$tdir/2 succeeds after deactive MDT" + $LFS mkdir -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir || + error "mkdir $DIR/$tdir/striped_dir fails after deactive MDT2" + + local stripe_count=$($LFS getdirstripe -c $DIR/$tdir/striped_dir) + [ $stripe_count -eq $((MDSCOUNT - 1)) ] || + error "wrong $stripe_count != $((MDSCOUNT -1)) for striped_dir" + # cleanup umount_client $MOUNT || error "Unable to umount client" stop_mds @@ -3780,7 +3941,7 @@ diff_files_xattrs() test_52() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -3999,7 +4160,7 @@ run_test 53b "check MDS thread count params" test_54a() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4011,7 +4172,7 @@ run_test 54a "test llverdev and partial verify of device" test_54b() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4029,7 +4190,7 @@ lov_objid_size() test_55() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only 
test" return fi @@ -4038,6 +4199,10 @@ test_55() { for i in 1023 2048 do + if ! combined_mgs_mds; then + stop_mgs || error "stopping MGS service failed" + format_mgs || error "formatting MGT failed" + fi add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \ $mdsvdev || exit 10 add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \ @@ -4048,7 +4213,8 @@ test_55() { sync echo checking size of lov_objid for ost index $i - LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}') + LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | + grep ^User | awk -F 'Size: ' '{print $2}') if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE" else @@ -4061,16 +4227,14 @@ test_55() { } run_test 55 "check lov_objid size" -test_56() { +test_56a() { local server_version=$(lustre_version_code $SINGLEMDS) local mds_journal_size_orig=$MDSJOURNALSIZE local n MDSJOURNALSIZE=16 - for num in $(seq 1 $MDSCOUNT); do - reformat_mdt $num - done + formatall add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=10000 --reformat \ $(ostdevname 1) $(ostvdevname 1) add ost2 $(mkfs_opts ost2 $(ostdevname 2)) --index=1000 --reformat \ @@ -4099,7 +4263,104 @@ test_56() { MDSJOURNALSIZE=$mds_journal_size_orig reformat } -run_test 56 "check big OST indexes and out-of-index-order start" +run_test 56a "check big OST indexes and out-of-index-order start" + +cleanup_56b() { + trap 0 + + umount_client $MOUNT -f || error "unmount client failed" + stop mds1 + stop mds2 + stop mds3 + stopall + reformat +} + +test_56b() { + [ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs" && return + + trap cleanup_56b EXIT RETURN ERR + stopall + + if ! combined_mgs_mds ; then + format_mgs + start_mgs + fi + + add mds1 $(mkfs_opts mds1 $(mdsdevname 1)) --index=0 --reformat \ + $(mdsdevname 1) $(mdsvdevname 1) + add mds2 $(mkfs_opts mds2 $(mdsdevname 2)) --index=1 --reformat \ + $(mdsdevname 2) $(mdsvdevname 2) + add mds3 $(mkfs_opts mds3 $(mdsdevname 3)) --index=1000 --reformat \ + $(mdsdevname 3) $(mdsvdevname 3) + format_ost 1 + format_ost 2 + + start_mdt 1 || error "MDT 1 (idx 0) start failed" + start_mdt 2 || error "MDT 2 (idx 1) start failed" + start_mdt 3 || error "MDT 3 (idx 1000) start failed" + start_ost || error "Unable to start first ost" + start_ost2 || error "Unable to start second ost" + + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param mdt.*.enable_remote_dir=1 \ + mdt.*.enable_remote_dir_gid=-1" + + mount_client $MOUNT || error "Unable to mount client" + + $LFS mkdir -c3 $MOUNT/$tdir || error "failed to make testdir" + + echo "This is test file 1!" > $MOUNT/$tdir/$tfile.1 || + error "failed to make test file 1" + echo "This is test file 2!" > $MOUNT/$tdir/$tfile.2 || + error "failed to make test file 2" + echo "This is test file 1000!" 
> $MOUNT/$tdir/$tfile.1000 || + error "failed to make test file 1000" + + rm -rf $MOUNT/$tdir || error "failed to remove testdir" + + $LFS mkdir -i1000 $MOUNT/$tdir.1000 || + error "create remote dir at idx 1000 failed" + + output=$($LFS df) + echo "=== START lfs df OUTPUT ===" + echo -e "$output" + echo "==== END lfs df OUTPUT ====" + + mdtcnt=$(echo -e "$output" | grep $FSNAME-MDT | wc -l) + ostcnt=$(echo -e "$output" | grep $FSNAME-OST | wc -l) + + echo "lfs df returned mdt count $mdtcnt and ost count $ostcnt" + [ $mdtcnt -eq 3 ] || error "lfs df returned wrong mdt count" + [ $ostcnt -eq 2 ] || error "lfs df returned wrong ost count" + + echo "This is test file 1!" > $MOUNT/$tdir.1000/$tfile.1 || + error "failed to make test file 1" + echo "This is test file 2!" > $MOUNT/$tdir.1000/$tfile.2 || + error "failed to make test file 2" + echo "This is test file 1000!" > $MOUNT/$tdir.1000/$tfile.1000 || + error "failed to make test file 1000" + rm -rf $MOUNT/$tdir.1000 || error "failed to remove remote_dir" + + output=$($LFS mdts) + echo "=== START lfs mdts OUTPUT ===" + echo -e "$output" + echo "==== END lfs mdts OUTPUT ====" + + echo -e "$output" | grep -v "MDTS:" | awk '{print $1}' | + sed 's/://g' > $TMP/mdts-actual.txt + sort $TMP/mdts-actual.txt -o $TMP/mdts-actual.txt + + echo -e "0\n1\n1000" > $TMP/mdts-expected.txt + + diff $TMP/mdts-expected.txt $TMP/mdts-actual.txt + result=$? + + rm $TMP/mdts-expected.txt $TMP/mdts-actual.txt + + [ $result -eq 0 ] || error "target_obd proc file is incorrect!" +} +run_test 56b "test target_obd correctness with nonconsecutive MDTs" test_57a() { # bug 22656 do_rpc_nodes $(facet_active_host ost1) load_modules_local @@ -4134,6 +4395,7 @@ count_osts() { } test_58() { # bug 22658 + combined_mgs_mds || stop_mgs || error "stopping MGS service failed" setup_noconfig mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" createmany -o $DIR/$tdir/$tfile-%d 100 @@ -4154,7 +4416,7 @@ test_58() { # bug 22658 unmount_fstype $SINGLEMDS # restart MDS with missing llog files start_mds || error "unable to start MDS" - do_facet mds "$LCTL set_param fail_loc=0" + do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" reformat } run_test 58 "missing llog files must not prevent MDT from mounting" @@ -4196,7 +4458,7 @@ test_60() { # LU-471 local num if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4237,6 +4499,7 @@ test_61() { # LU-80 done fi + combined_mgs_mds || stop_mgs || error "stopping MGS service failed" setup_noconfig || error "setting up the filesystem failed" client_up || error "starting client failed" @@ -4305,7 +4568,7 @@ run_test 61 "large xattr" test_62() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4317,7 +4580,7 @@ test_62() { { skip "Need MDS version at least 2.2.51"; return 0; } echo "disable journal for mds" - do_facet mds $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed" + do_facet mds1 $TUNE2FS -O ^has_journal $mdsdev || error "tune2fs failed" start_mds && error "MDT start should fail" echo "disable journal for ost" do_facet ost1 $TUNE2FS -O ^has_journal $ostdev || error "tune2fs failed" @@ -4329,23 +4592,31 @@ run_test 62 "start with disabled journal" test_63() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi - local inode_slab=$(do_facet $SINGLEMDS \ - "awk '/ldiskfs_inode_cache/ { 
print \\\$5 }' /proc/slabinfo")
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) load_module ldiskfs
+	local inode_slab=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+		awk '/ldiskfs_inode_cache/ { print $5 / $6 }')
 
 	if [ -z "$inode_slab" ]; then
 		skip "ldiskfs module has not been loaded"
 		return
 	fi
 
-	echo "$inode_slab ldisk inodes per page"
-	[ "$inode_slab" -ge "3" ] ||
-		error "ldisk inode size is too big, $inode_slab objs per page"
-	return
+	echo "$inode_slab ldiskfs inodes per page"
+	[ "${inode_slab%.*}" -ge "3" ] && return 0
+
+	# If kmalloc-128 is also 1 per page - this is a debug kernel
+	# and so this is not an error.
+	local kmalloc128=$(do_facet $SINGLEMDS "cat /proc/slabinfo" |
+		awk '/^(kmalloc|size)-128 / { print $5 / $6 }')
+	# 32 128-byte chunks in 4k
+	[ "${kmalloc128%.*}" -lt "32" ] ||
+		error "ldiskfs inode too big, only $inode_slab objs/page, " \
+			"kmalloc128 = $kmalloc128 objs/page"
 }
-run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+run_test 63 "Verify each page can at least hold 3 ldiskfs inodes"
 
 test_64() {
 	start_mds || error "unable to start MDS"
@@ -4354,7 +4625,7 @@ test_64() {
 	mount_client $MOUNT || error "Unable to mount client"
 	stop_ost2 || error "Unable to stop second ost"
 	echo "$LFS df"
-	$LFS df --lazy || error "lfs df failed"
+	$LFS df --lazy
 	umount_client $MOUNT -f || error "unmount $MOUNT failed"
 	cleanup_nocli || error "cleanup_nocli failed with $?"
 	#writeconf to remove all ost2 traces for subsequent tests
@@ -4365,7 +4636,7 @@ run_test 64 "check lfs df --lazy "
 test_65() { # LU-2237
 	# Currently, the test is only valid for ldiskfs backend
 	[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
-		skip "non-ldiskfs backend" && return
+		skip "ldiskfs only test" && return
 
 	local devname=$(mdsdevname ${SINGLEMDS//mds/})
 	local brpt=$(facet_mntpt brpt)
@@ -4541,6 +4812,10 @@ test_68() {
 
 	umount_client $MOUNT || error "umount client failed"
 
+	if ! combined_mgs_mds; then
+		start_mgs || error "start mgs failed"
+	fi
+
 	start_mdt 1 || error "MDT start failed"
 	start_ost || error "Unable to start OST1"
 
@@ -4802,6 +5077,7 @@ test_70e() {
 	soc=$(do_facet mds1 "$LCTL get_param -n \
 		mdt.*MDT0000.sync_lock_cancel")
 	[ $soc == "never" ] || error "SoC enabled on single MDS"
+	umount_client $MOUNT -f > /dev/null
 	cleanup || error "cleanup failed with $?"
 }
 
@@ -4966,23 +5242,30 @@ test_72() { #LU-2634
 	local ostdev=$(ostdevname 1)
 	local cmd="$E2FSCK -fnvd $mdsdev"
 	local fn=3
+	local add_options
 
 	[ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
 		skip "ldiskfs only test" && return
 
+	if combined_mgs_mds; then
+		add_options='--reformat'
+	else
+		add_options='--reformat --replace'
+	fi
+
 	#tune MDT with "-O extents"
 	for num in $(seq $MDSCOUNT); do
 		add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \
-			--reformat $(mdsdevname $num) $(mdsvdevname $num) ||
+			$add_options $(mdsdevname $num) $(mdsvdevname $num) ||
 			error "add mds $num failed"
 		do_facet mds${num} "$TUNE2FS -O extents $(mdsdevname $num)" ||
 			error "$TUNE2FS failed on mds${num}"
 	done
 
-	add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev ||
+	add ost1 $(mkfs_opts ost1 $ostdev) $add_options $ostdev ||
 		error "add $ostdev failed"
-	start_mgsmds || error "start mds failed"
+	start_mds || error "start mds failed"
 	start_ost || error "start ost failed"
 	mount_client $MOUNT || error "mount client failed"
 
@@ -5043,6 +5326,10 @@ test_75() { # LU-2374
 	add mds1 $opts_mds || error "add mds1 failed for new params"
 	add ost1 $opts_ost || error "add ost1 failed for new params"
 
+	if ! 
combined_mgs_mds; then + stop_mgs || error "stop mgs failed" + fi + reformat return 0 } run_test 75 "The order of --index should be irrelevant" @@ -5050,6 +5337,10 @@ run_test 75 "The order of --index should be irrelevant" test_76a() { [[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] || { skip "Need MDS version at least 2.4.52" && return 0; } + + if ! combined_mgs_mds; then + start_mgs || error "start mgs failed" + fi setup local MDMB_PARAM="osc.*.max_dirty_mb" echo "Change MGS params" @@ -5139,12 +5430,38 @@ test_76c() { } run_test 76c "verify changelog_mask is applied with set_param -P" +test_76d() { #LU-9399 + setupall + + local xattr_cache="llite.*.xattr_cache" + local cmd="$LCTL get_param -n $xattr_cache | head -1" + local new=$((($(eval $cmd) + 1) % 2)) + + echo "lctl set_param -P llite.*.xattr_cache=$new" + do_facet mgs $LCTL set_param -P $xattr_cache=$new || + error "Can't change xattr_cache" + wait_update $HOSTNAME "$cmd" "$new" + + echo "Check $xattr_cache on client $MOUNT" + umount_client $MOUNT || error "umount $MOUNT failed" + mount_client $MOUNT || error "mount $MOUNT failed" + [ $(eval $cmd) -eq $new ] || + error "$xattr_cache != $new on client $MOUNT" + + echo "Check $xattr_cache on the new client $MOUNT2" + mount_client $MOUNT2 || error "mount $MOUNT2 failed" + [ $(eval $cmd) -eq $new ] || + error "$xattr_cache != $new on client $MOUNT2" + umount_client $MOUNT2 || error "umount $MOUNT2 failed" + + stopall +} +run_test 76d "verify llite.*.xattr_cache can be set by 'set_param -P' correctly" + test_77() { # LU-3445 local server_version=$(lustre_version_code $SINGLEMDS) - - [[ $server_version -ge $(version_code 2.2.60) ]] && - [[ $server_version -le $(version_code 2.4.0) ]] && - skip "Need MDS version < 2.2.60 or > 2.4.0" && return + [[ $server_version -ge $(version_code 2.8.55) ]] || + { skip "Need MDS version 2.8.55+ "; return; } if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && @@ -5157,7 +5474,9 @@ test_77() { # LU-3445 local fs2ostvdev=$(ostvdevname 1_2) local fsname=test1234 local mgsnid - local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)" + local failnid="$(h2nettype 1.2.3.4),$(h2nettype 4.3.2.1)" + + combined_mgs_mds || stop_mgs || error "stopping MGS service failed" add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \ --reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed" @@ -5165,9 +5484,9 @@ test_77() { # LU-3445 error "start fs2mds failed" mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,) - [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid" + mgsnid="0.0.0.0@tcp,$mgsnid,$mgsnid:$mgsnid" - add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \ + add fs2ost --mgsnode=$mgsnid $(mkfs_opts ost1 $fs2ostdev) \ --failnode=$failnid --fsname=$fsname \ --reformat $fs2ostdev $fs2ostvdev || error "add fs2ost failed" @@ -5183,7 +5502,7 @@ run_test 77 "comma-separated MGS NIDs and failover node NIDs" test_78() { [[ $(facet_fstype $SINGLEMDS) != ldiskfs || $(facet_fstype ost1) != ldiskfs ]] && - skip "only applicable to ldiskfs-based MDTs and OSTs" && return + skip "ldiskfs only test" && return # reformat the Lustre filesystem with a smaller size local saved_MDSCOUNT=$MDSCOUNT @@ -5442,7 +5761,7 @@ restore_ostindex() { test_81() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; } + [[ $OSTCOUNT -ge 3 ]] || { skip_env 
"needs >= 3 OSTs" && return; } stopall @@ -5487,19 +5806,6 @@ test_81() { # LU-4665 } run_test 81 "sparse OST indexing" -# Wait OSTs to be active on both client and MDT side. -wait_osts_up() { - local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd | - awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'" - wait_update $HOSTNAME "eval $cmd" $OSTCOUNT || - error "wait_update OSTs up on client failed" - - cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u | - awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'" - wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT || - error "wait_update OSTs up on MDT failed" -} - # Here we exercise the stripe placement functionality on a file system that # has formatted the OST with a random index. With the file system the following # functionality is tested: @@ -5517,7 +5823,7 @@ wait_osts_up() { test_82a() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; } + [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; } stopall @@ -5615,6 +5921,9 @@ cleanup_82b() { # Remove OSTs from a pool and destroy the pool. destroy_pool $ost_pool || true + if ! combined_mgs_mds ; then + umount_mgs_client + fi restore_ostindex } @@ -5625,7 +5934,7 @@ cleanup_82b() { test_82b() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 4 ]] || { skip_env "Need at least 4 OSTs" && return; } + [[ $OSTCOUNT -ge 4 ]] || { skip_env "needs >= 4 OSTs" && return; } stopall @@ -5654,6 +5963,10 @@ test_82b() { # LU-4665 done mount_client $MOUNT || error "mount client $MOUNT failed" + if ! combined_mgs_mds ; then + mount_mgs_client + fi + wait_osts_up $LFS df $MOUNT || error "$LFS df $MOUNT failed" mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" @@ -5684,7 +5997,8 @@ test_82b() { # LU-4665 wait_update $HOSTNAME "$LCTL get_param -n lov.$FSNAME-*.pools.$TESTNAME| sort -u | tr '\n' ' ' " "$ost_targets_uuid" || error "wait_update $ost_pool failed" - pool_list $ost_pool || error "list OST pool $ost_pool failed" + wait_update_facet $SINGLEMDS "$LCTL pool_list $ost_pool | wc -l" 4 || + error "wait_update pool_list $ost_pool failed" # If [--pool|-p ] is set with [--ost-list|-o ], # then the OSTs must be the members of the pool. @@ -5714,8 +6028,8 @@ run_test 82b "specify OSTs for file with --pool and --ost-list options" test_83() { [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] || { skip "Need OST version at least 2.6.91" && return 0; } - if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + if [ $(facet_fstype ost1) != ldiskfs ]; then + skip "ldiskfs only test" return fi @@ -5731,7 +6045,7 @@ test_83() { # Mount the OST as an ldiskfs filesystem. log "mount the OST $dev as a $fstype filesystem" add ost1 $(mkfs_opts ost1 $dev) $FSTYPE_OPT \ - --reformat $dev $dev > /dev/null || + --reformat $dev > /dev/null || error "format ost1 error" if ! test -b $dev; then @@ -5839,9 +6153,21 @@ test_85() { } run_test 85 "osd_ost init: fail ea_fid_set" +cleanup_86() { + trap 0 + + # ost1 has already registered to the MGS before the reformat. + # So after reformatting it with option "-G", it could not be + # mounted to the MGS. Cleanup the system for subsequent tests. 
+ reformat_and_config +} + test_86() { + local server_version=$(lustre_version_code $SINGLEMDS) [ "$(facet_fstype ost1)" = "zfs" ] && skip "LU-6442: no such mkfs params for ZFS OSTs" && return + [[ $server_version -ge $(version_code 2.7.56) ]] || + { skip "Need server version newer than 2.7.55"; return 0; } local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \ --reformat $(ostdevname 1) $(ostvdevname 1)" @@ -5862,6 +6188,9 @@ test_86() { echo "params: $opts" + trap cleanup_86 EXIT ERR + + stopall add ost1 $opts || error "add ost1 failed with new params" local FOUNDSIZE=$(do_facet ost1 "$DEBUGFS -c -R stats $(ostdevname 1)" | @@ -5869,16 +6198,17 @@ test_86() { [[ $FOUNDSIZE == $NEWSIZE ]] || error "Flex block group size: $FOUNDSIZE, expected: $NEWSIZE" - return 0 + + cleanup_86 } run_test 86 "Replacing mkfs.lustre -G option" test_87() { #LU-6544 - [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.7.56) ]] || - { skip "Need MDS version at least 2.7.56" && return; } + [[ $(lustre_version_code $SINGLEMDS1) -ge $(version_code 2.9.51) ]] || + { skip "Need MDS version at least 2.9.51" && return; } [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] && - { skip "Only applicable to ldiskfs-based MDTs" && return; } - [[ $OSTCOUNT -gt 69 ]] && + { skip "ldiskfs only test" && return; } + [[ $OSTCOUNT -gt 59 ]] && { skip "Ignore wide striping situation" && return; } local mdsdev=$(mdsdevname 1) @@ -5886,23 +6216,22 @@ test_87() { #LU-6544 local file=$DIR/$tfile local mntpt=$(facet_mntpt $SINGLEMDS) local used_xattr_blk=0 - local inode_size=${1:-512} + local inode_size=${1:-1024} local left_size=0 local xtest="trusted.test" local value local orig local i + local stripe_cnt=$(($OSTCOUNT + 2)) - #Please see LU-6544 for MDT inode size calculation - if [ $OSTCOUNT -gt 26 ]; then + #Please see ldiskfs_make_lustre() for MDT inode size calculation + if [ $stripe_cnt -gt 16 ]; then inode_size=2048 - elif [ $OSTCOUNT -gt 5 ]; then - inode_size=1024 fi left_size=$(expr $inode_size - \ 156 - \ 32 - \ - 32 - $OSTCOUNT \* 24 - 16 - 3 - \ + 32 - 40 \* 3 - 32 \* 3 - $stripe_cnt \* 24 - 16 - 3 - \ 24 - 16 - 3 - \ 24 - 18 - $(expr length $tfile) - 16 - 4) if [ $left_size -le 0 ]; then @@ -5916,7 +6245,7 @@ test_87() { #LU-6544 unload_modules reformat - add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$OSTCOUNT \ + add mds1 $(mkfs_opts mds1 ${mdsdev}) --stripe-count-hint=$stripe_cnt \ --reformat $mdsdev $mdsvdev || error "add mds1 failed" start_mdt 1 > /dev/null || error "start mdt1 failed" for i in $(seq $OSTCOUNT); do @@ -5927,9 +6256,10 @@ test_87() { #LU-6544 check_mount || error "check client $MOUNT failed" #set xattr - $SETSTRIPE -c -1 $file || error "$SETSTRIPE -c -1 $file failed" - $GETSTRIPE $file || error "$GETSTRIPE $file failed" - i=$($GETSTRIPE -c $file) + $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file || + error "Create file with 3 components failed" + $TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed" + i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed" if [ $i -ne $OSTCOUNT ]; then left_size=$(expr $left_size + $(expr $OSTCOUNT - $i) \* 24) echo -n "Since only $i out $OSTCOUNT OSTs are used, " @@ -5981,6 +6311,81 @@ test_88() { } run_test 88 "check the default mount options can be overridden" +test_89() { # LU-7131 + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.54) ]] || + { skip "Need MDT version at least 2.9.54" && return 0; } + + local key=failover.node + local val1=192.0.2.254@tcp0 # Reserved IPs, see RFC 5735 + local val2=192.0.2.255@tcp0 
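	# tunefs.lustre stores --param settings as an on-disk key=value
	# list; the checks below exercise adding a value, replacing an
	# existing one, erasing a single key, erasing all keys, and the
	# left-to-right handling of --erase-params combined with --param.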
+ local mdsdev=$(mdsdevname 1) + local params + + stopall + + [ $(facet_fstype mds1) == zfs ] && import_zpool mds1 + # Check that parameters are added correctly + echo "tunefs --param $key=$val1" + do_facet mds "$TUNEFS --param $key=$val1 $mdsdev >/dev/null" || + error "tunefs --param $key=$val1 failed" + params=$(do_facet mds $TUNEFS --dryrun $mdsdev) || + error "tunefs --dryrun failed" + params=${params##*Parameters:} + params=${params%%exiting*} + [ $(echo $params | tr ' ' '\n' | grep -c $key=$val1) = "1" ] || + error "on-disk parameter not added correctly via tunefs" + + # Check that parameters replace existing instances when added + echo "tunefs --param $key=$val2" + do_facet mds "$TUNEFS --param $key=$val2 $mdsdev >/dev/null" || + error "tunefs --param $key=$val2 failed" + params=$(do_facet mds $TUNEFS --dryrun $mdsdev) || + error "tunefs --dryrun failed" + params=${params##*Parameters:} + params=${params%%exiting*} + [ $(echo $params | tr ' ' '\n' | grep -c $key=) = "1" ] || + error "on-disk parameter not replaced via tunefs" + [ $(echo $params | tr ' ' '\n' | grep -c $key=$val2) = "1" ] || + error "on-disk parameter not replaced correctly via tunefs" + + # Check that a parameter is erased properly + echo "tunefs --erase-param $key" + do_facet mds "$TUNEFS --erase-param $key $mdsdev >/dev/null" || + error "tunefs --erase-param $key failed" + params=$(do_facet mds $TUNEFS --dryrun $mdsdev) || + error "tunefs --dryrun failed" + params=${params##*Parameters:} + params=${params%%exiting*} + [ $(echo $params | tr ' ' '\n' | grep -c $key=) = "0" ] || + error "on-disk parameter not erased correctly via tunefs" + + # Check that all the parameters are erased + echo "tunefs --erase-params" + do_facet mds "$TUNEFS --erase-params $mdsdev >/dev/null" || + error "tunefs --erase-params failed" + params=$(do_facet mds $TUNEFS --dryrun $mdsdev) || + error "tunefs --dryrun failed" + params=${params##*Parameters:} + params=${params%%exiting*} + [ -z $params ] || + error "all on-disk parameters not erased correctly via tunefs" + + # Check the order of options --erase-params and --param + echo "tunefs --param $key=$val1 --erase-params" + do_facet mds \ + "$TUNEFS --param $key=$val1 --erase-params $mdsdev >/dev/null"|| + error "tunefs --param $key=$val1 --erase-params failed" + params=$(do_facet mds $TUNEFS --dryrun $mdsdev) || + error "tunefs --dryrun failed" + params=${params##*Parameters:} + params=${params%%exiting*} + [ $(echo $params | tr ' ' '\n') == "$key=$val1" ] || + error "on-disk param not added correctly with --erase-params" + + reformat +} +run_test 89 "check tunefs --param and --erase-param{s} options" + # $1 test directory # $2 (optional) value of max_mod_rpcs_in_flight to set check_max_mod_rpcs_in_flight() { @@ -6341,10 +6746,21 @@ run_test 91 "evict-by-nid support" generate_ldev_conf() { # generate an ldev.conf file local ldevconfpath=$1 + local fstype= + local fsldevformat="" touch $ldevconfpath - printf "%s\t-\t%s-MGS0000\t%s\n" \ + + fstype=$(facet_fstype mgs) + if [ "$fstype" == "zfs" ]; then + fsldevformat="$fstype:" + else + fsldevformat="" + fi + + printf "%s\t-\t%s-MGS0000\t%s%s\n" \ $mgs_HOST \ $FSNAME \ + $fsldevformat \ $(mgsdevname) > $ldevconfpath local mdsfo_host=$mdsfailover_HOST; @@ -6353,11 +6769,19 @@ generate_ldev_conf() { fi for num in $(seq $MDSCOUNT); do - printf "%s\t%s\t%s-MDT%04d\t%s\n" \ + fstype=$(facet_fstype mds$num) + if [ "$fstype" == "zfs" ]; then + fsldevformat="$fstype:" + else + fsldevformat="" + fi + + printf "%s\t%s\t%s-MDT%04d\t%s%s\n" \ $mds_HOST \ 
$mdsfo_host \ $FSNAME \ $num \ + $fsldevformat \ $(mdsdevname $num) >> $ldevconfpath done @@ -6367,11 +6791,19 @@ generate_ldev_conf() { fi for num in $(seq $OSTCOUNT); do - printf "%s\t%s\t%s-OST%04d\t%s\n" \ + fstype=$(facet_fstype ost$num) + if [ "$fstype" == "zfs" ]; then + fsldevformat="$fstype:" + else + fsldevformat="" + fi + + printf "%s\t%s\t%s-OST%04d\t%s%s\n" \ $ost_HOST \ $ostfo_host \ $FSNAME \ $num \ + $fsldevformat \ $(ostdevname $num) >> $ldevconfpath done @@ -6613,6 +7045,550 @@ test_95() { } run_test 95 "ldev should only allow one label filter" +test_96() { + if [ -z "$LDEV" ]; then + error "ldev is missing!" + fi + + local LDEVCONFPATH=$TMP/ldev.conf + local NIDSPATH=$TMP/nids + + generate_ldev_conf $LDEVCONFPATH + generate_nids $NIDSPATH + + local LDEV_OUTPUT=$TMP/ldev-output.txt + $LDEV -c $LDEVCONFPATH -n $NIDSPATH -H $mgs_HOST \ + echo %H-%b | \ + awk '{print $2}' > $LDEV_OUTPUT + + # ldev failed, error + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT + error "ldev failed to execute!" + fi + + # expected output + local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt + + echo "$mgs_HOST-$(facet_fstype mgs)" > $EXPECTED_OUTPUT + + if [ "$mgs_HOST" == "$mds_HOST" ]; then + for num in $(seq $MDSCOUNT); do + echo "$mds_HOST-$(facet_fstype mds$num)" \ + >> $EXPECTED_OUTPUT + done + fi + + if [ "$mgs_HOST" == "$ost_HOST" ]; then + for num in $(seq $OSTCOUNT); do + echo "$ost_HOST-$(facet_fstype ost$num)" \ + >> $EXPECTED_OUTPUT + done + fi + + compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT + + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT + error "ldev failed to produce the correct output!" + fi + + rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT +} +run_test 96 "ldev returns hostname and backend fs correctly in command sub" + +test_97() { + if [ -z "$LDEV" ]; then + error "ldev is missing!" + fi + + local LDEVCONFPATH=$TMP/ldev.conf + local NIDSPATH=$TMP/nids + + generate_ldev_conf $LDEVCONFPATH + generate_nids $NIDSPATH + + local LDEV_OUTPUT=$TMP/ldev-output.txt + local EXPECTED_OUTPUT=$TMP/ldev-expected-output.txt + + echo -e "\nMDT role" + $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mdt > $LDEV_OUTPUT + + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT + error "ldev failed to execute for mdt role!" + fi + + for num in $(seq $MDSCOUNT); do + printf "%s-MDT%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT + done + + compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT + + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT + error "ldev failed to produce the correct output for mdt role!" + fi + + echo -e "\nOST role" + $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R ost > $LDEV_OUTPUT + + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT + error "ldev failed to execute for ost role!" + fi + + rm $EXPECTED_OUTPUT + for num in $(seq $OSTCOUNT); do + printf "%s-OST%04d\n" $FSNAME $num >> $EXPECTED_OUTPUT + done + + compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT + + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT + error "ldev failed to produce the correct output for ost role!" + fi + + echo -e "\nMGS role" + $LDEV -c $LDEVCONFPATH -n $NIDSPATH -F $FSNAME -R mgs > $LDEV_OUTPUT + + if [ $? -ne 0 ]; then + rm $LDEVCONFPATH $NIDSPATH $LDEV_OUTPUT $EXPECTED_OUTPUT + error "ldev failed to execute for mgs role!" 
+	fi
+
+	printf "%s-MGS0000\n" $FSNAME > $EXPECTED_OUTPUT
+
+	compare_ldev_output $LDEV_OUTPUT $EXPECTED_OUTPUT
+
+	if [ $? -ne 0 ]; then
+		rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+		error "ldev failed to produce the correct output for mgs role!"
+	fi
+
+	rm $LDEVCONFPATH $NIDSPATH $EXPECTED_OUTPUT $LDEV_OUTPUT
+}
+run_test 97 "ldev returns correct output when querying based on role"
+
+test_98()
+{
+	local mountopt
+	local temp=$MDS_MOUNT_OPTS
+
+	setup
+	check_mount || error "mount failed"
+	mountopt="user_xattr"
+	for ((x = 1; x <= 400; x++)); do
+		mountopt="$mountopt,user_xattr"
+	done
+	remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+		error "Buffer overflow check failed"
+	cleanup || error "cleanup failed"
+}
+run_test 98 "Buffer-overflow check while parsing mount_opts"
+
+test_99()
+{
+	[[ $(facet_fstype ost1) != ldiskfs ]] &&
+		{ skip "ldiskfs only test" && return; }
+	[[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+		{ skip "Need OST version at least 2.8.57" && return 0; }
+
+	local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+		--reformat $(ostdevname 1) $(ostvdevname 1)"
+	do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" &&
+		skip "meta_bg already set" && return
+
+	local opts=ost_opts
+	if [[ ${!opts} != *mkfsoptions* ]]; then
+		eval opts=\"${!opts} \
+			--mkfsoptions='\\\"-O ^resize_inode,meta_bg\\\"'\"
+	else
+		local val=${!opts//--mkfsoptions=\\\"/ \
+			--mkfsoptions=\\\"-O ^resize_inode,meta_bg }
+		eval opts='${val}'
+	fi
+
+	echo "params: $opts"
+
+	add ost1 $opts || error "add ost1 failed with new params"
+
+	do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" ||
+		error "meta_bg is not set"
+
+	reformat
+}
+run_test 99 "Adding meta_bg option"
+
+test_100() {
+	reformat
+	start_mds || error "MDS start failed"
+	start_ost || error "unable to start OST"
+	mount_client $MOUNT || error "client start failed"
+	check_mount || error "check_mount failed"
+
+	# Desired output
+	# MGS:
+	#     0@lo
+	# lustre-MDT0000:
+	#     0@lo
+	# lustre-OST0000:
+	#     0@lo
+	do_facet mgs 'lshowmount -v' | awk 'BEGIN {NR == 0; rc=1} /MGS:/ {rc=0}
+		END {exit rc}' || error "lshowmount has no output MGS"
+
+	do_facet mds1 'lshowmount -v' | awk 'BEGIN {NR == 2; rc=1} /-MDT0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output MDT0"
+
+	do_facet ost1 'lshowmount -v' | awk 'BEGIN {NR == 4; rc=1} /-OST0000:/
+		{rc=0} END {exit rc}' || error "lshowmount has no output OST0"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
+
+test_101() {
+	local createmany_oid
+	local dev=$FSNAME-OST0000-osc-MDT0000
+	setup
+
+	createmany -o $DIR1/$tfile-%d 50000 &
+	createmany_oid=$!
+	# MDT->OST reconnection causes MDT<->OST last_id synchronisation
+	# via osp_precreate_cleanup_orphans.
+	for ((i = 0; i < 100; i++)); do
+		for ((k = 0; k < 10; k++)); do
+			do_facet $SINGLEMDS "$LCTL --device $dev deactivate;" \
+				"$LCTL --device $dev activate"
+		done
+
+		ls -asl $MOUNT | grep '???' &&
+			(kill -9 $createmany_oid &>/dev/null; \
+				error "File has no object on OST")
+
+		kill -s 0 $createmany_oid || break
+	done
+	wait $createmany_oid
+	cleanup
+}
+run_test 101 "Race MDT->OST reconnection with create"
+
+test_102() {
+	cleanup || error "cleanup failed with $?"
+
+	local mds1dev=$(mdsdevname 1)
+	local mds1mnt=$(facet_mntpt mds1)
+	local mds1fstype=$(facet_fstype mds1)
+	local mds1opts=$MDS_MOUNT_OPTS
+
+	if [ $mds1fstype == ldiskfs ] &&
+	     ! 
do_facet mds1 test -b $mds1dev; then + mds1opts=$(csa_add "$mds1opts" -o loop) + fi + if [[ $mds1fstype == zfs ]]; then + import_zpool mds1 || return ${PIPESTATUS[0]} + fi + + # unload all and only load libcfs to allow fail_loc setting + do_facet mds1 lustre_rmmod || error "unable to unload modules" + do_facet mds1 modprobe libcfs || error "libcfs not loaded" + do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded" + + #define OBD_FAIL_OBDCLASS_MODULE_LOAD 0x60a + do_facet mds1 "$LCTL set_param fail_loc=0x8000060a" + + do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts && + error "mdt start must fail" + do_facet mds1 lsmod \| grep obdclass && error "obdclass must not load" + + do_facet mds1 "$LCTL set_param fail_loc=0x0" + + do_facet mds1 $MOUNT_CMD $mds1dev $mds1mnt $mds1opts || + error "mdt start must not fail" + + cleanup || error "cleanup failed with $?" +} +run_test 102 "obdclass module cleanup upon error" + +test_renamefs() { + local newname=$1 + + echo "rename $FSNAME to $newname" + + if ! combined_mgs_mds ; then + local facet=$(mgsdevname) + + do_facet mgs \ + "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"|| + error "(7) Fail to rename MGS" + if [ "$(facet_fstype $facet)" = "zfs" ]; then + reimport_zpool mgs $newname-mgs + fi + fi + + for num in $(seq $MDSCOUNT); do + local facet=$(mdsdevname $num) + + do_facet mds${num} \ + "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"|| + error "(8) Fail to rename MDT $num" + if [ "$(facet_fstype $facet)" = "zfs" ]; then + reimport_zpool mds${num} $newname-mdt${num} + fi + done + + for num in $(seq $OSTCOUNT); do + local facet=$(ostdevname $num) + + do_facet ost${num} \ + "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"|| + error "(9) Fail to rename OST $num" + if [ "$(facet_fstype $facet)" = "zfs" ]; then + reimport_zpool ost${num} $newname-ost${num} + fi + done +} + +test_103_set_pool() { + local pname=$1 + local ost_x=$2 + + do_facet mgs $LCTL pool_add $FSNAME.$pname ${FSNAME}-$ost_x || + error "Fail to add $ost_x to $FSNAME.$pname" + wait_update $HOSTNAME \ + "lctl get_param -n lov.$FSNAME-clilov-*.pools.$pname | + grep $ost_x" "$FSNAME-${ost_x}_UUID" || + error "$ost_x is NOT in pool $FSNAME.$pname" +} + +test_103_check_pool() { + local save_fsname=$1 + local errno=$2 + + stat $DIR/$tdir/test-framework.sh || + error "($errno) Fail to stat" + do_facet mgs $LCTL pool_list $FSNAME.pool1 || + error "($errno) Fail to list $FSNAME.pool1" + do_facet mgs $LCTL pool_list $FSNAME.$save_fsname || + error "($errno) Fail to list $FSNAME.$save_fsname" + do_facet mgs $LCTL pool_list $FSNAME.$save_fsname | + grep ${FSNAME}-OST0000 || + error "($errno) List $FSNAME.$save_fsname is invalid" + + local pname=$($LFS getstripe --pool $DIR/$tdir/d0) + [ "$pname" = "$save_fsname" ] || + error "($errno) Unexpected pool name $pname" +} + +test_103() { + check_mount_and_prep + rm -rf $DIR/$tdir + mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir" + cp $LUSTRE/tests/test-framework.sh $DIR/$tdir || + error "(2) Fail to copy test-framework.sh" + + if ! combined_mgs_mds ; then + mount_mgs_client + fi + do_facet mgs $LCTL pool_new $FSNAME.pool1 || + error "(3) Fail to create $FSNAME.pool1" + # name the pool name as the fsname + do_facet mgs $LCTL pool_new $FSNAME.$FSNAME || + error "(4) Fail to create $FSNAME.$FSNAME" + + test_103_set_pool $FSNAME OST0000 + + $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 || + error "(6) Fail to setstripe on $DIR/$tdir/d0" + + if ! 
+test_103() {
+	check_mount_and_prep
+	rm -rf $DIR/$tdir
+	mkdir $DIR/$tdir || error "(1) Fail to mkdir $DIR/$tdir"
+	cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
+		error "(2) Fail to copy test-framework.sh"
+
+	if ! combined_mgs_mds ; then
+		mount_mgs_client
+	fi
+	do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
+		error "(3) Fail to create $FSNAME.pool1"
+	# also create a pool whose name is identical to the fsname
+	do_facet mgs $LCTL pool_new $FSNAME.$FSNAME ||
+		error "(4) Fail to create $FSNAME.$FSNAME"
+
+	test_103_set_pool $FSNAME OST0000
+
+	$SETSTRIPE -p $FSNAME $DIR/$tdir/d0 ||
+		error "(6) Fail to setstripe on $DIR/$tdir/d0"
+
+	if ! combined_mgs_mds ; then
+		umount_mgs_client
+	fi
+	KEEP_ZPOOL=true
+	stopall
+
+	test_renamefs mylustre
+
+	local save_fsname=$FSNAME
+	FSNAME="mylustre"
+	setupall
+
+	if ! combined_mgs_mds ; then
+		mount_mgs_client
+	fi
+	test_103_check_pool $save_fsname 7
+
+	if [ $OSTCOUNT -ge 2 ]; then
+		test_103_set_pool $save_fsname OST0001
+	fi
+
+	$SETSTRIPE -p $save_fsname $DIR/$tdir/f0 ||
+		error "(16) Fail to setstripe on $DIR/$tdir/f0"
+	if ! combined_mgs_mds ; then
+		umount_mgs_client
+	fi
+
+	stopall
+
+	test_renamefs tfs
+
+	FSNAME="tfs"
+	setupall
+
+	if ! combined_mgs_mds ; then
+		mount_mgs_client
+	fi
+	test_103_check_pool $save_fsname 17
+
+	if ! combined_mgs_mds ; then
+		umount_mgs_client
+	fi
+	stopall
+
+	test_renamefs $save_fsname
+
+	FSNAME=$save_fsname
+	setupall
+	KEEP_ZPOOL=false
+}
+run_test 103 "rename filesystem name"
+
+test_104() { # LU-6952
+	local mds_mountopts=$MDS_MOUNT_OPTS
+	local ost_mountopts=$OST_MOUNT_OPTS
+	local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
+	local lctl_ver=$(do_facet $SINGLEMDS $LCTL --version |
+			awk '{ print $2 }')
+
+	[[ $(version_code $lctl_ver) -lt $(version_code 2.9.55) ]] &&
+		{ skip "this test needs utils 2.9.55 or newer" && return 0; }
+
+	# specify "acl" in mount options used by mkfs.lustre
+	if [ -z "$MDS_MOUNT_FS_OPTS" ]; then
+		MDS_MOUNT_FS_OPTS="acl,user_xattr"
+	else
+		MDS_MOUNT_FS_OPTS="${MDS_MOUNT_FS_OPTS},acl,user_xattr"
+	fi
+
+	echo "mountfsopt: $MDS_MOUNT_FS_OPTS"
+
+	# reformat/remount the MDT to apply the MDS_MOUNT_FS_OPTS options
+	formatall
+	if [ -z "$MDS_MOUNT_OPTS" ]; then
+		MDS_MOUNT_OPTS="-o noacl"
+	else
+		MDS_MOUNT_OPTS="${MDS_MOUNT_OPTS},noacl"
+	fi
+
+	for num in $(seq $MDSCOUNT); do
+		start mds$num $(mdsdevname $num) $MDS_MOUNT_OPTS ||
+			error "Failed to start MDS"
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		start ost$num $(ostdevname $num) $OST_MOUNT_OPTS ||
+			error "Failed to start OST"
+	done
+
+	mount_client $MOUNT || error "mount_client $MOUNT failed"
+	setfacl -m "d:$RUNAS_ID:rwx" $MOUNT &&
+		error "ACL is applied when FS is mounted with noacl."
+
+	MDS_MOUNT_OPTS=$mds_mountopts
+	OST_MOUNT_OPTS=$ost_mountopts
+	MDS_MOUNT_FS_OPTS=$mds_mountfsopts
+
+	formatall
+	setupall
+}
+run_test 104 "Make sure user defined options are reflected in mount"
+
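+# A minimal sketch of how a client mount option could be checked from
+# /proc/mounts (an assumed helper, not used by test_104 or test_105; the
+# option name passed in is an illustrative assumption):
+check_mount_opt() {
+	local opt=$1
+
+	grep " $MOUNT " /proc/mounts | grep -qw "$opt" ||
+		error "mount option '$opt' not in effect on $MOUNT"
+}
+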
+error_and_umount() {
+	umount $TMP/$tdir
+	rmdir $TMP/$tdir
+	error "$@"
+}
+
+test_105() {
+	cleanup -f
+	reformat
+	setup
+	mkdir -p $TMP/$tdir
+	mount --bind $DIR $TMP/$tdir || error "mount bind mnt pt failed"
+	rm -f $TMP/$tdir/$tfile
+	rm -f $TMP/$tdir/${tfile}1
+
+	# Files should not be created in ro bind mount point
+	# remounting from rw to ro
+	mount -o remount,ro $TMP/$tdir ||
+		error_and_umount "readonly remount of bind mnt pt failed"
+	touch $TMP/$tdir/$tfile &&
+		error_and_umount "touch succeeds on ro bind mnt pt"
+	[ -e $TMP/$tdir/$tfile ] &&
+		error_and_umount "file created on ro bind mnt pt"
+
+	# Files should be created in rw bind mount point
+	# remounting from ro to rw
+	mount -o remount,rw $TMP/$tdir ||
+		error_and_umount "read-write remount of bind mnt pt failed"
+	touch $TMP/$tdir/${tfile}1 ||
+		error_and_umount "touch fails on rw bind mnt pt"
+	[ -e $TMP/$tdir/${tfile}1 ] ||
+		error_and_umount "file not created on rw bind mnt pt"
+	umount $TMP/$tdir || error "umount of bind mnt pt failed"
+	rmdir $TMP/$tdir
+	cleanup || error "cleanup failed with $?"
+}
+run_test 105 "check file creation for ro and rw bind mnt pt"
+
+test_106() {
+	local repeat=5
+
+	reformat
+	setupall
+	mkdir -p $DIR/$tdir || error "create $tdir failed"
+	$LFS setstripe -c 1 -i 0 $DIR/$tdir
+#define OBD_FAIL_CAT_RECORDS			0x1312
+	do_facet mds1 $LCTL set_param fail_loc=0x1312 fail_val=$repeat
+
+	for ((i = 1; i <= $repeat; i++)); do
+		# create and then unlink enough files to fill one plain llog
+		createmany -o $DIR/$tdir/f- 64768
+		createmany -u $DIR/$tdir/f- 64768
+	done
+	wait_delete_completed $((TIMEOUT * 7))
+	# buggy osp code would LBUG during cleanup with:
+	# ASSERTION( thread->t_flags != SVC_RUNNING ) failed
+	do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0
+
+	cleanupall
+}
+run_test 106 "check osp llog processing when catalog is wrapped"
+
+test_107() {
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] ||
+		{ skip "Need MDS version at least 2.10.50"; return; }
+
+	start_mgsmds || error "start_mgsmds failed"
+	start_ost || error "unable to start OST"
+
+	# add an unknown configuration parameter
+	local PARAM="$FSNAME-OST0000.ost.unknown_param=50"
+	do_facet mgs "$LCTL conf_param $PARAM"
+	cleanup_nocli || error "cleanup_nocli failed with $?"
+	load_modules
+
+	# the unknown param should be ignored while mounting
+	start_ost || error "unable to start OST after unknown param set"
+
+	cleanup || error "cleanup failed with $?"
+}
+run_test 107 "Unknown config param should not fail target mounting"
+
 if ! combined_mgs_mds ; then
 	stop mgs
 fi