X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=cb2daaf0b1c0d8a4fdefad070bb40a82f6e448c5;hp=a251e2345a54fa286b4a92526765c8c4581be368;hb=eb65c3a586f1efd425f8360972b5d365cfecf7e1;hpb=75eb91aeabcd167fe586e5e0f707cee5e8966133 diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh index a251e23..cb2daaf 100644 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -8,25 +8,6 @@ ONLY=${ONLY:-"$*"} ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 101" # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! -is_sles11() # LU-2181 -{ - if [ -r /etc/SuSE-release ] - then - local vers=$(grep VERSION /etc/SuSE-release | awk '{print $3}') - local patchlev=$(grep PATCHLEVEL /etc/SuSE-release | - awk '{ print $3 }') - if [ $vers -eq 11 ] && [ $patchlev -eq 2 ] - then - return 0 - fi - fi - return 1 -} - -if is_sles11; then # LU-2181 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b" -fi - if [ "$FAILURE_MODE" = "HARD" ]; then CONFIG_EXCEPTIONS="24a " && echo "Except the tests: $CONFIG_EXCEPTIONS for " \ @@ -68,8 +49,12 @@ OSTDEV1_2=$fs2ost_DEV OSTDEV2_2=$fs3ost_DEV if ! combined_mgs_mds; then - # bug number for skipped test: 23954 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b" + # bug number for skipped test: LU-9860 LU-9860 LU-9860 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 43b 53b 54b" + # bug number for skipped test: LU-9875 LU-9879 LU-9879 LU-9879 LU-9879 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 70e 80 84 87 100" + # bug number for skipped test: LU-8110 LU-9879 LU-9879 LU-9879 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 102 104 105 107" fi # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time @@ -125,8 +110,8 @@ reformat() { } start_mgs () { - echo "start mgs" - start mgs $(mgsdevname) $MGS_MOUNT_OPTS + echo "start mgs service on $(facet_active_host mgs)" + start mgs $(mgsdevname) $MGS_MOUNT_OPTS $@ } start_mdt() { @@ -255,7 +240,9 @@ cleanup_nocli() { } cleanup() { - umount_client $MOUNT || return 200 + local force="" + [ "x$1" != "x" ] && force='-f' + umount_client $MOUNT $force || return 200 cleanup_nocli || return $?
} @@ -487,7 +474,7 @@ run_test 5e "delayed connect, don't crash (bug 10268)" test_5f() { if combined_mgs_mds ; then - skip "combined mgs and mds" + skip "needs separate mgs and mds" return 0 fi @@ -608,7 +595,7 @@ is_blkdev () { test_17() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -637,7 +624,7 @@ run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should ret test_18() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -728,7 +715,7 @@ test_20() { [ -e $DIR/$tfile ] && error "$DIR/$tfile exists incorrectly" remount_client rw $MOUNT || error "remount_client with rw failed" touch $DIR/$tfile || error "touch $DIR/$tfile failed" - MCNT=$(grep -c $MOUNT /etc/mtab) + MCNT=$(grep -c $MOUNT' ' /etc/mtab) [ "$MCNT" -ne 1 ] && error "$MOUNT in /etc/mtab $MCNT times" umount_client $MOUNT stop_mds || error "Unable to stop MDS" @@ -960,9 +947,10 @@ test_24a() { local fs2ostdev=$(ostdevname 1_2) local fs2mdsvdev=$(mdsvdevname 1_2) local fs2ostvdev=$(ostvdevname 1_2) + local cl_user - # test 8-char fsname as well - local FSNAME2=test1234 + # LU-9733 test fsname started with numbers as well + local FSNAME2=969362ae add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --nomgs --mgsnode=$MGSNID \ --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10 @@ -975,6 +963,15 @@ test_24a() { start fs2ost $fs2ostdev $OST_MOUNT_OPTS mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed" $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed" + + # LU-9733 test fsname started with numbers + cl_user=$(do_facet $SINGLEMDS lctl --device $FSNAME2-MDT0000 \ + changelog_register -n) || + error "register changelog failed" + + do_facet $SINGLEMDS lctl --device $FSNAME2-MDT0000 \ + changelog_deregister $cl_user || + error "deregister changelog failed" # 1 still works check_mount || error "check_mount failed" # files written on 1 should not show up on 2 @@ -1085,7 +1082,7 @@ test_27b() { } run_test 27b "Reacquire MGS lock after failover" -test_28() { +test_28A() { # was test_28 setup TEST="$LCTL get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb" PARAM="$FSNAME.llite.max_read_ahead_whole_mb" @@ -1108,7 +1105,7 @@ test_28() { error "third set_conf_param_and_check client failed" cleanup || error "cleanup failed with rc $?" 
} -run_test 28 "permanent parameter setting" +run_test 28A "permanent parameter setting" test_28a() { # LU-4221 [[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] || @@ -1183,7 +1180,7 @@ test_28a() { # LU-4221 run_test 28a "set symlink parameters permanently with conf_param" test_29() { - [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return setup > /dev/null 2>&1 start_ost2 || error "Unable to start OST2" sleep 10 @@ -1677,6 +1674,7 @@ t32_test() { local tarball=$1 local writeconf=$2 local dne_upgrade=${dne_upgrade:-"no"} + local dom_upgrade=${dom_upgrade:-"no"} local ff_convert=${ff_convert:-"no"} local shall_cleanup_mdt=false local shall_cleanup_mdt1=false @@ -2013,11 +2011,6 @@ t32_test() { shall_cleanup_lustre=true $r $LCTL set_param debug="$PTLDEBUG" - t32_verify_quota $node $fsname $tmp/mnt/lustre || { - error_noexit "verify quota failed" - return 1 - } - if $r test -f $tmp/list; then # # There is not a Test Framework API to copy files to or @@ -2069,6 +2062,43 @@ t32_test() { echo "list verification skipped" fi + if [ "$dom_upgrade" != "no" ]; then + echo "Check DoM file can be created" + $LFS setstripe -E 1M -L mdt -E EOF $tmp/mnt/lustre/dom || { + error_noexit "Verify DoM creation" + return 1 + } + [ $($LFS getstripe -L $tmp/mnt/lustre/dom) == "mdt" ] || { + error_noexit "Verify a DoM file" + return 1 + } + dd if=/dev/urandom of=$tmp/mnt/lustre/dom bs=4096 \ + count=1 conv=fsync || { + error_noexit "Cannot write to DoM file" + return 1 + } + [ $(stat -c%s $tmp/mnt/lustre/dom) == 4096 ] || { + error_noexit "DoM: bad size after write" + return 1 + } + rm $tmp/mnt/lustre/dom + + $r $LCTL get_param -n lod.*MDT0000*.dom_stripesize || { + error_noexit "Getting \"dom_stripesize\"" + return 1 + } + $r $LCTL conf_param \ + $fsname-MDT0000.lod.dom_stripesize=0 || { + error_noexit "Changing \"dom_stripesize\"" + return 1 + } + wait_update $(facet_host mds) "$LCTL get_param \ + -n lod.*MDT0000*.dom_stripesize" 0 || { + error_noexit "Verifying \"dom_stripesize\"" + return 1 + } + fi + if [ "$dne_upgrade" != "no" ]; then $LFS mkdir -i 1 -c2 $tmp/mnt/lustre/striped_dir || { error_noexit "set striped dir failed" @@ -2210,7 +2240,7 @@ t32_test() { mdt_index=$($LFS getdirstripe -i $dir) stripe_cnt=$($LFS getdirstripe -c $dir) if [ $mdt_index = 0 -a $stripe_cnt -le 1 ]; then - $LFS mv -M 1 $dir || { + $LFS migrate -m 1 $dir || { popd error_noexit "migrate MDT1 failed" return 1 @@ -2222,7 +2252,7 @@ t32_test() { mdt_index=$($LFS getdirstripe -i $dir) stripe_cnt=$($LFS getdirstripe -c $dir) if [ $mdt_index = 1 -a $stripe_cnt -le 1 ]; then - $LFS mv -M 0 $dir || { + $LFS migrate -m 0 $dir || { popd error_noexit "migrate MDT0 failed" return 1 @@ -2373,6 +2403,24 @@ test_32d() { } run_test 32d "convert ff test" +test_32e() { + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.56) ]] || + { skip "Need MDS version at least 2.10.56"; return 0; } + + local tarballs + local tarball + local rc=0 + + t32_check + for tarball in $tarballs; do + echo $tarball | grep "2_9" || continue + #load_modules + dom_upgrade=yes t32_test $tarball writeconf || let "rc += $?" + done + return $rc +} +run_test 32e "dom upgrade test" + test_33a() { # bug 12333, was test_33 local FSNAME2=test-123 local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) @@ -2397,15 +2445,20 @@ test_33a() { # bug 12333, was test_33 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931. 
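# (mke2fs reads -J size= in megabytes, so this requests an 8 MB journal)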
fi - add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \ - --reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10 + if combined_mgs_mds; then + local mgs_flag="--mgs" + fi + + add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --fsname=${FSNAME2} \ + --reformat $mgs_flag $mkfsoptions $fs2mdsdev $fs2mdsvdev || + exit 10 add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \ --fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \ $fs2ostvdev || exit 10 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT start fs2ost $fs2ostdev $OST_MOUNT_OPTS - do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || + do_facet mgs "$LCTL conf_param $FSNAME2.sys.timeout=200" || error "$LCTL conf_param $FSNAME2.sys.timeout=200 failed" mkdir -p $MOUNT2 || error "mkdir $MOUNT2 failed" $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || error "$MOUNT_CMD failed" @@ -2607,7 +2660,7 @@ test_35b() { # bug 18674 run_test 35b "Continue reconnection retries, if the active server is busy" test_36() { # 12743 - [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return + [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || { skip "remote OST" && return 0; } @@ -2696,7 +2749,7 @@ test_37() { local rc=0 if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Currently only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -2880,7 +2933,7 @@ test_41b() { echo "blah blah" > $MOUNT/$tfile cat $MOUNT/$tfile || error "cat $MOUNT/$tfile failed" - umount_client $MOUNT || error "umount_client $MOUNT failed" + umount_client $MOUNT -f || error "umount_client $MOUNT failed" stop_ost || error "Unable to stop OST1" stop_mds || error "Unable to stop MDS" stop_mds || error "Unable to stop MDS on second try" @@ -3674,7 +3727,7 @@ test_50f() { run_test 50f "normal statfs one server in down" test_50g() { - [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return setup start_ost2 || error "Unable to start OST2" wait_osc_import_state mds ost2 FULL @@ -3705,7 +3758,7 @@ run_test 50g "deactivated OST should not cause panic" # LU-642 test_50h() { # prepare MDT/OST, make OSC inactive for OST1 - [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs" && return [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" || @@ -3743,7 +3796,7 @@ run_test 50h "LU-642: activate deactivated OST" test_50i() { # prepare MDT/OST, make OSC inactive for OST1 - [ "$MDSCOUNT" -lt "2" ] && skip_env "$MDSCOUNT < 2, skipping" && return + [ "$MDSCOUNT" -lt "2" ] && skip_env "needs >= 2 MDTs" && return load_modules [ $(facet_fstype mds2) == zfs ] && import_zpool mds2 @@ -3876,7 +3929,7 @@ diff_files_xattrs() test_52() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4095,7 +4148,7 @@ run_test 53b "check MDS thread count params" test_54a() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4107,7 +4160,7 @@ run_test 54a "test llverdev and partial verify of device" test_54b() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4125,7 +4178,7 @@ 
lov_objid_size() test_55() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4393,7 +4446,7 @@ test_60() { # LU-471 local num if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4503,7 +4556,7 @@ run_test 61 "large xattr" test_62() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4527,7 +4580,7 @@ run_test 62 "start with disabled journal" test_63() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based MDTs" + skip "ldiskfs only test" return fi @@ -4571,7 +4624,7 @@ run_test 64 "check lfs df --lazy " test_65() { # LU-2237 # Currently, the test is only valid for ldiskfs backend [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && - skip "non-ldiskfs backend" && return + skip "ldiskfs only test" && return local devname=$(mdsdevname ${SINGLEMDS//mds/}) local brpt=$(facet_mntpt brpt) @@ -4714,7 +4767,7 @@ test_67() { #LU-2950 lctl --net tcp4 add_route 10.3.3.4@tcp 1 3 VERIFY_LNET_CONFIG - lustre_routes_conversion $legacy $new > /dev/null + $LUSTRE_ROUTES_CONVERSION $legacy $new > /dev/null if [ -f $new ]; then # verify the conversion output cmp -s $new $verify > /dev/null @@ -5012,6 +5065,7 @@ test_70e() { soc=$(do_facet mds1 "$LCTL get_param -n \ mdt.*MDT0000.sync_lock_cancel") [ $soc == "never" ] || error "SoC enabled on single MDS" + umount_client $MOUNT -f > /dev/null cleanup || error "cleanup failed with $?" } @@ -5436,7 +5490,7 @@ run_test 77 "comma-separated MGS NIDs and failover node NIDs" test_78() { [[ $(facet_fstype $SINGLEMDS) != ldiskfs || $(facet_fstype ost1) != ldiskfs ]] && - skip "only applicable to ldiskfs-based MDTs and OSTs" && return + skip "ldiskfs only test" && return # reformat the Lustre filesystem with a smaller size local saved_MDSCOUNT=$MDSCOUNT @@ -5695,7 +5749,7 @@ restore_ostindex() { test_81() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; } + [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; } stopall @@ -5757,7 +5811,7 @@ run_test 81 "sparse OST indexing" test_82a() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 3 ]] || { skip_env "Need at least 3 OSTs" && return; } + [[ $OSTCOUNT -ge 3 ]] || { skip_env "needs >= 3 OSTs" && return; } stopall @@ -5855,6 +5909,9 @@ cleanup_82b() { # Remove OSTs from a pool and destroy the pool. destroy_pool $ost_pool || true + if ! combined_mgs_mds ; then + umount_mgs_client + fi restore_ostindex } @@ -5865,7 +5922,7 @@ cleanup_82b() { test_82b() { # LU-4665 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] || { skip "Need MDS version at least 2.6.54" && return; } - [[ $OSTCOUNT -ge 4 ]] || { skip_env "Need at least 4 OSTs" && return; } + [[ $OSTCOUNT -ge 4 ]] || { skip_env "needs >= 4 OSTs" && return; } stopall @@ -5894,6 +5951,10 @@ test_82b() { # LU-4665 done mount_client $MOUNT || error "mount client $MOUNT failed" + if ! 
combined_mgs_mds ; then + mount_mgs_client + fi + wait_osts_up $LFS df $MOUNT || error "$LFS df $MOUNT failed" mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed" @@ -5956,7 +6017,7 @@ test_83() { [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] || { skip "Need OST version at least 2.6.91" && return 0; } if [ $(facet_fstype ost1) != ldiskfs ]; then - skip "Only applicable to ldiskfs-based OSTs" + skip "ldiskfs only test" return fi @@ -6134,7 +6195,7 @@ test_87() { #LU-6544 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] || { skip "Need MDS version at least 2.9.51" && return; } [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] && - { skip "Only applicable to ldiskfs-based MDTs" && return; } + { skip "ldiskfs only test" && return; } [[ $OSTCOUNT -gt 59 ]] && { skip "Ignore wide striping situation" && return; } @@ -6183,7 +6244,7 @@ test_87() { #LU-6544 check_mount || error "check client $MOUNT failed" #set xattr - $SETSTRIPE -E 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file || + $SETSTRIPE -E 1M -S 1M -c 1 -E 64M -c 1 -E -1 -c -1 $file || error "Create file with 3 components failed" $TRUNCATE $file $((1024*1024*64+1)) || error "truncate file failed" i=$($GETSTRIPE -I3 -c $file) || error "get 3rd stripe count failed" @@ -7118,7 +7179,7 @@ run_test 98 "Buffer-overflow check while parsing mount_opts" test_99() { [[ $(facet_fstype ost1) != ldiskfs ]] && - { skip "Only applicable to ldiskfs-based OSTs" && return; } + { skip "ldiskfs only test" && return; } [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] || { skip "Need OST version at least 2.8.57" && return 0; } @@ -7144,7 +7205,7 @@ test_99() do_facet ost1 $DEBUGFS -c -R stats `ostdevname 1` | grep "meta_bg" || error "meta_bg is not set" - return 0 + reformat } run_test 99 "Adding meta_bg option" @@ -7218,7 +7279,7 @@ test_102() { fi # unload all and only load libcfs to allow fail_loc setting - do_facet mds1 lustre_rmmod || error "unable to unload modules" + do_facet mds1 $LUSTRE_RMMOD || error "unable to unload modules" do_facet mds1 modprobe libcfs || error "libcfs not loaded" do_facet mds1 lsmod \| grep libcfs || error "libcfs not loaded" @@ -7243,7 +7304,7 @@ test_renamefs() { echo "rename $FSNAME to $newname" - if [ ! combined_mgs_mds ]; then + if ! combined_mgs_mds ; then local facet=$(mgsdevname) do_facet mgs \ @@ -7315,6 +7376,9 @@ test_103() { cp $LUSTRE/tests/test-framework.sh $DIR/$tdir || error "(2) Fail to copy test-framework.sh" + if ! combined_mgs_mds ; then + mount_mgs_client + fi do_facet mgs $LCTL pool_new $FSNAME.pool1 || error "(3) Fail to create $FSNAME.pool1" # name the pool name as the fsname @@ -7326,6 +7390,9 @@ test_103() { $SETSTRIPE -p $FSNAME $DIR/$tdir/d0 || error "(6) Fail to setstripe on $DIR/$tdir/d0" + if ! combined_mgs_mds ; then + umount_mgs_client + fi KEEP_ZPOOL=true stopall @@ -7335,6 +7402,9 @@ test_103() { FSNAME="mylustre" setupall + if ! combined_mgs_mds ; then + mount_mgs_client + fi test_103_check_pool $save_fsname 7 if [ $OSTCOUNT -ge 2 ]; then @@ -7343,6 +7413,9 @@ test_103() { $SETSTRIPE -p $save_fsname $DIR/$tdir/f0 || error "(16) Fail to setstripe on $DIR/$tdir/f0" + if ! combined_mgs_mds ; then + umount_mgs_client + fi stopall @@ -7351,8 +7424,14 @@ test_103() { FSNAME="tfs" setupall + if ! combined_mgs_mds ; then + mount_mgs_client + fi test_103_check_pool $save_fsname 17 + if !
combined_mgs_mds ; then + umount_mgs_client + fi stopall test_renamefs $save_fsname @@ -7421,7 +7500,7 @@ error_and_umount() { } test_105() { - cleanup + cleanup -f reformat setup mkdir -p $TMP/$tdir @@ -7452,6 +7531,32 @@ test_105() { } run_test 105 "check file creation for ro and rw bind mnt pt" +test_106() { + local repeat=5 + + reformat + setupall + mkdir -p $DIR/$tdir || error "create $tdir failed" + lfs setstripe -c 1 -i 0 $DIR/$tdir +#define OBD_FAIL_CAT_RECORDS 0x1312 + do_facet mds1 $LCTL set_param fail_loc=0x1312 fail_val=$repeat + + for ((i = 1; i <= $repeat; i++)); do + + #one full plain llog + createmany -o $DIR/$tdir/f- 64768 + + createmany -u $DIR/$tdir/f- 64768 + done + wait_delete_completed $((TIMEOUT * 7)) +#ASSERTION osp_sync_thread() ( thread->t_flags != SVC_RUNNING ) failed +#shows that osp code is buggy + do_facet mds1 $LCTL set_param fail_loc=0 fail_val=0 + + cleanupall +} +run_test 106 "check osp llog processing when catalog is wrapped" + test_107() { [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] || { skip "Need MDS version > 2.10.50"; return; } @@ -7472,6 +7577,513 @@ test_107() { } run_test 107 "Unknown config param should not fail target mounting" +t_108_prep() { + local facet + + $rcmd rm -rf $tmp > /dev/null 2>&1 + $rcmd mkdir -p $tmp/{mnt,images} || error "failed to mkdir remotely" + + for facet in $facets; do + [ $(facet_fstype $SINGLEMDS) = "zfs" ] && + $rcmd $ZPOOL export -f lustre-$facet > /dev/null 2>&1 + $rcmd mkdir $tmp/mnt/$facet || + error "failed to mkdir $tmp/mnt/$facet" + $rcmd dd if=/dev/zero of=$tmp/images/$facet \ + seek=199 bs=1M count=1 || + error "failed to create $tmp/images/$facet" + done +} + +t_108_mkfs() { + local role=$1 + local idx=$2 + local bkfs=$3 + local mgs=$4 + local facet=${role}$((idx + 1)) + local pool="" + [ $# -eq 5 ] && pool=$5 + + do_facet $SINGLEMDS $MKFS --fsname=lustre --$mgs \ + --$role --index=$idx --replace --backfstype=$bkfs \ + --device-size=200000 --reformat $pool $tmp/images/$facet || + error "failed to mkfs for $facet" +} + +t_108_check() { + echo "mounting client..." + mount -t lustre ${nid}:/lustre $MOUNT || + error "failed to mount lustre" + + echo "check list" + ls -l $MOUNT/local_dir || error "failed to list" + + echo "check truncate && write" + echo "dummmmmmmmmmmmm" > $MOUNT/remote_dir/fsx.c || + error "failed to truncate & write" + + echo "check create" + touch $MOUNT/foooo || + error "failed to create" + + echo "check read && write && append" + sha1sum $MOUNT/conf-sanity.sh | + awk '{ print $1 }' > $MOUNT/checksum.new || + error "failed to read(1)" + sha1sum $MOUNT/remote_dir/unlinkmany.c | + awk '{ print $1 }' >> $MOUNT/checksum.new || + error "failed to read(2)" + sha1sum $MOUNT/striped_dir/lockahead_test.o | + awk '{ print $1 }' >> $MOUNT/checksum.new || + error "failed to read(3)" + + echo "verify data" + diff $MOUNT/checksum.new $MOUNT/checksum.src || + error "failed to verify data" + + echo "done." +} + +t_108_cleanup() { + trap 0 + local facet + + echo "cleanup..."
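+ # Teardown order: unmount the client first, then each server facet; ZFS + # pools are also exported so that a later run can re-import them.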
+ umount -f $MOUNT || error "failed to umount client" + for facet in $facets; do + $rcmd umount -f $tmp/mnt/$facet || + error "failed to umount $facet" + if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then + $rcmd $ZPOOL export -f lustre-$facet || + error "failed to export lustre-$facet" + fi + done + + $rcmd rm -rf $tmp || error "failed to rm the dir $tmp" +} + +test_108a() { + [ "$CLIENTONLY" ] && skip "Client-only testing" && return + + [ $(facet_fstype $SINGLEMDS) != "zfs" ] && + skip "zfs only test" && return + + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] && + skip "Need server version at least 2.10.58" && return + + stopall + load_modules + + local tmp=$TMP/$tdir + local rcmd="do_facet $SINGLEMDS" + local facets="mdt1 mdt2 ost1 ost2" + local nid=$($rcmd $LCTL list_nids | head -1) + local facet + + trap t_108_cleanup EXIT ERR + t_108_prep + + t_108_mkfs mdt 0 zfs mgs lustre-mdt1/mdt1 + t_108_mkfs mdt 1 zfs mgsnode=$nid lustre-mdt2/mdt2 + t_108_mkfs ost 0 zfs mgsnode=$nid lustre-ost1/ost1 + t_108_mkfs ost 1 zfs mgsnode=$nid lustre-ost2/ost2 + + for facet in $facets; do + $rcmd zfs set mountpoint=$tmp/mnt/$facet canmount=on \ + lustre-$facet/$facet || + error "failed to zfs set for $facet (1)" + $rcmd zfs mount lustre-$facet/$facet || + error "failed to local mount $facet" + $rcmd tar jxf $LUSTRE/tests/ldiskfs_${facet}_2_11.tar.bz2 \ + --xattrs --xattrs-include="trusted.*" \ + -C $tmp/mnt/$facet/ > /dev/null 2>&1 || + error "failed to untar image for $facet" + $rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" || + error "failed to cleanup for $facet" + $rcmd zfs umount lustre-$facet/$facet || + error "failed to local umount $facet" + $rcmd zfs set canmount=off lustre-$facet/$facet || + error "failed to zfs set $facet (2)" + done + + echo "changing server nid..." + $rcmd mount -t lustre -o nosvc lustre-mdt1/mdt1 $tmp/mnt/mdt1 + $rcmd lctl replace_nids lustre-MDT0000 $nid + $rcmd lctl replace_nids lustre-MDT0001 $nid + $rcmd lctl replace_nids lustre-OST0000 $nid + $rcmd lctl replace_nids lustre-OST0001 $nid + $rcmd umount $tmp/mnt/mdt1 + + for facet in $facets; do + echo "mounting $facet from backup..." 
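+ # abort_recov aborts recovery on mount: the restored targets still carry + # client state from the original filesystem that nothing will replay.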
+ $rcmd mount -t lustre -o abort_recov lustre-$facet/$facet \ + $tmp/mnt/$facet || error "failed to mount $facet" + done + + # ZFS backend can detect migration and trigger OI scrub automatically + # sleep 3 seconds for scrub done + sleep 3 + + t_108_check + t_108_cleanup +} +run_test 108a "migrate from ldiskfs to ZFS" + +test_108b() { + [ "$CLIENTONLY" ] && skip "Client-only testing" && return + + [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && + skip "ldiskfs only test" && return + + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] && + skip "Need server version at least 2.10.58" && return + + stopall + load_modules + + local tmp=$TMP/$tdir + local rcmd="do_facet $SINGLEMDS" + local facets="mdt1 mdt2 ost1 ost2" + local scrub_list="MDT0000 MDT0001 OST0000 OST0001" + local nid=$($rcmd $LCTL list_nids | head -1) + local facet + + trap t_108_cleanup EXIT ERR + t_108_prep + + t_108_mkfs mdt 0 ldiskfs mgs + t_108_mkfs mdt 1 ldiskfs mgsnode=$nid + t_108_mkfs ost 0 ldiskfs mgsnode=$nid + t_108_mkfs ost 1 ldiskfs mgsnode=$nid + + for facet in $facets; do + $rcmd mount -t ldiskfs -o loop $tmp/images/$facet \ + $tmp/mnt/$facet || + error "failed to local mount $facet" + $rcmd tar jxf $LUSTRE/tests/zfs_${facet}_2_11.tar.bz2 \ + --xattrs --xattrs-include="*.*" \ + -C $tmp/mnt/$facet/ > /dev/null 2>&1 || + error "failed to untar image for $facet" + $rcmd "cd $tmp/mnt/$facet && rm -rf oi.* OI_* lfsck_* LFSCK" || + error "failed to cleanup for $facet" + $rcmd umount $tmp/mnt/$facet || + error "failed to local umount $facet" + done + + echo "changing server nid..." + $rcmd mount -t lustre -o nosvc,loop $tmp/images/mdt1 $tmp/mnt/mdt1 + $rcmd lctl replace_nids lustre-MDT0000 $nid + $rcmd lctl replace_nids lustre-MDT0001 $nid + $rcmd lctl replace_nids lustre-OST0000 $nid + $rcmd lctl replace_nids lustre-OST0001 $nid + $rcmd umount $tmp/mnt/mdt1 + + for facet in $facets; do + echo "mounting $facet from backup..." 
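+ # Unlike the ZFS path above, the loop-mounted ldiskfs targets get their + # OI scrub kicked off explicitly with lfsck_start below.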
+ $rcmd mount -t lustre -o loop,abort_recov $tmp/images/$facet \ + $tmp/mnt/$facet || error "failed to mount $facet" + done + + for facet in $scrub_list; do + $rcmd $LCTL lfsck_start -M lustre-$facet -t scrub || + error "failed to start OI scrub on $facet" + done + + # sleep 3 seconds for scrub done + sleep 3 + + t_108_check + t_108_cleanup +} +run_test 108b "migrate from ZFS to ldiskfs" + + +# +# set number of permanent parameters +# +test_109_set_params() { + local fsname=$1 + + set_conf_param_and_check mds \ + "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \ + "$fsname-MDT0000.mdd.atime_diff" \ + "62" + set_conf_param_and_check mds \ + "$LCTL get_param -n mdd.$fsname-MDT0000.atime_diff" \ + "$fsname-MDT0000.mdd.atime_diff" \ + "63" + set_conf_param_and_check client \ + "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \ + "$fsname.llite.max_read_ahead_mb" \ + "32" + set_conf_param_and_check client \ + "$LCTL get_param -n llite.$fsname*.max_read_ahead_mb" \ + "$fsname.llite.max_read_ahead_mb" \ + "64" + create_pool $fsname.pool1 || error "create pool failed" + do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 || + error "pool_add failed" + do_facet mgs $LCTL pool_remove $fsname.pool1 OST0000 || + error "pool_remove failed" + do_facet mgs $LCTL pool_add $fsname.pool1 OST0000 || + error "pool_add failed" +} + +# +# check permanent parameters +# +test_109_test_params() { + local fsname=$1 + + local atime_diff=$(do_facet mds $LCTL \ + get_param -n mdd.$fsname-MDT0000.atime_diff) + [ $atime_diff == 63 ] || error "wrong mdd parameter after clear_conf" + local max_read_ahead_mb=$(do_facet client $LCTL \ + get_param -n llite.$fsname*.max_read_ahead_mb) + [ $max_read_ahead_mb == 64 ] || + error "wrong llite parameter after clear_conf" + local ost_in_pool=$(do_facet mds $LCTL pool_list $fsname.pool1 | + grep -v "^Pool:" | sed 's/_UUID//') + [ $ost_in_pool = "$fsname-OST0000" ] || + error "wrong pool after clear_conf" +} + +# +# run lctl clear_conf, store CONFIGS before and after that +# +test_109_clear_conf() +{ + local clear_conf_arg=$1 + + local mgsdev + if ! combined_mgs_mds ; then + mgsdev=$MGSDEV + stop_mgs || error "stop_mgs failed" + start_mgs "-o nosvc" || error "start_mgs nosvc failed" + else + mgsdev=$(mdsdevname 1) + start_mdt 1 "-o nosvc" || error "start_mdt 1 nosvc failed" + fi + + do_facet mgs "rm -rf $TMP/${tdir}/conf1; mkdir -p $TMP/${tdir}/conf1;" \ + "$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf1\\\" \ + $mgsdev" + + # + # the command being tested + # + do_facet mgs $LCTL clear_conf $clear_conf_arg || + error "clear_conf failed" + if ! 
combined_mgs_mds ; then + stop_mgs || error "stop_mgs failed" + else + stop_mdt 1 || error "stop_mdt 1 failed" + fi + + do_facet mgs "rm -rf $TMP/${tdir}/conf2; mkdir -p $TMP/${tdir}/conf2;" \ + "$DEBUGFS -c -R \\\"rdump CONFIGS $TMP/${tdir}/conf2\\\" \ + $mgsdev" +} + +test_109_file_shortened() { + local file=$1 + local sizes=($(do_facet mgs "stat -c %s " \ + "$TMP/${tdir}/conf1/CONFIGS/$file" \ + "$TMP/${tdir}/conf2/CONFIGS/$file")) + [ ${sizes[1]} -lt ${sizes[0]} ] && return 0 + return 1 +} + +test_109a() +{ + [ "$(facet_fstype mgs)" == "zfs" ] && + skip "LU-8727: no implementation for ZFS" && return + stopall + reformat + setup_noconfig + client_up || error "client_up failed" + + # + # set number of permanent parameters + # + test_109_set_params $FSNAME + + umount_client $MOUNT || error "umount_client failed" + stop_ost || error "stop_ost failed" + stop_mds || error "stop_mds failed" + + test_109_clear_conf $FSNAME + # + # make sure that all configs are cleared + # + test_109_file_shortened $FSNAME-MDT0000 || + error "failed to clear MDT0000 config" + test_109_file_shortened $FSNAME-client || + error "failed to clear client config" + + setup_noconfig + + # + # check that configurations are intact + # + test_109_test_params $FSNAME + + # + # Destroy pool. + # + destroy_test_pools || error "destroy test pools failed" + + cleanup +} +run_test 109a "test lctl clear_conf fsname" + +test_109b() +{ + [ "$(facet_fstype mgs)" == "zfs" ] && + skip "LU-8727: no implementation for ZFS" && return + stopall + reformat + setup_noconfig + client_up || error "client_up failed" + + # + # set number of permanent parameters + # + test_109_set_params $FSNAME + + umount_client $MOUNT || error "umount_client failed" + stop_ost || error "stop_ost failed" + stop_mds || error "stop_mds failed" + + test_109_clear_conf $FSNAME-MDT0000 + # + # make sure that only one config is cleared + # + test_109_file_shortened $FSNAME-MDT0000 || + error "failed to clear MDT0000 config" + test_109_file_shortened $FSNAME-client && + error "client config should not be cleared" + + setup_noconfig + + # + # check that configurations are intact + # + test_109_test_params $FSNAME + + # + # Destroy pool.
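+ # (pool definitions live in the MGS configuration, so they are destroyed + # explicitly here)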
+ # + destroy_test_pools || error "destroy test pools failed" + + cleanup +} +run_test 109b "test lctl clear_conf one config" + +cleanup_115() +{ + trap 0 + stopall + rm -f $TMP/$tdir/lustre-mdt + formatall +} + +test_115() { + IMAGESIZE=$((3072 << 30)) # 3072 GiB + + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "ldiskfs only test" + return + fi + + stopall + # We need MDT size 3072GB, because it is the smallest + # partition that can store 2B inodes + do_facet $SINGLEMDS "mkdir -p $TMP/$tdir" + local mdsimgname=$TMP/$tdir/lustre-mdt + do_facet $SINGLEMDS "rm -f $mdsimgname" + do_facet $SINGLEMDS "touch $mdsimgname" + trap cleanup_115 RETURN EXIT + do_facet $SINGLEMDS "$TRUNCATE $mdsimgname $IMAGESIZE" || + { skip "Backend FS doesn't support sparse files"; return 0; } + local mdsdev=$(do_facet $SINGLEMDS "losetup -f") + do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname" + + local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \ + --mkfsoptions='-O lazy_itable_init,large_xattr,^resize_inode,meta_bg \ + -i 1024'" + add mds1 $mds_opts --mgs --reformat $mdsdev || + { skip_env "format large MDT failed"; return 0; } + add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=0 \ + --reformat $(ostdevname 1) $(ostvdevname 1) + + start $SINGLEMDS ${mdsdev} $MDS_MOUNT_OPTS || error "start MDS failed" + start_ost || error "start OSS failed" + mount_client $MOUNT || error "mount client failed" + + mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir failed" + for goal in $(do_facet $SINGLEMDS "ls /sys/fs/ldiskfs/*/inode_goal"); do + do_facet $SINGLEMDS "echo 2147483947 >> $goal; grep . $goal" + done + + touch $DIR/$tdir/$tfile + + # Add > 5k bytes to xattr + for i in {1..30}; do + ln $DIR/$tdir/$tfile $DIR/$tdir/$(printf "link%0250d" $i) || + error "Can't make link" + done + + sync; sleep 5; sync + + local inode_num=$(do_facet $SINGLEMDS \ + "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsimgname" | + awk '/link =/ { print $4 }' | + sed -e 's/>//' -e 's/