X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=1f22874ffa46e32163188091e1680035e11eeee2;hp=c2b15d3cd9d7049702a74b3171bd5a2b0eb625dd;hb=658d349a5acac33d0e5b1d27c979873ae4c585b4;hpb=0f1cd71b2c98e80cb67c4d27ee11233b1a909329 diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh index c2b15d3..1f22874 100644 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -9,8 +9,8 @@ set -e ONLY=${ONLY:-"$*"} -# bug number for skipped test: -ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT" +# bug number for skipped test: LU-2828 +ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64" # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! is_sles11() # LU-2181 @@ -49,6 +49,7 @@ PTLDEBUG=${PTLDEBUG:--1} SAVE_PWD=$PWD LUSTRE=${LUSTRE:-`dirname $0`/..} RLUSTRE=${RLUSTRE:-$LUSTRE} +LUSTRE_TESTS_API_DIR=${LUSTRE_TESTS_API_DIR:-${LUSTRE}/tests/clientapi} export MULTIOP=${MULTIOP:-multiop} . $LUSTRE/tests/test-framework.sh @@ -75,6 +76,10 @@ if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init) fi +[ $(facet_fstype $SINGLEMDS) = "zfs" ] && +# bug number for skipped test: LU-2778 LU-2059 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 50h" + init_logging # @@ -110,7 +115,7 @@ writeconf_or_reformat() { # Better reformat if it fails... writeconf_all $MDSCOUNT 2 || { echo "tunefs failed, reformatting instead" && - reformat_and_config && return 1; } + reformat_and_config && return 0; } return 0 } @@ -599,24 +604,25 @@ test_17() { return fi - setup - check_mount || return 41 - cleanup || return $? + setup + check_mount || return 41 + cleanup || return $? - echo "Remove mds config log" - if ! combined_mgs_mds ; then - stop mgs - fi + echo "Remove mds config log" + if ! combined_mgs_mds ; then + stop mgs + fi - do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' $MGSDEV || return \$?" || return $? + do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' \ + $(mgsdevname) || return \$?" || return $? - if ! combined_mgs_mds ; then - start_mgs - fi + if ! 
combined_mgs_mds ; then + start_mgs + fi - start_ost - start_mds && return 42 - reformat_and_config + start_ost + start_mds && return 42 + reformat_and_config } run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)" @@ -783,16 +789,12 @@ test_21d() { run_test 21d "start mgs then ost and then mds" test_22() { - local num - start_mds echo Client mount with ost in logs, but none running start_ost # wait until mds connected to ost and open client connection - for num in $(seq 1 $MDSCOUNT); do - wait_osc_import_state mds${num} ost FULL - done + wait_osc_import_state mds ost FULL stop_ost mount_client $MOUNT # check_mount will block trying to contact ost @@ -812,9 +814,7 @@ test_22() { sleep $((TIMEOUT + TIMEOUT + TIMEOUT)) fi mount_client $MOUNT - for num in $(seq 1 $MDSCOUNT); do - wait_osc_import_state mds${num} ost FULL - done + wait_osc_import_state mds ost FULL wait_osc_import_state client ost FULL check_mount || return 41 pass @@ -871,8 +871,8 @@ test_23a() { # was test_23 "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs" ps -ef | grep mount fi - stop_mds || error - stop_ost || error + stop_mds || error "stopping MDSes failed" + stop_ost || error "stopping OSSes failed" } run_test 23a "interrupt client during recovery mount delay" @@ -1206,6 +1206,11 @@ test_31() { # bug 10734 } run_test 31 "Connect to non-existent node (shouldn't crash)" + +T32_QID=60000 +T32_BLIMIT=20480 # Kbytes +T32_ILIMIT=2 + # # This is not really a test but a tool to create new disk # image tarballs for the upgrade tests. @@ -1236,10 +1241,19 @@ test_32newtarball() { mkdir $tmp/src tar cf - -C $src . | tar xf - -C $tmp/src + dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \ + count=$(($T32_BLIMIT / 1024 / 2)) + chown $T32_QID.$T32_QID $tmp/src/t32_qf_old formatall setupall + + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] && + $LFS quotacheck -ug /mnt/$FSNAME + $LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \ + /mnt/$FSNAME + tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME stopall @@ -1253,6 +1267,19 @@ test_32newtarball() { popd $LCTL get_param -n version | head -n 1 | sed -e 's/^lustre: *//' >$tmp/img/commit + + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] && + $LFS quotaon -ug /mnt/$FSNAME + $LFS quota -u $T32_QID -v /mnt/$FSNAME + $LFS quota -v -u $T32_QID /mnt/$FSNAME | + awk 'BEGIN { num='1' } { if ($1 == "'/mnt/$FSNAME'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*" > $tmp/img/bspace + $LFS quota -v -u $T32_QID /mnt/$FSNAME | + awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*" > $tmp/img/ispace + stopall pushd $tmp/src @@ -1298,11 +1325,6 @@ t32_check() { exit 0 fi - if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then - skip "LU-2200: Test cannot run over Infiniband" - exit 0 - fi - local IMGTYPE=$(facet_fstype $SINGLEMDS) tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\') @@ -1314,8 +1336,6 @@ t32_check() { } t32_test_cleanup() { - local node=$(facet_active_host $SINGLEMDS) - local r="do_node $node" local tmp=$TMP/t32 local rc=$? @@ -1331,8 +1351,9 @@ t32_test_cleanup() { if $shall_cleanup_ost; then $r umount -d $tmp/mnt/ost || rc=$? fi - $r rm -rf $tmp || rc=$? - rm -rf $tmp || rc=$? 
+ + $r rm -rf $tmp + rm -rf $tmp return $rc } @@ -1359,7 +1380,7 @@ t32_reload_modules() { all_removed=true do_rpc_nodes $node check_mem_leak || return 1 if $all_removed; then - load_modules + do_rpc_nodes $node load_modules return 0 fi sleep 5 @@ -1377,8 +1398,7 @@ t32_wait_til_devices_gone() { echo wait for devices to go while ((i < 20)); do devices=$(do_rpc_nodes $node $LCTL device_list | wc -l) - echo $device - ((devices == 0)) && return 1 + ((devices == 0)) && return 0 sleep 5 i=$((i + 1)) done @@ -1386,10 +1406,91 @@ t32_wait_til_devices_gone() { return 1 } +t32_verify_quota() { + local node=$1 + local fsname=$2 + local mnt=$3 + local fstype=$(facet_fstype $SINGLEMDS) + local qval + local cmd + + $LFS quota -u $T32_QID -v $mnt + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='1' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $img_bspace ] || { + echo "bspace, act:$qval, exp:$img_bspace" + return 1 + } + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='5' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $img_ispace ] || { + echo "ispace, act:$qval, exp:$img_ispace" + return 1 + } + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $T32_BLIMIT ] || { + echo "blimit, act:$qval, exp:$T32_BLIMIT" + return 1 + } + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $T32_ILIMIT ] || { + echo "ilimit, act:$qval, exp:$T32_ILIMIT" + return 1 + } + + do_node $node $LCTL conf_param $fsname.quota.mdt=ug + cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000" + cmd=$cmd.quota_slave.enabled + wait_update $node "$cmd" "ug" || { + echo "Enable mdt quota failed" + return 1 + } + + do_node $node $LCTL conf_param $fsname.quota.ost=ug + cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000" + cmd=$cmd.quota_slave.enabled + wait_update $node "$cmd" "ug" || { + echo "Enable ost quota failed" + return 1 + } + + chmod 0777 $mnt + runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \ + bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && { + echo "Write succeed, but expect -EDQUOT" + return 1 + } + rm -f $mnt/t32_qf_new + + runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \ + $T32_ILIMIT && { + echo "Create succeed, but expect -EDQUOT" + return 1 + } + unlinkmany $mnt/t32_qf_ $T32_ILIMIT + + return 0 +} + t32_test() { local tarball=$1 local writeconf=$2 local dne_upgrade=${dne_upgrade:-"no"} + local ff_convert=${ff_convert:-"no"} local shall_cleanup_mdt=false local shall_cleanup_mdt1=false local shall_cleanup_ost=false @@ -1397,11 +1498,12 @@ t32_test() { local node=$(facet_active_host $SINGLEMDS) local r="do_node $node" local node2=$(facet_active_host mds2) - local r2="do_node $node2" local tmp=$TMP/t32 local img_commit local img_kernel local img_arch + local img_bspace + local img_ispace local fsname=t32fs local nid=$($r $LCTL list_nids | head -1) local mopts @@ -1409,6 +1511,7 @@ t32_test() { local nrpcs_orig local nrpcs local list + local fstype=$(facet_fstype $SINGLEMDS) trap 'trap - RETURN; t32_test_cleanup' RETURN @@ -1421,11 +1524,16 @@ t32_test() { img_commit=$($r cat $tmp/commit) img_kernel=$($r cat $tmp/kernel) img_arch=$($r cat $tmp/arch) + img_bspace=$($r 
cat $tmp/bspace) + img_ispace=$($r cat $tmp/ispace) echo "Upgrading from $(basename $tarball), created with:" echo " Commit: $img_commit" echo " Kernel: $img_kernel" echo " Arch: $img_arch" + local version=$(version_code $img_commit) + [[ $version -gt $(version_code 2.4.0) ]] && ff_convert="no" + $r $LCTL set_param debug="$PTLDEBUG" $r $TUNEFS --dryrun $tmp/mdt || { @@ -1434,7 +1542,27 @@ t32_test() { } if [ "$writeconf" ]; then mopts=loop,writeconf + if [ $fstype == "ldiskfs" ]; then + $r $TUNEFS --quota $tmp/mdt || { + error_noexit "Enable mdt quota feature" + return 1 + } + fi else + if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then + [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] || + { skip "LU-2200: Cannot run over Inifiniband w/o lctl replace_nids " + "(Need MGS version at least 2.3.59)"; return 0; } + + local osthost=$(facet_active_host ost1) + local ostnid=$(do_node $osthost $LCTL list_nids | head -1) + + $r mount -t lustre -o loop,nosvc $tmp/mdt $tmp/mnt/mdt + $r lctl replace_nids $fsname-OST0000 $ostnid + $r lctl replace_nids $fsname-MDT0000 $nid + $r umount $tmp/mnt/mdt + fi + mopts=loop,exclude=$fsname-OST0000 fi @@ -1447,21 +1575,28 @@ t32_test() { shall_cleanup_mdt=true if [ "$dne_upgrade" != "no" ]; then - echo "mkfs new MDT...." - add mds2 $(mkfs_opts mds2 $(mdsdevname 2) $fsname) --reformat \ - $(mdsdevname 2) $(mdsvdevname 2) > /dev/null || { + local fs2mdsdev=$(mdsdevname 1_2) + local fs2mdsvdev=$(mdsvdevname 1_2) + + echo "mkfs new MDT on ${fs2mdsdev}...." + if [ $(facet_fstype mds1) == ldiskfs ]; then + mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" + fi + + add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \ + $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || { error_noexit "Mkfs new MDT failed" return 1 } - $r2 $TUNEFS --dryrun $(mdsdevname 2) || { + $r $TUNEFS --dryrun $fs2mdsdev || { error_noexit "tunefs.lustre before mounting the MDT" return 1 } - echo "mount new MDT...." - $r2 mkdir -p $tmp/mnt/mdt1 - $r2 mount -t lustre -o $mopts $(mdsdevname 2) $tmp/mnt/mdt1 || { + echo "mount new MDT....$fs2mdsdev" + $r mkdir -p $tmp/mnt/mdt1 + $r mount -t lustre -o $mopts $fs2mdsdev $tmp/mnt/mdt1 || { error_noexit "mount mdt1 failed" return 1 } @@ -1483,6 +1618,12 @@ t32_test() { } if [ "$writeconf" ]; then mopts=loop,mgsnode=$nid,$writeconf + if [ $fstype == "ldiskfs" ]; then + $r $TUNEFS --quota $tmp/ost || { + error_noexit "Enable ost quota feature" + return 1 + } + fi else mopts=loop,mgsnode=$nid fi @@ -1526,17 +1667,41 @@ t32_test() { return 1 } + if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then + $r $LCTL lfsck_start -M $fsname-OST0000 || { + error_noexit "Start OI scrub on OST0" + return 1 + } + + # The oi_scrub should be on ost1, but for test_32(), + # all on the SINGLEMDS. 
+ wait_update_facet $SINGLEMDS "$LCTL get_param -n \ + osd-ldiskfs.$fsname-OST0000.oi_scrub | + awk '/^status/ { print \\\$2 }'" "completed" 30 || { + error_noexit "Failed to get the expected 'completed'" + return 1 + } + + local UPDATED=$($r $LCTL get_param -n \ + osd-ldiskfs.$fsname-OST0000.oi_scrub | + awk '/^updated/ { print $2 }') + [ $UPDATED -ge 1 ] || { + error_noexit "Only $UPDATED objects have been converted" + return 1 + } + fi + if [ "$dne_upgrade" != "no" ]; then - $r2 $LCTL conf_param \ + $r $LCTL conf_param \ $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || { error_noexit "Setting MDT1 \"max_rpcs_in_flight\"" return 1 } - $r2 $LCTL conf_param $fsname-MDT0001.failover.node=$nid || { + $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || { error_noexit "Setting MDT1 \"failover.node\"" return 1 } - $r2 $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || { + $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || { error_noexit "Setting MDT1 \"lov.stripesize\"" return 1 } @@ -1550,6 +1715,12 @@ t32_test() { } shall_cleanup_lustre=true $LCTL set_param debug="$PTLDEBUG" + + t32_verify_quota $node $fsname $tmp/mnt/lustre || { + error_noexit "verify quota failed" + return 1 + } + if [ "$dne_upgrade" != "no" ]; then $LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || { error_noexit "set remote dir failed" @@ -1565,6 +1736,15 @@ t32_test() { popd fi + dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || { + error_noexit "dd failed" + return 1 + } + rm -rf $tmp/mnt/lustre/tmp_file || { + error_noexit "rm failed" + return 1 + } + if $r test -f $tmp/sha1sums; then # LU-2393 - do both sorts on same node to ensure locale # is identical @@ -1617,8 +1797,8 @@ t32_test() { # on an architecture with different number of bits per # "long". # - if [ $(t32_bits_per_long $(uname -m)) != \ - $(t32_bits_per_long $img_arch) ]; then + if [ $(t32_bits_per_long $(uname -m)) != \ + $(t32_bits_per_long $img_arch) ]; then echo "Different number of bits per \"long\" from the disk image" for list in list.orig list; do sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list @@ -1661,11 +1841,20 @@ t32_test() { } shall_cleanup_lustre=false else + if [ "$dne_upgrade" != "no" ]; then + $r umount -d $tmp/mnt/mdt1 || { + error_noexit "Unmounting the MDT2" + return 1 + } + shall_cleanup_mdt1=false + fi + $r umount -d $tmp/mnt/mdt || { error_noexit "Unmounting the MDT" return 1 } shall_cleanup_mdt=false + $r umount -d $tmp/mnt/ost || { error_noexit "Unmounting the OST" return 1 @@ -1682,7 +1871,7 @@ t32_test() { error_noexit "tunefs.lustre before remounting the MDT" return 1 } - $r mount -t lustre -o loop,exclude=$fsname-OST0000 $tmp/mdt \ + $r mount -t lustre -o loop,exclude=$fsname-OST0000 $tmp/mdt \ $tmp/mnt/mdt || { error_noexit "Remounting the MDT" return 1 @@ -1698,7 +1887,7 @@ test_32a() { t32_check for tarball in $tarballs; do - t32_test $tarball || rc=$? + t32_test $tarball || let "rc += $?" done return $rc } @@ -1711,7 +1900,7 @@ test_32b() { t32_check for tarball in $tarballs; do - t32_test $tarball writeconf || rc=$? + t32_test $tarball writeconf || let "rc += $?" done return $rc } @@ -1731,6 +1920,19 @@ test_32c() { } run_test 32c "dne upgrade test" +test_32d() { + local tarballs + local tarball + local rc=0 + + t32_check + for tarball in $tarballs; do + ff_convert=yes t32_test $tarball || rc=$? 
+ done + return $rc +} +run_test 32d "convert ff test" + test_33a() { # bug 12333, was test_33 local rc=0 local FSNAME2=test-123 @@ -1847,9 +2049,10 @@ test_35a() { # bug 12459 log "Set up a fake failnode for the MDS" FAKENID="127.0.0.2" - local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) - do_facet mgs "$LCTL conf_param ${device}.failover.node=" \ - "$(h2$NETTYPE $FAKENID)" || return 4 + local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) + do_facet mgs "$LCTL conf_param \ + ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 4 log "Wait for RECONNECT_INTERVAL seconds (10s)" sleep 10 @@ -1901,10 +2104,10 @@ test_35b() { # bug 18674 log "Set up a fake failnode for the MDS" FAKENID="127.0.0.2" - local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | \ - awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) - do_facet mgs "$LCTL conf_param ${device}.failover.node=" \ - "$(h2$NETTYPE $FAKENID)" || return 1 + local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) + do_facet mgs "$LCTL conf_param \ + ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 1 local at_max_saved=0 # adaptive timeouts may prevent seeing the issue @@ -2231,15 +2434,24 @@ test_41b() { run_test 41b "mount mds with --nosvc and --nomgs on first mount" test_42() { #bug 14693 - setup - check_mount || return 2 - do_facet mgs $LCTL conf_param lustre.llite.some_wrong_param=10 - umount_client $MOUNT - mount_client $MOUNT || return 1 - cleanup - return 0 + setup + check_mount || error "client was not mounted" + + do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10 + umount_client $MOUNT || + error "unmounting client failed with invalid llite param" + mount_client $MOUNT || + error "mounting client failed with invalid llite param" + + do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20 + cleanup || error "stopping $FSNAME failed with invalid sys param" + load_modules + setup + check_mount || "client was not mounted with invalid sys param" + cleanup || error "stopping $FSNAME failed with invalid sys param" + return 0 } -run_test 42 "invalid config param should not prevent client from mounting" +run_test 42 "allow client/server mount/unmount with invalid config param" test_43() { [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root" @@ -2774,6 +2986,42 @@ test_50g() { } run_test 50g "deactivated OST should not cause panic=====================" +# LU-642 +test_50h() { + # prepare MDT/OST, make OSC inactive for OST1 + [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" || + error "tunefs OST1 failed" + start_mds || error "Unable to start MDT" + start_ost || error "Unable to start OST1" + start_ost2 || error "Unable to start OST2" + mount_client $MOUNT || error "client start failed" + + mkdir -p $DIR/$tdir + + # activatate OSC for OST1 + local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active" + set_conf_param_and_check client \ + "$TEST" "${FSNAME}-OST0000.osc.active" 1 || + error "Unable to activate OST1" + + mkdir -p $DIR/$tdir/2 + $LFS setstripe -c -1 -i 0 $DIR/$tdir/2 + sleep 1 && echo "create a file after OST1 is activated" + # create some file + createmany -o $DIR/$tdir/2/$tfile-%d 1 + + # check OSC import is working + stat $DIR/$tdir/2/* >/dev/null 2>&1 || + error "some 
OSC imports are still not connected" + + # cleanup + umount_client $MOUNT || error "Unable to umount client" + stop_ost2 || error "Unable to stop OST2" + cleanup_nocli +} +run_test 50h "LU-642: activate deactivated OST ===" + test_51() { local LOCAL_TIMEOUT=20 @@ -2966,7 +3214,6 @@ thread_sanity() { local nthrs shift 4 - setup check_mount || return 41 # We need to expand $parampat, but it may match multiple parameters, so @@ -3035,23 +3282,33 @@ thread_sanity() { tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min") tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max") tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started") - lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $? + lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $? cleanup load_modules setup - cleanup } test_53a() { + setup thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16' + cleanup } run_test 53a "check OSS thread count params" test_53b() { - thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' '16' + setup + local mds=$(do_facet $SINGLEMDS "lctl get_param -N mds.*.*.threads_max \ + 2>/dev/null") + if [ -z "$mds" ]; then + #running this on an old MDT + thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16 + else + thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16 + fi + cleanup } -run_test 53b "check MDT thread count params" +run_test 53b "check MDS thread count params" test_54a() { if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then @@ -3147,6 +3404,7 @@ run_test 56 "check big indexes" test_57a() { # bug 22656 local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}') writeconf_or_reformat + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed" start_mgsmds start_ost && error "OST registration from failnode should fail" @@ -3157,6 +3415,7 @@ run_test 57a "initial registration from failnode should fail (should return errs test_57b() { local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}') writeconf_or_reformat + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --servicenode=$NID `ostdevname 1`" || error "tunefs failed" start_mgsmds start_ost || error "OST registration from servicenode should not fail" @@ -3178,14 +3437,21 @@ test_58() { # bug 22658 createmany -o $DIR/$tdir/$tfile-%d 100 # make sure that OSTs do not cancel llog cookies before we unmount the MDS #define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601 - do_facet mds "lctl set_param fail_loc=0x601" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x601" unlinkmany $DIR/$tdir/$tfile-%d 100 - stop mds - local MNTDIR=$(facet_mntpt mds) + stop_mds + + local MNTDIR=$(facet_mntpt $SINGLEMDS) + local devname=$(mdsdevname ${SINGLEMDS//mds/}) + local opts="" + if ! do_facet $SINGLEMDS "test -b $devname"; then + opts="-o loop" + fi + # remove all files from the OBJECTS dir - do_facet mds "mount -t ldiskfs $MDSDEV $MNTDIR" - do_facet mds "find $MNTDIR/OBJECTS -type f -delete" - do_facet mds "umount $MNTDIR" + do_facet $SINGLEMDS "mount -t ldiskfs $opts $devname $MNTDIR" + do_facet $SINGLEMDS "find $MNTDIR/O/1/d* -type f -delete" + do_facet $SINGLEMDS "umount $MNTDIR" # restart MDS with missing llog files start_mds do_facet mds "lctl set_param fail_loc=0" @@ -3377,9 +3643,225 @@ test_64() { echo "$LFS df" $LFS df --lazy || error "lfs df failed" cleanup || return $? 
+ #writeconf to remove all ost2 traces for subsequent tests + writeconf_or_reformat } run_test 64 "check lfs df --lazy " +test_65() { # LU-2237 + # Currently, the test is only valid for ldiskfs backend + [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && + skip "non-ldiskfs backend" && return + + local devname=$(mdsdevname ${SINGLEMDS//mds/}) + local brpt=$(facet_mntpt brpt) + local opts="" + + if ! do_facet $SINGLEMDS "test -b $devname"; then + opts="-o loop" + fi + + stop_mds + local obj=$(do_facet $SINGLEMDS \ + "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | + grep Inode) + if [ -z "$obj" ]; then + # The MDT may be just re-formatted, mount the MDT for the + # first time to guarantee the "last_rcvd" file is there. + start_mds || error "fail to mount the MDS for the first time" + stop_mds + fi + + # remove the "last_rcvd" file + do_facet $SINGLEMDS "mkdir -p $brpt" + do_facet $SINGLEMDS \ + "mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt" + do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd" + do_facet $SINGLEMDS "umount $brpt" + + # restart MDS, the "last_rcvd" file should be recreated. + start_mds || error "fail to restart the MDS" + stop_mds + obj=$(do_facet $SINGLEMDS \ + "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | grep Inode) + [ -n "$obj" ] || error "fail to re-create the last_rcvd" +} +run_test 65 "re-create the lost last_rcvd file when server mount" + +test_66() { + [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] || + { skip "Need MGS version at least 2.3.59"; return 0; } + + setup + local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1) + local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1) + + echo "replace_nids should fail if MDS, OSTs and clients are UP" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && + error "replace_nids fail" + + umount_client $MOUNT || error "unmounting client failed" + echo "replace_nids should fail if MDS and OSTs are UP" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && + error "replace_nids fail" + + stop_ost + echo "replace_nids should fail if MDS is UP" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && + error "replace_nids fail" + + stop_mds || error "stopping mds failed" + + if combined_mgs_mds; then + start_mdt 1 "-o nosvc" || + error "starting mds with nosvc option failed" + fi + + echo "command should accept two parameters" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 && + error "command should accept two params" + + echo "correct device name should be passed" + do_facet mgs $LCTL replace_nids $FSNAME-WRONG0000 $OST1_NID && + error "wrong devname" + + echo "wrong nids list should not destroy the system" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" && + error "wrong parse" + + echo "replace OST nid" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID || + error "replace nids failed" + + echo "command should accept two parameters" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 && + error "command should accept two params" + + echo "wrong nids list should not destroy the system" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" && + error "wrong parse" + + echo "replace MDS nid" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID || + error "replace nids failed" + + if ! 
combined_mgs_mds ; then + stop_mgs + else + stop_mds + fi + + setup_noconfig + check_mount || error "error after nid replace" + cleanup || error "cleanup failed" + reformat +} +run_test 66 "replace nids" + +test_67() { #LU-2950 + local legacy="$TMP/legacy_lnet_config" + local new="$TMP/new_routes_test" + local out="$TMP/config_out_file" + local verify="$TMP/conv_verify" + local verify_conf="$TMP/conf_verify" + + # Create the legacy file that will be run through the + # lustre_routes_conversion script + cat <<- LEGACY_LNET_CONFIG > $legacy + tcp1 23 192.168.213.1@tcp:1; tcp5 34 193.30.4.3@tcp:4; + tcp2 54 10.1.3.2@tcp; + tcp3 10.3.4.3@tcp:3; + tcp4 10.3.3.4@tcp; + LEGACY_LNET_CONFIG + + # Create the verification file to verify the output of + # lustre_routes_conversion script against. + cat <<- VERIFY_LNET_CONFIG > $verify + tcp1: { gateway: 192.168.213.1@tcp, hop: 23, priority: 1 } + tcp5: { gateway: 193.30.4.3@tcp, hop: 34, priority: 4 } + tcp2: { gateway: 10.1.3.2@tcp, hop: 54 } + tcp3: { gateway: 10.3.4.3@tcp, priority: 3 } + tcp4: { gateway: 10.3.3.4@tcp } + VERIFY_LNET_CONFIG + + # Create the verification file to verify the output of + # lustre_routes_config script against + cat <<- VERIFY_LNET_CONFIG > $verify_conf + lctl --net tcp1 add_route 192.168.213.1@tcp 23 1 + lctl --net tcp5 add_route 193.30.4.3@tcp 34 4 + lctl --net tcp2 add_route 10.1.3.2@tcp 54 4 + lctl --net tcp3 add_route 10.3.4.3@tcp 1 3 + lctl --net tcp4 add_route 10.3.3.4@tcp 1 3 + VERIFY_LNET_CONFIG + + lustre_routes_conversion $legacy $new > /dev/null + if [ -f $new ]; then + # verify the conversion output + cmp -s $new $verify > /dev/null + if [ $? -eq 1 ]; then + error "routes conversion failed" + fi + + lustre_routes_config --dry-run --verbose $new > $out + # check that the script succeeded + cmp -s $out $verify_conf > /dev/null + if [ $? -eq 1 ]; then + error "routes config failed" + fi + else + error "routes conversion test failed" + fi + # remove generated files + rm -f $new $legacy $verify $verify_conf $out +} +run_test 67 "test routes conversion and configuration" + +test_68() { + local fid + local seq + local START + local END + + [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.53) ] || + { skip "Need MDS version at least 2.4.53"; return 0; } + + umount_client $MOUNT || error "umount client failed" + + start_mdt 1 || error "MDT start failed" + start_ost + + # START-END - the sequences we'll be reserving + START=$(do_facet $SINGLEMDS \ + lctl get_param -n seq.ctl*.space | awk -F'[[ ]' '{print $2}') + END=$((START + (1 << 30))) + do_facet $SINGLEMDS \ + lctl set_param seq.ctl*.fldb="[$START-$END\):0:mdt" + + # reset the sequences MDT0000 has already assigned + do_facet $SINGLEMDS \ + lctl set_param seq.srv*MDT0000.space=clear + + # remount to let the client allocate new sequence + mount_client $MOUNT || error "mount client failed" + + touch $DIR/$tfile + do_facet $SINGLEMDS \ + lctl get_param seq.srv*MDT0000.space + $LFS path2fid $DIR/$tfile + + local old_ifs="$IFS" + IFS='[:]' + fid=($($LFS path2fid $DIR/$tfile)) + IFS="$old_ifs" + let seq=${fid[1]} + + if [[ $seq < $END ]]; then + error "used reserved sequence $seq?" + fi + cleanup || return $? 
+} +run_test 68 "be able to reserve specific sequences in FLDB" + test_70a() { [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return local MDTIDX=1 @@ -3624,6 +4106,203 @@ test_71e() { } run_test 71e "start OST0, MDT1, OST1, MDT0" +test_72() { #LU-2634 + local mdsdev=$(mdsdevname 1) + local ostdev=$(ostdevname 1) + local cmd="$E2FSCK -fnvd $mdsdev" + local fn=3 + + [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && + skip "ldiskfs only test" && return + + #tune MDT with "-O extents" + + for num in $(seq $MDSCOUNT); do + add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \ + --reformat $(mdsdevname $num) $(mdsvdevname $num) || + error "add mds $num failed" + $TUNE2FS -O extents $(mdsdevname $num) + done + + add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev || + error "add $ostdev failed" + start_mgsmds || error "start mds failed" + start_ost || error "start ost failed" + mount_client $MOUNT || error "mount client failed" + + #create some short symlinks + mkdir -p $DIR/$tdir + createmany -o $DIR/$tdir/$tfile-%d $fn + echo "create $fn short symlinks" + for i in $(seq -w 1 $fn); do + ln -s $DIR/$tdir/$tfile-$i $MOUNT/$tfile-$i + done + ls -al $MOUNT + + #umount + umount_client $MOUNT || error "umount client failed" + stop_mds || error "stop mds failed" + stop_ost || error "stop ost failed" + + #run e2fsck + run_e2fsck $(facet_active_host $SINGLEMDS) $mdsdev "-n" +} +run_test 72 "test fast symlink with extents flag enabled" + +test_73() { #LU-3006 + load_modules + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 + do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" || + error "1st tunefs failed" + start_mgsmds || error "start mds failed" + start_ost || error "start ost failed" + mount_client $MOUNT || error "mount client failed" + lctl get_param -n osc.*OST0000-osc-[^M]*.import | grep failover_nids | + grep 1.2.3.4@$NETTYPE || error "failover nids haven't changed" + umount_client $MOUNT || error "umount client failed" + stopall + reformat +} +run_test 73 "failnode to update from mountdata properly" + +test_74() { # LU-1606 + for TESTPROG in $LUSTRE_TESTS_API_DIR/*.c; do + gcc -Wall -Werror $LUSTRE_TESTS_API_DIR/simple_test.c \ + -I$LUSTRE/include \ + -L$LUSTRE/utils -llustreapi || + error "client api broken" + done + cleanup || return $? 
+} +run_test 74 "Lustre client api program can compile and link" + +test_75() { # LU-2374 + [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.1) ]] && + skip "Need MDS version at least 2.4.1" && return + + local index=0 + local opts_mds="$(mkfs_opts mds1 $(mdsdevname 1)) \ + --reformat $(mdsdevname 1) $(mdsvdevname 1)" + local opts_ost="$(mkfs_opts ost1 $(ostdevname 1)) \ + --reformat $(ostdevname 1) $(ostvdevname 1)" + + #check with default parameters + add mds1 $opts_mds || error "add mds1 failed for default params" + add ost1 $opts_ost || error "add ost1 failed for default params" + + opts_mds=$(echo $opts_mds | sed -e "s/--mdt//") + opts_mds=$(echo $opts_mds | + sed -e "s/--index=$index/--index=$index --mdt/") + opts_ost=$(echo $opts_ost | sed -e "s/--ost//") + opts_ost=$(echo $opts_ost | + sed -e "s/--index=$index/--index=$index --ost/") + + add mds1 $opts_mds || error "add mds1 failed for new params" + add ost1 $opts_ost || error "add ost1 failed for new params" + return 0 +} +run_test 75 "The order of --index should be irrelevant" + +test_76() { + [[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] || + { skip "Need MDS version at least 2.4.52" && return 0; } + setup + local MDMB_PARAM="osc.*.max_dirty_mb" + echo "Change MGS params" + local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | + head -1) + echo "max_dirty_mb: $MAX_DIRTY_MB" + local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB)) + echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB" + do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB + wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM | + head -1" $NEW_MAX_DIRTY_MB + MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1) + echo "$MAX_DIRTY_MB" + [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] || + error "error while apply max_dirty_mb" + + echo "Check the value is stored after remount" + stopall + setupall + wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM | + head -1" $NEW_MAX_DIRTY_MB + MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1) + [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] || + error "max_dirty_mb is not saved after remount" + + echo "Change OST params" + CLIENT_PARAM="obdfilter.*.client_cache_count" + local CLIENT_CACHE_COUNT + CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | + head -1) + echo "client_cache_count: $CLIENT_CACHE_COUNT" + NEW_CLIENT_CACHE_COUNT=$((CLIENT_CACHE_COUNT+CLIENT_CACHE_COUNT)) + echo "new_client_cache_count: $NEW_CLIENT_CACHE_COUNT" + do_facet mgs $LCTL set_param -P $CLIENT_PARAM=$NEW_CLIENT_CACHE_COUNT + wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM | + head -1" $NEW_CLIENT_CACHE_COUNT + CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | + head -1) + echo "$CLIENT_CACHE_COUNT" + [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] || + error "error while apply client_cache_count" + + echo "Check the value is stored after remount" + stopall + setupall + wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM | + head -1" $NEW_CLIENT_CACHE_COUNT + CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | + head -1) + echo "$CLIENT_CACHE_COUNT" + [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] || + error "client_cache_count is not saved after remount" + stopall +} +run_test 76 "set permanent params set_param -P" + +test_77() { # LU-3445 + local server_version=$(lustre_version_code $SINGLEMDS) + + [[ $server_version -ge $(version_code 2.2.60) ]] && + [[ $server_version -le $(version_code 2.4.0) ]] && + skip "Need MDS version < 2.2.60 or > 
2.4.0" && return + + if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then + is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && + skip_env "mixed loopback and real device not working" && return + fi + + local fs2mdsdev=$(mdsdevname 1_2) + local fs2ostdev=$(ostdevname 1_2) + local fs2mdsvdev=$(mdsvdevname 1_2) + local fs2ostvdev=$(ostvdevname 1_2) + local fsname=test1234 + local mgsnid + local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)" + + add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \ + --reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed" + start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT || + error "start fs2mds failed" + + mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,) + [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid" + + add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \ + --failnode=$failnid --fsname=$fsname \ + --reformat $fs2ostdev $fs2ostvdev || + error "add fs2ost failed" + start fs2ost $fs2ostdev $OST_MOUNT_OPTS || error "start fs2ost failed" + + mkdir -p $MOUNT2 + mount -t lustre $mgsnid:/$fsname $MOUNT2 || error "mount $MOUNT2 failed" + DIR=$MOUNT2 MOUNT=$MOUNT2 check_mount || error "check $MOUNT2 failed" + cleanup_24a +} +run_test 77 "comma-separated MGS NIDs and failover node NIDs" + if ! combined_mgs_mds ; then stop mgs fi