X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=546780d939e3d4a7bfa30193a68d3988896f03ba;hp=12deabb7bdb3a3af23eae8841dba40a583d2e1d0;hb=11317196203c26f8c9e4d2a8261859b1c7a76c9d;hpb=10cf5e4ec079fed1fa339240e00c027232f7971e

diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh
index 12deabb..546780d 100644
--- a/lustre/tests/conf-sanity.sh
+++ b/lustre/tests/conf-sanity.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
-# FIXME - there is no reason to use all of these different
-# return codes, espcially when most of them are mapped to something
-# else anyway.  The combination of test number and return code
-# figure out what failed.
+# FIXME - there is no reason to use all of these different return codes,
+# especially when most of them are mapped to something else anyway.
+# The tests should use error() to describe the failure more clearly,
+# and reduce the need to look into the tests to see what failed.
 
 set -e
 
@@ -49,35 +49,34 @@ PTLDEBUG=${PTLDEBUG:--1}
 SAVE_PWD=$PWD
 LUSTRE=${LUSTRE:-`dirname $0`/..}
 RLUSTRE=${RLUSTRE:-$LUSTRE}
+LUSTRE_TESTS_API_DIR=${LUSTRE_TESTS_API_DIR:-${LUSTRE}/tests/clientapi}
 export MULTIOP=${MULTIOP:-multiop}
 
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 
 # use small MDS + OST size to speed formatting time
 # do not use too small MDSSIZE/OSTSIZE, which affect the default jouranl size
+# STORED_MDSSIZE is used in test_18
+STORED_MDSSIZE=$MDSSIZE
+STORED_OSTSIZE=$OSTSIZE
 MDSSIZE=200000
 OSTSIZE=200000
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 
 if ! combined_mgs_mds; then
	# bug number for skipped test: 23954
	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b"
 fi
 
-# STORED_MDSSIZE is used in test_18
-if [ -n "$MDSSIZE" ]; then
-	STORED_MDSSIZE=$MDSSIZE
-fi
-
 # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
 if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
	LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
 fi
 
 [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
-# bug number for skipped test:	LU-2778 LU-2059
-	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 50h"
+# bug number for skipped test:	LU-2778 LU-2059 LU-4444
+	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 50h 69"
 
 init_logging
 
@@ -85,7 +84,7 @@ init_logging
 require_dsh_mds || exit 0
 require_dsh_ost || exit 0
 #
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45"
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45 69"
 
 assert_DIR
 
@@ -124,7 +123,7 @@ reformat() {
 
 start_mgs () {
	echo "start mgs"
-	start mgs $MGSDEV $MGS_MOUNT_OPTS
+	start mgs $(mgsdevname) $MGS_MOUNT_OPTS
 }
 
 start_mdt() {
@@ -870,8 +869,8 @@ test_23a() {	# was test_23
		    "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
		ps -ef | grep mount
	fi
-	stop_mds || error
-	stop_ost || error
+	stop_mds || error "stopping MDSes failed"
+	stop_ost || error "stopping OSSes failed"
 }
 
 run_test 23a "interrupt client during recovery mount delay"
 
@@ -895,7 +894,7 @@ MDSDEV1_2=$fs2mds_DEV
 OSTDEV1_2=$fs2ost_DEV
 OSTDEV2_2=$fs3ost_DEV
 
-cleanup_24a() {
+cleanup_fs2() {
	trap 0
	echo "umount $MOUNT2 ..."
	umount $MOUNT2 || true
@@ -930,7 +929,7 @@ test_24a() {
		--reformat $fs2ostdev $fs2ostvdev || exit 10
 
	setup
-	start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
+	start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
	start fs2ost $fs2ostdev $OST_MOUNT_OPTS
	mkdir -p $MOUNT2
	mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
@@ -955,7 +954,7 @@ test_24a() {
	stop_mds
	MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
	[ -z "$MDS" ] && error "No MDT" && return 8
-	cleanup_24a
+	cleanup_fs2
	cleanup_nocli || return 6
 }
 run_test 24a "Multiple MDTs on a single node"
 
@@ -1063,6 +1062,78 @@ test_28() {
 }
 run_test 28 "permanent parameter setting"
 
+test_28a() { # LU-4221
+	[[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
+		{ skip "Need OST version at least 2.5.52" && return 0; }
+	[ "$(facet_fstype ost1)" = "zfs" ] &&
+		skip "LU-4221: no such proc params for ZFS OSTs" && return
+
+	local name
+	local param
+	local cmd
+	local old
+	local new
+	local device="$FSNAME-OST0000"
+
+	setup
+
+	# In this test we will set three kinds of proc parameters with
+	# lctl conf_param:
+	# 1. the ones moved from the OFD to the OSD, and only their
+	#    symlinks kept in obdfilter
+	# 2. non-symlink ones in the OFD
+	# 3. non-symlink ones in the OSD
+
+	# Check 1.
+	# prepare a symlink parameter in the OFD
+	name="writethrough_cache_enable"
+	param="$device.ost.$name"
+	cmd="$LCTL get_param -n obdfilter.$device.$name"
+
+	# conf_param the symlink parameter in the OFD
+	old=$(do_facet ost1 $cmd)
+	new=$(((old + 1) % 2))
+	set_conf_param_and_check ost1 "$cmd" "$param" $new ||
+		error "lctl conf_param $device.ost.$param=$new failed"
+
+	# conf_param the target parameter in the OSD
+	param="$device.osd.$name"
+	cmd="$LCTL get_param -n osd-*.$device.$name"
+	set_conf_param_and_check ost1 "$cmd" "$param" $old ||
+		error "lctl conf_param $device.osd.$param=$old failed"
+
+	# Check 2.
+	# prepare a non-symlink parameter in the OFD
+	name="client_cache_seconds"
+	param="$device.ost.$name"
+	cmd="$LCTL get_param -n obdfilter.$device.$name"
+
+	# conf_param the parameter in the OFD
+	old=$(do_facet ost1 $cmd)
+	new=$((old * 2))
+	set_conf_param_and_check ost1 "$cmd" "$param" $new ||
+		error "lctl conf_param $device.ost.$param=$new failed"
+	set_conf_param_and_check ost1 "$cmd" "$param" $old ||
+		error "lctl conf_param $device.ost.$param=$old failed"
+
+	# Check 3.
+	# prepare a non-symlink parameter in the OSD
+	name="lma_self_repair"
+	param="$device.osd.$name"
+	cmd="$LCTL get_param -n osd-*.$device.$name"
+
+	# conf_param the parameter in the OSD
+	old=$(do_facet ost1 $cmd)
+	new=$(((old + 1) % 2))
+	set_conf_param_and_check ost1 "$cmd" "$param" $new ||
+		error "lctl conf_param $device.osd.$param=$new failed"
+	set_conf_param_and_check ost1 "$cmd" "$param" $old ||
+		error "lctl conf_param $device.osd.$param=$old failed"
+
+	cleanup
+}
+run_test 28a "set symlink parameters permanently with conf_param"
+
 test_29() {
	[ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
	setup > /dev/null 2>&1
@@ -1244,7 +1315,9 @@ test_32newtarball() {
	count=$(($T32_BLIMIT / 1024 / 2))
	chown $T32_QID.$T32_QID $tmp/src/t32_qf_old
 
-	formatall
+	# format ost with comma-separated NIDs to verify LU-4460
+	local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)"
+	MGSNID="$MGSNID,$MGSNID" OSTOPT="--failnode=$failnid" formatall
 
	setupall
 
@@ -1324,11 +1397,6 @@ t32_check() {
		exit 0
	fi
 
-	if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
-		skip "LU-2200: Test cannot run over Infiniband"
-		exit 0
-	fi
-
	local IMGTYPE=$(facet_fstype $SINGLEMDS)
 
	tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
@@ -1341,6 +1409,7 @@ t32_check() {
 
 t32_test_cleanup() {
	local tmp=$TMP/t32
+	local fstype=$(facet_fstype $SINGLEMDS)
	local rc=$?
 
	if $shall_cleanup_lustre; then
@@ -1358,6 +1427,10 @@ t32_test_cleanup() {
	$r rm -rf $tmp
	rm -rf $tmp
+	if [ $fstype == "zfs" ]; then
+		$r $ZPOOL destroy t32fs-mdt1 || rc=$?
+		$r $ZPOOL destroy t32fs-ost1 || rc=$?
+	fi
	return $rc
 }
 
@@ -1397,16 +1470,20 @@ t32_reload_modules() {
 t32_wait_til_devices_gone() {
	local node=$1
	local devices
+	local loops
	local i=0
 
	echo wait for devices to go
	while ((i < 20)); do
		devices=$(do_rpc_nodes $node $LCTL device_list | wc -l)
-		((devices == 0)) && return 0
+		loops=$(do_rpc_nodes $node losetup -a | grep -c t32)
+		((devices == 0 && loops == 0)) && return 0
		sleep 5
		i=$((i + 1))
	done
-	echo "waiting for devices on $node: Given up"
+	echo "waiting for dev on $node: dev $devices loop $loops given up"
+	do_rpc_nodes $node "losetup -a"
+	do_rpc_nodes $node "$LCTL devices_list"
	return 1
 }
 
@@ -1494,6 +1571,7 @@ t32_test() {
	local tarball=$1
	local writeconf=$2
	local dne_upgrade=${dne_upgrade:-"no"}
+	local ff_convert=${ff_convert:-"no"}
	local shall_cleanup_mdt=false
	local shall_cleanup_mdt1=false
	local shall_cleanup_ost=false
@@ -1515,6 +1593,8 @@ t32_test() {
	local nrpcs
	local list
	local fstype=$(facet_fstype $SINGLEMDS)
+	local mdt_dev=$tmp/mdt
+	local ost_dev=$tmp/ost
 
	trap 'trap - RETURN; t32_test_cleanup' RETURN
 
@@ -1534,27 +1614,68 @@ t32_test() {
	echo "  Kernel: $img_kernel"
	echo "    Arch: $img_arch"
 
+	local version=$(version_code $img_commit)
+	[[ $version -gt $(version_code 2.4.0) ]] && ff_convert="no"
+
+	if [ $fstype == "zfs" ]; then
+		# import pool first
+		$r $ZPOOL import -f -d $tmp t32fs-mdt1
+		$r $ZPOOL import -f -d $tmp t32fs-ost1
+		mdt_dev=t32fs-mdt1/mdt1
+		ost_dev=t32fs-ost1/ost1
+		wait_update_facet $SINGLEMDS "$ZPOOL list |
+			awk '/^t32fs-mdt1/ { print \\\$1 }'" "t32fs-mdt1" || {
+			error_noexit "import zfs pool failed"
+			return 1
+		}
+	fi
+
	$r $LCTL set_param debug="$PTLDEBUG"
 
-	$r $TUNEFS --dryrun $tmp/mdt || {
+	$r $TUNEFS --dryrun $mdt_dev || {
+		$r losetup -a
		error_noexit "tunefs.lustre before mounting the MDT"
		return 1
	}
	if [ "$writeconf" ]; then
-		mopts=loop,writeconf
+		mopts=writeconf
		if [ $fstype == "ldiskfs" ]; then
-			$r $TUNEFS --quota $tmp/mdt || {
+			mopts="loop,$mopts"
+			$r $TUNEFS --quota $mdt_dev || {
+				$r losetup -a
				error_noexit "Enable mdt quota feature"
				return 1
			}
		fi
	else
-		mopts=loop,exclude=$fsname-OST0000
+		if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
+			[[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
+			{ skip "LU-2200: Cannot run over Infiniband w/o lctl replace_nids "
				"(Need MGS version at least 2.3.59)"; return 0; }
+
+			local osthost=$(facet_active_host ost1)
+			local ostnid=$(do_node $osthost $LCTL list_nids | head -1)
+
+			mopts=nosvc
+			if [ $fstype == "ldiskfs" ]; then
+				mopts="loop,$mopts"
+			fi
+			$r mount -t lustre -o $mopts $mdt_dev $tmp/mnt/mdt
+			$r lctl replace_nids $fsname-OST0000 $ostnid
+			$r lctl replace_nids $fsname-MDT0000 $nid
+			$r umount -d $tmp/mnt/mdt
+		fi
+
+		mopts=exclude=$fsname-OST0000
+		if [ $fstype == "ldiskfs" ]; then
+			mopts="loop,$mopts"
+		fi
	fi
 
	t32_wait_til_devices_gone $node
 
-	$r mount -t lustre -o $mopts $tmp/mdt $tmp/mnt/mdt || {
+	$r mount -t lustre -o $mopts $mdt_dev $tmp/mnt/mdt || {
+		$r losetup -a
		error_noexit "Mounting the MDT"
		return 1
	}
@@ -1598,22 +1719,27 @@ t32_test() {
		return 1
	fi
 
-	$r $TUNEFS --dryrun $tmp/ost || {
+	$r $TUNEFS --dryrun $ost_dev || {
		error_noexit "tunefs.lustre before mounting the OST"
		return 1
	}
	if [ "$writeconf" ]; then
-		mopts=loop,mgsnode=$nid,$writeconf
+		mopts=mgsnode=$nid,$writeconf
		if [ $fstype == "ldiskfs" ]; then
-			$r $TUNEFS --quota $tmp/ost || {
+			mopts="loop,$mopts"
+			$r $TUNEFS --quota $ost_dev || {
+				$r losetup -a
				error_noexit "Enable ost quota feature"
				return 1
			}
		fi
	else
-		mopts=loop,mgsnode=$nid
+		mopts=mgsnode=$nid
+		if [ $fstype == "ldiskfs" ]; then
+			mopts="loop,$mopts"
+		fi
	fi
-	$r mount -t lustre -o $mopts $tmp/ost $tmp/mnt/ost || {
+	$r mount -t lustre -o $mopts $ost_dev $tmp/mnt/ost || {
		error_noexit "Mounting the OST"
		return 1
	}
@@ -1653,6 +1779,30 @@ t32_test() {
		return 1
	}
 
+	if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then
+		$r $LCTL lfsck_start -M $fsname-OST0000 || {
+			error_noexit "Start OI scrub on OST0"
+			return 1
+		}
+
+		# The oi_scrub should be on ost1, but for test_32(),
+		# all on the SINGLEMDS.
+		wait_update_facet $SINGLEMDS "$LCTL get_param -n \
+			osd-ldiskfs.$fsname-OST0000.oi_scrub |
+			awk '/^status/ { print \\\$2 }'" "completed" 30 || {
+			error_noexit "Failed to get the expected 'completed'"
+			return 1
+		}
+
+		local UPDATED=$($r $LCTL get_param -n \
+			osd-ldiskfs.$fsname-OST0000.oi_scrub |
+			awk '/^updated/ { print $2 }')
+		[ $UPDATED -ge 1 ] || {
+			error_noexit "Only $UPDATED objects have been converted"
+			return 1
+		}
+	fi
+
	if [ "$dne_upgrade" != "no" ]; then
		$r $LCTL conf_param \
		    $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || {
@@ -1759,8 +1909,8 @@ t32_test() {
	# on an architecture with different number of bits per
	# "long".
	#
-	if [ $(t32_bits_per_long $(uname -m)) != \
-		$(t32_bits_per_long $img_arch) ]; then
+	if [ $(t32_bits_per_long $(uname -m)) != \
+	    $(t32_bits_per_long $img_arch) ]; then
		echo "Different number of bits per \"long\" from the disk image"
		for list in list.orig list; do
			sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list
@@ -1829,12 +1979,17 @@ t32_test() {
	}
 
	# mount a second time to make sure we didnt leave upgrade flag on
-	$r $TUNEFS --dryrun $tmp/mdt || {
+	$r $TUNEFS --dryrun $mdt_dev || {
+		$r losetup -a
		error_noexit "tunefs.lustre before remounting the MDT"
		return 1
	}
-	$r mount -t lustre -o loop,exclude=$fsname-OST0000 $tmp/mdt \
-			$tmp/mnt/mdt || {
+
+	mopts=exclude=$fsname-OST0000
+	if [ $fstype == "ldiskfs" ]; then
+		mopts="loop,$mopts"
+	fi
+	$r mount -t lustre -o $mopts $mdt_dev $tmp/mnt/mdt || {
		error_noexit "Remounting the MDT"
		return 1
	}
@@ -1849,7 +2004,7 @@ test_32a() {
 
	t32_check
	for tarball in $tarballs; do
-		t32_test $tarball || rc=$?
+		t32_test $tarball || let "rc += $?"
	done
	return $rc
 }
@@ -1862,7 +2017,7 @@ test_32b() {
 
	t32_check
	for tarball in $tarballs; do
-		t32_test $tarball writeconf || rc=$?
+		t32_test $tarball writeconf || let "rc += $?"
	done
	return $rc
 }
@@ -1882,6 +2037,19 @@ test_32c() {
 }
 run_test 32c "dne upgrade test"
 
+test_32d() {
+	local tarballs
+	local tarball
+	local rc=0
+
+	t32_check
+	for tarball in $tarballs; do
+		ff_convert=yes t32_test $tarball || rc=$?
+	done
+	return $rc
+}
+run_test 32d "convert ff test"
+
 test_33a() { # bug 12333, was test_33
	local rc=0
	local FSNAME2=test-123
@@ -1912,7 +2080,7 @@ test_33a() { # bug 12333, was test_33
		--fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \
		$fs2ostvdev || exit 10
 
-	start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
+	start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT
	start fs2ost $fs2ostdev $OST_MOUNT_OPTS
	do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || rc=1
	mkdir -p $MOUNT2
@@ -1998,9 +2166,10 @@ test_35a() { # bug 12459
 
	log "Set up a fake failnode for the MDS"
	FAKENID="127.0.0.2"
-	local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
-	do_facet mgs "$LCTL conf_param ${device}.failover.node=" \
-		"$(h2$NETTYPE $FAKENID)" || return 4
+	local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
+	do_facet mgs "$LCTL conf_param \
+		${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 4
	log "Wait for RECONNECT_INTERVAL seconds (10s)"
	sleep 10
 
@@ -2052,10 +2221,10 @@ test_35b() { # bug 18674
 
	log "Set up a fake failnode for the MDS"
	FAKENID="127.0.0.2"
-	local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | \
-		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
-	do_facet mgs "$LCTL conf_param ${device}.failover.node=" \
-		"$(h2$NETTYPE $FAKENID)" || return 1
+	local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" |
+		awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
+	do_facet mgs "$LCTL conf_param \
+		${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 1
 
	local at_max_saved=0
	# adaptive timeouts may prevent seeing the issue
@@ -2676,7 +2845,7 @@ test_48() { # bug 17636
 run_test 48 "too many acls on file"
 
 # check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE
-test_49() { # bug 17710
+test_49a() { # bug 17710
	local timeout_orig=$TIMEOUT
	local ldlm_timeout_orig=$LDLM_TIMEOUT
	local LOCAL_TIMEOUT=20
@@ -2686,49 +2855,60 @@ test_49() { # bug 17710
	reformat
	setup_noconfig
-	check_mount || return 1
+	check_mount || error "client mount failed"
 
	echo "check ldlm_timout..."
-	LDLM_MDS="`do_facet $SINGLEMDS lctl get_param -n ldlm_timeout`"
-	LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`"
-	LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`"
+	local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)"
+	local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)"
+	local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)"
 
-	if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
+	if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then
		error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
	fi
 
	if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT / 3)) ]; then
-		error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
+		error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT / 3))"
	fi
 
	umount_client $MOUNT
-	stop_ost || return 2
-	stop_mds || return 3
+	stop_ost || error "problem stopping OSS"
+	stop_mds || error "problem stopping MDS"
+
+	LDLM_TIMEOUT=$ldlm_timeout_orig
+	TIMEOUT=$timeout_orig
+}
+run_test 49a "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
+
+test_49b() { # bug 17710
+	local timeout_orig=$TIMEOUT
+	local ldlm_timeout_orig=$LDLM_TIMEOUT
+	local LOCAL_TIMEOUT=20
 
	LDLM_TIMEOUT=$((LOCAL_TIMEOUT - 1))
+	TIMEOUT=$LOCAL_TIMEOUT
 
	reformat
	setup_noconfig
-	check_mount || return 7
+	check_mount || error "client mount failed"
 
-	LDLM_MDS="`do_facet $SINGLEMDS lctl get_param -n ldlm_timeout`"
-	LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`"
-	LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`"
+	local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)"
+	local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)"
+	local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)"
 
-	if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
+	if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then
		error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
	fi
 
	if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then
-		error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
+		error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT - 1))"
	fi
 
-	cleanup || return $?
+	cleanup || error "cleanup failed"
 
	LDLM_TIMEOUT=$ldlm_timeout_orig
	TIMEOUT=$timeout_orig
 }
-run_test 49 "check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE"
+run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
 
 lazystatfs() {
	# Test both statfs and lfs df and fail if either one fails
@@ -3113,7 +3293,13 @@ test_52() {
	do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
	[ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
 
+	# recover objects dry-run
+	echo "ll_recover_lost_found_objs dry_run"
+	do_node $ost1node "ll_recover_lost_found_objs -n -d $ost1mnt/O"
+	[ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
+
	# recover objects
+	echo "ll_recover_lost_found_objs fix run"
	do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found"
	[ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
 
@@ -3230,7 +3416,7 @@ thread_sanity() {
	tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min")
	tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max")
	tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started")
-	lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $?
+	lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
 
	cleanup
	load_modules
@@ -3352,6 +3538,7 @@ run_test 56 "check big indexes"
 test_57a() { # bug 22656
	local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
	writeconf_or_reformat
+	[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
	do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed"
	start_mgsmds
	start_ost && error "OST registration from failnode should fail"
@@ -3362,6 +3549,7 @@ run_test 57a "initial registration from failnode should fail (should return errs
 test_57b() {
	local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
	writeconf_or_reformat
+	[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
	do_facet ost1 "$TUNEFS --servicenode=$NID `ostdevname 1`" || error "tunefs failed"
	start_mgsmds
	start_ost || error "OST registration from servicenode should not fail"
@@ -3397,7 +3585,7 @@ test_58() { # bug 22658
	# remove all files from the OBJECTS dir
	do_facet $SINGLEMDS "mount -t ldiskfs $opts $devname $MNTDIR"
	do_facet $SINGLEMDS "find $MNTDIR/O/1/d* -type f -delete"
-	do_facet $SINGLEMDS "umount $MNTDIR"
+	do_facet $SINGLEMDS "umount -d $MNTDIR"
	# restart MDS with missing llog files
	start_mds
	do_facet mds "lctl set_param fail_loc=0"
@@ -3623,7 +3811,7 @@ test_65() { # LU-2237
	do_facet $SINGLEMDS \
		"mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
	do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
-	do_facet $SINGLEMDS "umount $brpt"
+	do_facet $SINGLEMDS "umount -d $brpt"
 
	# restart MDS, the "last_rcvd" file should be recreated.
	start_mds || error "fail to restart the MDS"
@@ -3704,10 +3892,164 @@ test_66() {
 }
 run_test 66 "replace nids"
 
+test_67() { #LU-2950
+	local legacy="$TMP/legacy_lnet_config"
+	local new="$TMP/new_routes_test"
+	local out="$TMP/config_out_file"
+	local verify="$TMP/conv_verify"
+	local verify_conf="$TMP/conf_verify"
+
+	# Create the legacy file that will be run through the
+	# lustre_routes_conversion script
+	cat <<- LEGACY_LNET_CONFIG > $legacy
		tcp1 23 192.168.213.1@tcp:1; tcp5 34 193.30.4.3@tcp:4;
		tcp2 54 10.1.3.2@tcp;
		tcp3 10.3.4.3@tcp:3;
		tcp4 10.3.3.4@tcp;
	LEGACY_LNET_CONFIG
+
+	# Create the verification file to verify the output of
+	# lustre_routes_conversion script against.
+	cat <<- VERIFY_LNET_CONFIG > $verify
		tcp1: { gateway: 192.168.213.1@tcp, hop: 23, priority: 1 }
		tcp5: { gateway: 193.30.4.3@tcp, hop: 34, priority: 4 }
		tcp2: { gateway: 10.1.3.2@tcp, hop: 54 }
		tcp3: { gateway: 10.3.4.3@tcp, priority: 3 }
		tcp4: { gateway: 10.3.3.4@tcp }
	VERIFY_LNET_CONFIG
+
+	# Create the verification file to verify the output of
+	# lustre_routes_config script against
+	cat <<- VERIFY_LNET_CONFIG > $verify_conf
		lctl --net tcp1 add_route 192.168.213.1@tcp 23 1
		lctl --net tcp5 add_route 193.30.4.3@tcp 34 4
		lctl --net tcp2 add_route 10.1.3.2@tcp 54 4
		lctl --net tcp3 add_route 10.3.4.3@tcp 1 3
		lctl --net tcp4 add_route 10.3.3.4@tcp 1 3
	VERIFY_LNET_CONFIG
+
+	lustre_routes_conversion $legacy $new > /dev/null
+	if [ -f $new ]; then
+		# verify the conversion output
+		cmp -s $new $verify > /dev/null
+		if [ $? -eq 1 ]; then
+			error "routes conversion failed"
+		fi
+
+		lustre_routes_config --dry-run --verbose $new > $out
+		# check that the script succeeded
+		cmp -s $out $verify_conf > /dev/null
+		if [ $? -eq 1 ]; then
+			error "routes config failed"
+		fi
+	else
+		error "routes conversion test failed"
+	fi
+	# remove generated files
+	rm -f $new $legacy $verify $verify_conf $out
+}
+run_test 67 "test routes conversion and configuration"
+
+test_68() {
+	local fid
+	local seq
+	local START
+	local END
+
+	[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.53) ] ||
+		{ skip "Need MDS version at least 2.4.53"; return 0; }
+
+	umount_client $MOUNT || error "umount client failed"
+
+	start_mdt 1 || error "MDT start failed"
+	start_ost
+
+	# START-END - the sequences we'll be reserving
+	START=$(do_facet $SINGLEMDS \
+		lctl get_param -n seq.ctl*.space | awk -F'[[ ]' '{print $2}')
+	END=$((START + (1 << 30)))
+	do_facet $SINGLEMDS \
+		lctl set_param seq.ctl*.fldb="[$START-$END\):0:mdt"
+
+	# reset the sequences MDT0000 has already assigned
+	do_facet $SINGLEMDS \
+		lctl set_param seq.srv*MDT0000.space=clear
+
+	# remount to let the client allocate new sequence
+	mount_client $MOUNT || error "mount client failed"
+
+	touch $DIR/$tfile
+	do_facet $SINGLEMDS \
+		lctl get_param seq.srv*MDT0000.space
+	$LFS path2fid $DIR/$tfile
+
+	local old_ifs="$IFS"
+	IFS='[:]'
+	fid=($($LFS path2fid $DIR/$tfile))
+	IFS="$old_ifs"
+	let seq=${fid[1]}
+
+	if [[ $seq < $END ]]; then
+		error "used reserved sequence $seq?"
+	fi
+	cleanup || return $?
+}
+run_test 68 "be able to reserve specific sequences in FLDB"
+
+test_69() {
+	local server_version=$(lustre_version_code $SINGLEMDS)
+
+	[[ $server_version -lt $(version_code 2.4.2) ]] &&
+		skip "Need MDS version at least 2.4.2" && return
+
+	[[ $server_version -ge $(version_code 2.4.50) ]] &&
+	[[ $server_version -lt $(version_code 2.5.0) ]] &&
+		skip "Need MDS version at least 2.5.0" && return
+
+	setup
+
+	# use OST0000 since it probably has the most creations
+	local OSTNAME=$(ostname_from_index 0)
+	local mdtosc_proc1=$(get_mdtosc_proc_path mds1 $OSTNAME)
+	local last_id=$(do_facet mds1 lctl get_param -n \
+		osc.$mdtosc_proc1.prealloc_last_id)
+
+	# Want to have OST LAST_ID over 1.5 * OST_MAX_PRECREATE to
+	# verify that the LAST_ID recovery is working properly. If
+	# not, then the OST will refuse to allow the MDS connect
+	# because the LAST_ID value is too different from the MDS
+	#define OST_MAX_PRECREATE=20000
+	local num_create=$((20000 * 5))
+
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -i 0 $DIR/$tdir
+	createmany -o $DIR/$tdir/$tfile- $num_create ||
+		error "createmany: failed to create $num_create files: $?"
+
+	# delete all of the files with objects on OST0 so the
+	# filesystem is not inconsistent later on
+	$LFS find $MOUNT --ost 0 | xargs rm
+
+	stop_ost || error "OST0 stop failure"
+	add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --reformat --replace \
+		$(ostdevname 1) $(ostvdevname 1) ||
+		error "reformat and replace $ostdev failed"
+	start_ost || error "OST0 restart failure"
+	wait_osc_import_state mds ost FULL
+
+	touch $DIR/$tdir/$tfile-last || error "create file after reformat"
+	local idx=$($LFS getstripe -i $DIR/$tdir/$tfile-last)
+	[ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true
+
+	cleanup
+}
+run_test 69 "replace an OST with the same index"
+
 test_70a() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
	local MDTIDX=1
+	cleanup
+
	start_mdt 1 || error "MDT0 start fail"
	start_ost || error "OST0 start fail"
@@ -3993,24 +4335,310 @@ run_test 72 "test fast symlink with extents flag enabled"
 
 test_73() { #LU-3006
	load_modules
-	do_facet ost1 "$TUNEFS --failnode=1.2.3.4@tcp $(ostdevname 1)" ||
+	[ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+	do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
		error "1st tunefs failed"
	start_mgsmds || error "start mds failed"
	start_ost || error "start ost failed"
	mount_client $MOUNT || error "mount client failed"
	lctl get_param -n osc.*OST0000-osc-[^M]*.import | grep failover_nids |
-		grep 1.2.3.4@tcp || error "failover nids haven't changed"
+		grep 1.2.3.4@$NETTYPE || error "failover nids haven't changed"
	umount_client $MOUNT || error "umount client failed"
-	stop_all
+	stopall
	reformat
 }
 run_test 73 "failnode to update from mountdata properly"
 
+test_74() { # LU-1606
+	for TESTPROG in $LUSTRE_TESTS_API_DIR/*.c; do
+		gcc -Wall -Werror $LUSTRE_TESTS_API_DIR/simple_test.c \
+			-I$LUSTRE/include \
+			-L$LUSTRE/utils -llustreapi ||
+			error "client api broken"
+	done
+	cleanup || return $?
+}
+run_test 74 "Lustre client api program can compile and link"
+
+test_75() { # LU-2374
+	[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.1) ]] &&
+		skip "Need MDS version at least 2.4.1" && return
+
+	local index=0
+	local opts_mds="$(mkfs_opts mds1 $(mdsdevname 1)) \
+		--reformat $(mdsdevname 1) $(mdsvdevname 1)"
+	local opts_ost="$(mkfs_opts ost1 $(ostdevname 1)) \
+		--reformat $(ostdevname 1) $(ostvdevname 1)"
+
+	#check with default parameters
+	add mds1 $opts_mds || error "add mds1 failed for default params"
+	add ost1 $opts_ost || error "add ost1 failed for default params"
+
+	opts_mds=$(echo $opts_mds | sed -e "s/--mdt//")
+	opts_mds=$(echo $opts_mds |
+		sed -e "s/--index=$index/--index=$index --mdt/")
+	opts_ost=$(echo $opts_ost | sed -e "s/--ost//")
+	opts_ost=$(echo $opts_ost |
+		sed -e "s/--index=$index/--index=$index --ost/")
+
+	add mds1 $opts_mds || error "add mds1 failed for new params"
+	add ost1 $opts_ost || error "add ost1 failed for new params"
+	return 0
+}
+run_test 75 "The order of --index should be irrelevant"
+
+test_76() {
+	[[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] ||
+		{ skip "Need MDS version at least 2.4.52" && return 0; }
+	setup
+	local MDMB_PARAM="osc.*.max_dirty_mb"
+	echo "Change MGS params"
+	local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM |
+		head -1)
+	echo "max_dirty_mb: $MAX_DIRTY_MB"
+	local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB))
+	echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB"
+	do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB
+	wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM |
+		head -1" $NEW_MAX_DIRTY_MB
+	MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1)
+	echo "$MAX_DIRTY_MB"
+	[ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] ||
+		error "error while apply max_dirty_mb"
+
+	echo "Check the value is stored after remount"
+	stopall
+	setupall
+	wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM |
+		head -1" $NEW_MAX_DIRTY_MB
+	MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1)
+	[ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] ||
+		error "max_dirty_mb is not saved after remount"
+
+	echo "Change OST params"
+	CLIENT_PARAM="obdfilter.*.client_cache_count"
+	local CLIENT_CACHE_COUNT
+	CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
+		head -1)
+	echo "client_cache_count: $CLIENT_CACHE_COUNT"
+	NEW_CLIENT_CACHE_COUNT=$((CLIENT_CACHE_COUNT+CLIENT_CACHE_COUNT))
+	echo "new_client_cache_count: $NEW_CLIENT_CACHE_COUNT"
+	do_facet mgs $LCTL set_param -P $CLIENT_PARAM=$NEW_CLIENT_CACHE_COUNT
+	wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM |
+		head -1" $NEW_CLIENT_CACHE_COUNT
+	CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
+		head -1)
+	echo "$CLIENT_CACHE_COUNT"
+	[ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] ||
+		error "error while apply client_cache_count"
+
+	echo "Check the value is stored after remount"
+	stopall
+	setupall
+	wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM |
+		head -1" $NEW_CLIENT_CACHE_COUNT
+	CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM |
+		head -1)
+	echo "$CLIENT_CACHE_COUNT"
+	[ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] ||
+		error "client_cache_count is not saved after remount"
+	stopall
+}
+run_test 76 "set permanent params set_param -P"
+
+test_77() { # LU-3445
+	local server_version=$(lustre_version_code $SINGLEMDS)
+
+	[[ $server_version -ge $(version_code 2.2.60) ]] &&
+	[[ $server_version -le $(version_code 2.4.0) ]] &&
+		skip "Need MDS version < 2.2.60 or > 2.4.0" && return
2.4.0" && return + + if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then + is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && + skip_env "mixed loopback and real device not working" && return + fi + + local fs2mdsdev=$(mdsdevname 1_2) + local fs2ostdev=$(ostdevname 1_2) + local fs2mdsvdev=$(mdsvdevname 1_2) + local fs2ostvdev=$(ostvdevname 1_2) + local fsname=test1234 + local mgsnid + local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)" + + add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \ + --reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed" + start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT || + error "start fs2mds failed" + + mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,) + [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid" + + add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \ + --failnode=$failnid --fsname=$fsname \ + --reformat $fs2ostdev $fs2ostvdev || + error "add fs2ost failed" + start fs2ost $fs2ostdev $OST_MOUNT_OPTS || error "start fs2ost failed" + + mkdir -p $MOUNT2 + mount -t lustre $mgsnid:/$fsname $MOUNT2 || error "mount $MOUNT2 failed" + DIR=$MOUNT2 MOUNT=$MOUNT2 check_mount || error "check $MOUNT2 failed" + cleanup_fs2 +} +run_test 77 "comma-separated MGS NIDs and failover node NIDs" + +test_78() { + [[ $(facet_fstype $SINGLEMDS) != ldiskfs || + $(facet_fstype ost1) != ldiskfs ]] && + skip "only applicable to ldiskfs-based MDTs and OSTs" && return + + # reformat the Lustre filesystem with a smaller size + local saved_MDSSIZE=$MDSSIZE + local saved_OSTSIZE=$OSTSIZE + MDSSIZE=$((MDSSIZE - 20000)) + OSTSIZE=$((OSTSIZE - 20000)) + reformat || error "(1) reformat Lustre filesystem failed" + MDSSIZE=$saved_MDSSIZE + OSTSIZE=$saved_OSTSIZE + + # mount the Lustre filesystem + setup_noconfig || error "(2) setup Lustre filesystem failed" + + # create some files + log "create test files" + local i + local file + local num_files=100 + mkdir -p $MOUNT/$tdir || error "(3) mkdir $MOUNT/$tdir failed" + for i in $(seq $num_files); do + file=$MOUNT/$tdir/$tfile-$i + dd if=/dev/urandom of=$file count=1 bs=1M || + error "(4) create $file failed" + done + + # unmount the Lustre filesystem + cleanup || error "(5) cleanup Lustre filesystem failed" + + # run e2fsck on the MDT and OST devices + local mds_host=$(facet_active_host $SINGLEMDS) + local ost_host=$(facet_active_host ost1) + local mds_dev=$(mdsdevname ${SINGLEMDS//mds/}) + local ost_dev=$(ostdevname 1) + + run_e2fsck $mds_host $mds_dev "-y" + run_e2fsck $ost_host $ost_dev "-y" + + # get the original block count of the MDT and OST filesystems + local mds_orig_blks=$(get_block_count $SINGLEMDS $mds_dev) + local ost_orig_blks=$(get_block_count ost1 $ost_dev) + + # expand the MDT and OST filesystems to the device size + run_resize2fs $SINGLEMDS $mds_dev "" || error "expand $SINGLEMDS failed" + run_resize2fs ost1 $ost_dev "" || error "expand ost1 failed" + + # run e2fsck on the MDT and OST devices again + run_e2fsck $mds_host $mds_dev "-y" + run_e2fsck $ost_host $ost_dev "-y" + + # mount the Lustre filesystem + setup + + # check the files + log "check files after expanding the MDT and OST filesystems" + for i in $(seq $num_files); do + file=$MOUNT/$tdir/$tfile-$i + $CHECKSTAT -t file -s 1048576 $file || + error "(6) checkstat $file failed" + done + + # create more files + log "create more files after expanding the MDT and OST filesystems" + for i in $(seq $((num_files + 1)) $((num_files + 10))); do + file=$MOUNT/$tdir/$tfile-$i + dd if=/dev/urandom of=$file count=1 
+			error "(7) create $file failed"
+	done
+
+	# unmount the Lustre filesystem
+	cleanup || error "(8) cleanup Lustre filesystem failed"
+
+	# run e2fsck on the MDT and OST devices
+	run_e2fsck $mds_host $mds_dev "-y"
+	run_e2fsck $ost_host $ost_dev "-y"
+
+	# get the maximum block count of the MDT and OST filesystems
+	local mds_max_blks=$(get_block_count $SINGLEMDS $mds_dev)
+	local ost_max_blks=$(get_block_count ost1 $ost_dev)
+
+	# get the minimum block count of the MDT and OST filesystems
+	local mds_min_blks=$(run_resize2fs $SINGLEMDS $mds_dev "" "-P" 2>&1 |
+		grep minimum | sed -e 's/^.*filesystem: //g')
+	local ost_min_blks=$(run_resize2fs ost1 $ost_dev "" "-P" 2>&1 |
+		grep minimum | sed -e 's/^.*filesystem: //g')
+
+	# shrink the MDT and OST filesystems to a smaller size
+	local shrunk=false
+	local new_blks
+	local base_blks
+	if [[ $mds_max_blks -gt $mds_min_blks &&
+	      $mds_max_blks -gt $mds_orig_blks ]]; then
+		[[ $mds_orig_blks -gt $mds_min_blks ]] &&
+			base_blks=$mds_orig_blks || base_blks=$mds_min_blks
+		new_blks=$(( (mds_max_blks - base_blks) / 2 + base_blks ))
+		run_resize2fs $SINGLEMDS $mds_dev $new_blks ||
+			error "shrink $SINGLEMDS to $new_blks failed"
+		shrunk=true
+	fi
+
+	if [[ $ost_max_blks -gt $ost_min_blks &&
+	      $ost_max_blks -gt $ost_orig_blks ]]; then
+		[[ $ost_orig_blks -gt $ost_min_blks ]] &&
+			base_blks=$ost_orig_blks || base_blks=$ost_min_blks
+		new_blks=$(( (ost_max_blks - base_blks) / 2 + base_blks ))
+		run_resize2fs ost1 $ost_dev $new_blks ||
+			error "shrink ost1 to $new_blks failed"
+		shrunk=true
+	fi
+
+	# check whether the MDT or OST filesystem was shrunk or not
+	if ! $shrunk; then
+		combined_mgs_mds || stop_mgs || error "(9) stop mgs failed"
+		reformat || error "(10) reformat Lustre filesystem failed"
+		return 0
+	fi
+
+	# run e2fsck on the MDT and OST devices again
+	run_e2fsck $mds_host $mds_dev "-y"
+	run_e2fsck $ost_host $ost_dev "-y"
+
+	# mount the Lustre filesystem again
+	setup
+
+	# check the files
+	log "check files after shrinking the MDT and OST filesystems"
+	for i in $(seq $((num_files + 10))); do
+		file=$MOUNT/$tdir/$tfile-$i
+		$CHECKSTAT -t file -s 1048576 $file ||
+			error "(11) checkstat $file failed"
+	done
+
+	# unmount and reformat the Lustre filesystem
+	cleanup || error "(12) cleanup Lustre filesystem failed"
+	combined_mgs_mds || stop_mgs || error "(13) stop mgs failed"
+	reformat || error "(14) reformat Lustre filesystem failed"
+}
+run_test 78 "run resize2fs on MDT and OST filesystems"
+
 if ! combined_mgs_mds ; then
	stop mgs
 fi
 
 cleanup_gss
 
+# restore the values of MDSSIZE and OSTSIZE
+MDSSIZE=$STORED_MDSSIZE
+OSTSIZE=$STORED_OSTSIZE
+reformat
+
 complete $SECONDS
 exit_status
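
The new test_28a relies on set_conf_param_and_check() from test-framework.sh, which applies a setting through "lctl conf_param" and then polls "lctl get_param" until the running value matches. A minimal standalone sketch of that set-and-poll pattern follows; wait_conf_param() and its 30-second timeout are illustrative assumptions, not helpers defined by this patch.

# Sketch only: permanently set a parameter with "lctl conf_param" and
# poll the live value until it matches (give up after ~30s).
wait_conf_param() {
	local get_cmd=$1	# e.g. "lctl get_param -n obdfilter.*.client_cache_seconds"
	local conf_param=$2	# e.g. "testfs-OST0000.ost.client_cache_seconds"
	local expect=$3
	local i

	lctl conf_param $conf_param=$expect || return 1
	for i in $(seq 30); do
		[ "$($get_cmd)" = "$expect" ] && return 0
		sleep 1
	done
	echo "timed out waiting for $conf_param=$expect" >&2
	return 1
}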
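
test_67 feeds lustre_routes_conversion a legacy routes file ("<net> [hop] <gateway>[:<priority>]" entries, ';'-separated) and expects one "net: { gateway: ..., hop: ..., priority: ... }" line per route. The awk one-liner below converts a single fully-specified legacy entry only, to show the field layout; the installed script also handles entries with the hop count or priority omitted.

# Sketch only: rewrite one fully-specified legacy route entry into the
# per-line syntax consumed by lustre_routes_config.
echo "tcp1 23 192.168.213.1@tcp:1" |
	awk -F'[ :]' '{ printf "%s: { gateway: %s, hop: %s, priority: %s }\n",
			$1, $3, $2, $4 }'
# prints: tcp1: { gateway: 192.168.213.1@tcp, hop: 23, priority: 1 }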
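
For ZFS-backed upgrade images, t32_test() now imports the t32fs-mdt1 and t32fs-ost1 pools from the unpacked image directory and t32_test_cleanup() destroys them afterwards. A hedged sketch of that import/destroy pairing outside the test framework is below; the helper names and the "zpool list" visibility check are assumptions, while the pool names and the -f -d flags mirror the test.

# Sketch only: import a file-backed pool from a directory of vdev files,
# confirm it is visible, and destroy it again during cleanup.
ZPOOL=${ZPOOL:-zpool}

import_t32_pool() {
	local dir=$1	# directory holding the unpacked image, e.g. /tmp/t32
	local pool=$2	# e.g. t32fs-mdt1

	$ZPOOL import -f -d $dir $pool || return 1
	$ZPOOL list -H -o name $pool > /dev/null	# confirm the import
}

destroy_t32_pool() {
	$ZPOOL destroy $1
}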
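
test_68 derives the sequence number of a newly created file by splitting the "[seq:oid:ver]" output of "lfs path2fid" on '[', ':' and ']'. The same extraction can be done with plain parameter expansion, as in this sketch; fid_seq() is a hypothetical helper, and $LFS is assumed to point at the lfs client tool as it does in the test framework.

# Sketch only: print the sequence component of a file's FID, e.g.
# "[0x200000400:0x1:0x0]" -> "0x200000400".
fid_seq() {
	local fid
	fid=$($LFS path2fid "$1") || return 1
	fid=${fid#[}		# drop the leading "["
	echo "${fid%%:*}"	# keep everything before the first ":"
}
# usage: seq=$(fid_seq /mnt/lustre/somefile)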