X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=edaf102876d4e3b5f22b2138b9411c4d97bf5ee5;hp=7397bc5e09b25a13d88eb7624eb9f9ba5eeefdac;hb=ab37ffd3ad85280b39fc9c12e5fc64ed3d4fd4a9;hpb=f5f3670b5fa3e972929d732f4d217c6a8442f014 diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh index 7397bc5..edaf102 100644 --- a/lustre/tests/conf-sanity.sh +++ b/lustre/tests/conf-sanity.sh @@ -1,25 +1,47 @@ #!/bin/bash -# FIXME - there is no reason to use all of these different -# return codes, espcially when most of them are mapped to something -# else anyway. The combination of test number and return code -# figure out what failed. +# FIXME - there is no reason to use all of these different return codes, +# espcially when most of them are mapped to something else anyway. +# The tests should use error() to describe the failure more clearly, +# and reduce the need to look into the tests to see what failed. set -e ONLY=${ONLY:-"$*"} -# bug number for skipped test: -# 15977 -ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT" +# bug number for skipped test: LU-2828 +ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 59 64" # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT! +is_sles11() # LU-2181 +{ + if [ -r /etc/SuSE-release ] + then + local vers=`grep VERSION /etc/SuSE-release | awk '{print $3}'` + local patchlev=`grep PATCHLEVEL /etc/SuSE-release \ + | awk '{print $3}'` + if [ $vers -eq 11 ] && [ $patchlev -eq 2 ] + then + return 0 + fi + fi + return 1 +} + +if is_sles11; then # LU-2181 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b" +fi + if [ "$FAILURE_MODE" = "HARD" ]; then CONFIG_EXCEPTIONS="24a " && \ echo "Except the tests: $CONFIG_EXCEPTIONS for FAILURE_MODE=$FAILURE_MODE, bug 23573" && \ ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS" fi +# bug number for skipped test: +# a tool to create lustre filesystem images +ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT" + SRCDIR=`dirname $0` PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH @@ -31,64 +53,41 @@ export MULTIOP=${MULTIOP:-multiop} . $LUSTRE/tests/test-framework.sh init_test_env $@ +. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh} # use small MDS + OST size to speed formatting time # do not use too small MDSSIZE/OSTSIZE, which affect the default jouranl size +# STORED_MDSSIZE is used in test_18 +STORED_MDSSIZE=$MDSSIZE +STORED_OSTSIZE=$OSTSIZE MDSSIZE=200000 OSTSIZE=200000 -. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh} if ! combined_mgs_mds; then # bug number for skipped test: 23954 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24b" fi -# STORED_MDSSIZE is used in test_18 -if [ -n "$MDSSIZE" ]; then - STORED_MDSSIZE=$MDSSIZE -fi - # pass "-E lazy_itable_init" to mke2fs to speed up the formatting time if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init) fi +[ $(facet_fstype $SINGLEMDS) = "zfs" ] && +# bug number for skipped test: LU-2778 LU-4444 + ALWAYS_EXCEPT="$ALWAYS_EXCEPT 57b 69" + init_logging # require_dsh_mds || exit 0 require_dsh_ost || exit 0 # -[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45" +[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45 69" assert_DIR -reformat() { - formatall -} - -writeconf1() { - local facet=$1 - local dev=$2 - - stop ${facet} -f - rm -f ${facet}active - # who knows if/where $TUNEFS is installed? Better reformat if it fails... 
- do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || - { echo "tunefs failed, reformatting instead" && reformat_and_config && return 1; } - return 0 -} - -writeconf() { - # we need ldiskfs - load_modules - # if writeconf fails anywhere, we reformat everything - writeconf1 mds `mdsdevname 1` || return 0 - writeconf1 ost1 `ostdevname 1` || return 0 - writeconf1 ost2 `ostdevname 2` || return 0 -} - gen_config() { # The MGS must be started before the OSTs for a new fs, so start # and stop to generate the startup logs. @@ -107,20 +106,54 @@ reformat_and_config() { gen_config } +writeconf_or_reformat() { + # There are at most 2 OSTs for write_conf test + # who knows if/where $TUNEFS is installed? + # Better reformat if it fails... + writeconf_all $MDSCOUNT 2 || + { echo "tunefs failed, reformatting instead" && + reformat_and_config && return 0; } + return 0 +} + +reformat() { + formatall +} + start_mgs () { echo "start mgs" - start mgs $MGSDEV $MGS_MOUNT_OPTS + start mgs $(mgsdevname) $MGS_MOUNT_OPTS } -start_mds() { - local facet=$SINGLEMDS - # we can not use MDSDEV1 here because SINGLEMDS could be set not to mds1 only - local num=$(echo $facet | tr -d "mds") +start_mdt() { + local num=$1 + local facet=mds$num local dev=$(mdsdevname $num) + shift 1 + echo "start mds service on `facet_active_host $facet`" start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94 } +stop_mdt() { + local num=$1 + local facet=mds$num + local dev=$(mdsdevname $num) + shift 1 + + echo "stop mds service on `facet_active_host $facet`" + # These tests all use non-failover stop + stop $facet -f || return 97 +} + +start_mds() { + local num + + for num in $(seq $MDSCOUNT); do + start_mdt $num $@ || return 94 + done +} + start_mgsmds() { if ! combined_mgs_mds ; then start_mgs @@ -129,9 +162,10 @@ start_mgsmds() { } stop_mds() { - echo "stop mds service on `facet_active_host $SINGLEMDS`" - # These tests all use non-failover stop - stop $SINGLEMDS -f || return 97 + local num + for num in $(seq $MDSCOUNT); do + stop_mdt $num || return 97 + done } stop_mgs() { @@ -169,7 +203,7 @@ mount_client() { } remount_client() { - local mountopt="-o remount,$1" + local mountopt="remount,$1" local MOUNTPATH=$2 echo "remount '$1' lustre on ${MOUNTPATH}....." zconf_mount `hostname` $MOUNTPATH "$mountopt" || return 96 @@ -278,9 +312,9 @@ test_1() { run_test 1 "start up ost twice (should return errors)" test_2() { - start_mds + start_mdt 1 echo "start mds second time.." - start_mds && error "2nd MDT start should fail" + start_mdt 1 && error "2nd MDT start should fail" start_ost mount_client $MOUNT check_mount || return 43 @@ -330,7 +364,7 @@ test_5a() { # was test_5 wait $UMOUNT_PID if grep " $MOUNT " /proc/mounts; then echo "test 5: /proc/mounts after failed umount" - umount $MOUNT & + umount -f $MOUNT & UMOUNT_PID=$! sleep 2 echo "killing umount" @@ -559,24 +593,30 @@ is_blkdev () { # test_17() { - setup - check_mount || return 41 - cleanup || return $? + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi - echo "Remove mds config log" - if ! combined_mgs_mds ; then - stop mgs - fi + setup + check_mount || return 41 + cleanup || return $? + + echo "Remove mds config log" + if ! combined_mgs_mds ; then + stop mgs + fi - do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' $MGSDEV || return \$?" || return $? + do_facet mgs "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' \ + $(mgsdevname) || return \$?" || return $? - if ! combined_mgs_mds ; then - start_mgs - fi + if ! 
combined_mgs_mds ; then + start_mgs + fi - start_ost - start_mds && return 42 - reformat_and_config + start_ost + start_mds && return 42 + reformat_and_config } run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)" @@ -601,8 +641,14 @@ test_18() { log "use STORED_MDSSIZE=$STORED_MDSSIZE" # check if the block device is large enough - [ -z "$OK" ] && $(is_blkdev $SINGLEMDS $MDSDEV $MIN) && OK=1 && - myMDSSIZE=$MIN && log "use device $MDSDEV with MIN=$MIN" + is_blkdev $SINGLEMDS $MDSDEV $MIN + local large_enough=$? + if [ -n "$OK" ]; then + [ $large_enough -ne 0 ] && OK="" + else + [ $large_enough -eq 0 ] && OK=1 && myMDSSIZE=$MIN && + log "use device $MDSDEV with MIN=$MIN" + fi # check if a loopback device has enough space for fs metadata (5%) @@ -702,7 +748,7 @@ test_21c() { stop_ost2 stop_mds #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 21c "start mds between two osts, stop mds last" @@ -725,7 +771,7 @@ test_21d() { stop_mds stop_mgs #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat start_mgs } run_test 21d "start mgs then ost and then mds" @@ -736,7 +782,7 @@ test_22() { echo Client mount with ost in logs, but none running start_ost # wait until mds connected to ost and open client connection - wait_osc_import_state mds ost FULL + wait_osc_import_state mds ost FULL stop_ost mount_client $MOUNT # check_mount will block trying to contact ost @@ -756,8 +802,8 @@ test_22() { sleep $((TIMEOUT + TIMEOUT + TIMEOUT)) fi mount_client $MOUNT - wait_osc_import_state mds ost FULL - wait_osc_import_state client ost FULL + wait_osc_import_state mds ost FULL + wait_osc_import_state client ost FULL check_mount || return 41 pass @@ -813,8 +859,8 @@ test_23a() { # was test_23 "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs" ps -ef | grep mount fi - stop_mds || error - stop_ost || error + stop_mds || error "stopping MDSes failed" + stop_ost || error "stopping OSSes failed" } run_test 23a "interrupt client during recovery mount delay" @@ -838,7 +884,7 @@ MDSDEV1_2=$fs2mds_DEV OSTDEV1_2=$fs2ost_DEV OSTDEV2_2=$fs3ost_DEV -cleanup_24a() { +cleanup_fs2() { trap 0 echo "umount $MOUNT2 ..." 
umount $MOUNT2 || true @@ -865,17 +911,18 @@ test_24a() { # test 8-char fsname as well local FSNAME2=test1234 - add fs2mds $(mkfs_opts mds1) --nomgs --mgsnode=$MGSNID \ + + add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --nomgs --mgsnode=$MGSNID \ --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10 - add fs2ost $(mkfs_opts ost1) --fsname=${FSNAME2} --reformat \ - $fs2ostdev $fs2ostvdev || exit 10 + add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --fsname=${FSNAME2} \ + --reformat $fs2ostdev $fs2ostvdev || exit 10 setup - start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT + start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT start fs2ost $fs2ostdev $OST_MOUNT_OPTS mkdir -p $MOUNT2 - mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1 + $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || return 1 # 1 still works check_mount || return 2 # files written on 1 should not show up on 2 @@ -897,7 +944,7 @@ test_24a() { stop_mds MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) [ -z "$MDS" ] && error "No MDT" && return 8 - cleanup_24a + cleanup_fs2 cleanup_nocli || return 6 } run_test 24a "Multiple MDTs on a single node" @@ -915,10 +962,11 @@ test_24b() { local fs2mdsdev=$(mdsdevname 1_2) local fs2mdsvdev=$(mdsvdevname 1_2) - add fs2mds $(mkfs_opts mds1) --mgs --fsname=${FSNAME}2 --reformat \ - $fs2mdsdev $fs2mdsvdev || exit 10 + add fs2mds $(mkfs_opts mds1 ${fs2mdsdev} ) --mgs --fsname=${FSNAME}2 \ + --reformat $fs2mdsdev $fs2mdsvdev || exit 10 setup start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2 + stop fs2mds -f cleanup || return 6 } run_test 24b "Multiple MGSs on a single node (should return err)" @@ -942,45 +990,38 @@ test_26() { lctl get_param -n devices DEVS=$(lctl get_param -n devices | egrep -v MG | wc -l) [ $DEVS -gt 0 ] && return 2 + # start mds to drop writeconf setting + start_mds || return 3 + stop_mds || return 4 unload_modules_conf || return $? } run_test 26 "MDT startup failure cleans LOV (should return errs)" -set_and_check() { - local myfacet=$1 - local TEST=$2 - local PARAM=$3 - local ORIG=$(do_facet $myfacet "$TEST") - if [ $# -gt 3 ]; then - local FINAL=$4 - else - local -i FINAL - FINAL=$(($ORIG + 5)) - fi - echo "Setting $PARAM from $ORIG to $FINAL" - do_facet mgs "$LCTL conf_param $PARAM='$FINAL'" || error conf_param failed - - wait_update $(facet_host $myfacet) "$TEST" "$FINAL" || error check failed! -} - test_27a() { start_ost || return 1 start_mds || return 2 echo "Requeue thread should have started: " ps -e | grep ll_cfg_requeue - set_and_check ost1 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" "$FSNAME-OST0000.ost.client_cache_seconds" || return 3 + set_conf_param_and_check ost1 \ + "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \ + "$FSNAME-OST0000.ost.client_cache_seconds" || return 3 cleanup_nocli } run_test 27a "Reacquire MGS lock if OST started first" test_27b() { # FIXME. 
~grev - setup - local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }') + setup + local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | + awk '($3 ~ "mdt" && $4 ~ "MDT0000") { print $4 }') facet_failover $SINGLEMDS - set_and_check $SINGLEMDS "lctl get_param -n mdt.$device.identity_acquire_expire" "$device.mdt.identity_acquire_expire" || return 3 - set_and_check client "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight" "$device.mdc.max_rpcs_in_flight" || return 4 + set_conf_param_and_check $SINGLEMDS \ + "lctl get_param -n mdt.$device.identity_acquire_expire" \ + "$device.mdt.identity_acquire_expire" || return 3 + set_conf_param_and_check client \ + "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\ + "$device.mdc.max_rpcs_in_flight" || return 4 check_mount cleanup } @@ -992,9 +1033,9 @@ test_28() { PARAM="$FSNAME.llite.max_read_ahead_whole_mb" ORIG=$($TEST) FINAL=$(($ORIG + 1)) - set_and_check client "$TEST" "$PARAM" $FINAL || return 3 + set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 3 FINAL=$(($FINAL + 1)) - set_and_check client "$TEST" "$PARAM" $FINAL || return 4 + set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 4 umount_client $MOUNT || return 200 mount_client $MOUNT RESULT=$($TEST) @@ -1004,11 +1045,83 @@ test_28() { else echo "New config success: got $RESULT" fi - set_and_check client "$TEST" "$PARAM" $ORIG || return 5 + set_conf_param_and_check client "$TEST" "$PARAM" $ORIG || return 5 cleanup } run_test 28 "permanent parameter setting" +test_28a() { # LU-4221 + [[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] || + { skip "Need OST version at least 2.5.52" && return 0; } + [ "$(facet_fstype ost1)" = "zfs" ] && + skip "LU-4221: no such proc params for ZFS OSTs" && return + + local name + local param + local cmd + local old + local new + local device="$FSNAME-OST0000" + + setup + + # In this test we will set three kinds of proc parameters with + # lctl conf_param: + # 1. the ones moved from the OFD to the OSD, and only their + # symlinks kept in obdfilter + # 2. non-symlink ones in the OFD + # 3. non-symlink ones in the OSD + + # Check 1. + # prepare a symlink parameter in the OFD + name="writethrough_cache_enable" + param="$device.ost.$name" + cmd="$LCTL get_param -n obdfilter.$device.$name" + + # conf_param the symlink parameter in the OFD + old=$(do_facet ost1 $cmd) + new=$(((old + 1) % 2)) + set_conf_param_and_check ost1 "$cmd" "$param" $new || + error "lctl conf_param $device.ost.$param=$new failed" + + # conf_param the target parameter in the OSD + param="$device.osd.$name" + cmd="$LCTL get_param -n osd-*.$device.$name" + set_conf_param_and_check ost1 "$cmd" "$param" $old || + error "lctl conf_param $device.osd.$param=$old failed" + + # Check 2. + # prepare a non-symlink parameter in the OFD + name="client_cache_seconds" + param="$device.ost.$name" + cmd="$LCTL get_param -n obdfilter.$device.$name" + + # conf_param the parameter in the OFD + old=$(do_facet ost1 $cmd) + new=$((old * 2)) + set_conf_param_and_check ost1 "$cmd" "$param" $new || + error "lctl conf_param $device.ost.$param=$new failed" + set_conf_param_and_check ost1 "$cmd" "$param" $old || + error "lctl conf_param $device.ost.$param=$old failed" + + # Check 3. 
+ # prepare a non-symlink parameter in the OSD + name="lma_self_repair" + param="$device.osd.$name" + cmd="$LCTL get_param -n osd-*.$device.$name" + + # conf_param the parameter in the OSD + old=$(do_facet ost1 $cmd) + new=$(((old + 1) % 2)) + set_conf_param_and_check ost1 "$cmd" "$param" $new || + error "lctl conf_param $device.osd.$param=$new failed" + set_conf_param_and_check ost1 "$cmd" "$param" $old || + error "lctl conf_param $device.osd.$param=$old failed" + + cleanup +} +run_test 28a "set symlink parameters permanently with conf_param" + test_29() { [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return setup > /dev/null 2>&1 @@ -1021,7 +1134,8 @@ test_29() { ACTV=$(lctl get_param -n $PROC_ACT) DEAC=$((1 - $ACTV)) - set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2 + set_conf_param_and_check client \ + "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2 # also check ost_server_uuid status RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV) if [ -z "$RESULT" ]; then @@ -1031,31 +1145,30 @@ test_29() { echo "Live client success: got $RESULT" fi - # check MDT too - local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $FSNAME-OST0001) - mdtosc=${mdtosc/-MDT*/-MDT\*} - local MPROC="osc.$mdtosc.active" - local MAX=30 - local WAIT=0 - while [ 1 ]; do - sleep 5 - RESULT=`do_facet $SINGLEMDS " lctl get_param -n $MPROC"` - [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC" - if [ $RESULT -eq $DEAC ]; then - echo "MDT deactivated also after $WAIT sec (got $RESULT)" - break - fi - WAIT=$((WAIT + 5)) - if [ $WAIT -eq $MAX ]; then - echo "MDT not deactivated: wanted $DEAC got $RESULT" - return 4 - fi - echo "Waiting $(($MAX - $WAIT)) secs for MDT deactivated" + # check MDTs too + for num in $(seq $MDSCOUNT); do + local mdtosc=$(get_mdtosc_proc_path mds${num} $FSNAME-OST0001) + local MPROC="osc.$mdtosc.active" + local MAX=30 + local WAIT=0 + while [ 1 ]; do + sleep 5 + RESULT=$(do_facet mds${num} " lctl get_param -n $MPROC") + [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $MPROC" + if [ $RESULT -eq $DEAC ]; then + echo -n "MDT deactivated also after" + echo "$WAIT sec (got $RESULT)" + break + fi + WAIT=$((WAIT + 5)) + if [ $WAIT -eq $MAX ]; then + echo -n "MDT not deactivated: wanted $DEAC" + echo "got $RESULT" + return 4 + fi + echo "Waiting $(($MAX - $WAIT))secs for MDT deactivated" + done done - - # quotacheck should not fail immediately after deactivate - [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; } - # test new client starts deactivated umount_client $MOUNT || return 200 mount_client $MOUNT @@ -1067,17 +1180,15 @@ test_29() { echo "New client success: got $RESULT" fi - # quotacheck should not fail after umount/mount operation - [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; } - # make sure it reactivates - set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6 + set_conf_param_and_check client \ + "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6 umount_client $MOUNT stop_ost2 cleanup_nocli #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 29 "permanently remove an OST" @@ -1089,7 +1200,8 @@ test_30a() { ORIG=$($TEST) LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5) for i in ${LIST[@]}; do - set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3 + set_conf_param_and_check client "$TEST" \ + "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 
3 done # make sure client restart still works umount_client $MOUNT @@ -1122,20 +1234,30 @@ test_30b() { NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/") echo "Using fake nid $NEW" - TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'" - set_and_check client "$TEST" "$FSNAME-OST0000.failover.node" $NEW || error "didn't add failover nid $NEW" - NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids) + TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | + grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'" + set_conf_param_and_check client "$TEST" \ + "$FSNAME-OST0000.failover.node" $NEW || + error "didn't add failover nid $NEW" + NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | + grep failover_nids) echo $NIDS - NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1)) + # The NIDS value is the failover nid strings and "[" and "]". So + # we need to subtract the space taken by the delimiters. This has + # changed from earlier version of Lustre but this test is run only + # locally so this change will not break interop. See LU-3386 + NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 3)) echo "should have 2 failover nids: $NIDCOUNT" [ $NIDCOUNT -eq 2 ] || error "Failover nid not added" - do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || error "conf_param delete failed" + do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || + error "conf_param delete failed" umount_client $MOUNT mount_client $MOUNT || return 3 - NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids) + NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | + grep failover_nids) echo $NIDS - NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1)) + NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 3)) echo "only 1 final nid should remain: $NIDCOUNT" [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed" @@ -1144,198 +1266,789 @@ test_30b() { run_test 30b "Remove failover nids" test_31() { # bug 10734 - # ipaddr must not exist - mount -t lustre 4.3.2.1@tcp:/lustre $MOUNT || true + # ipaddr must not exist + $MOUNT_CMD 4.3.2.1@tcp:/lustre $MOUNT || true cleanup } run_test 31 "Connect to non-existent node (shouldn't crash)" -# Use these start32/stop32 fn instead of t-f start/stop fn, -# for local devices, to skip global facet vars init -stop32 () { - local facet=$1 - shift - echo "Stopping local ${MOUNT%/*}/${facet} (opts:$@)" - umount -d $@ ${MOUNT%/*}/${facet} - losetup -a -} - -start32 () { - local facet=$1 - shift - local device=$1 - shift - mkdir -p ${MOUNT%/*}/${facet} - - echo "Starting local ${facet}: $@ $device ${MOUNT%/*}/${facet}" - mount -t lustre $@ ${device} ${MOUNT%/*}/${facet} - local RC=$? - if [ $RC -ne 0 ]; then - echo "mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}" - echo "Start of ${device} of local ${facet} failed ${RC}" - fi - losetup -a - return $RC + +T32_QID=60000 +T32_BLIMIT=20480 # Kbytes +T32_ILIMIT=2 + +# +# This is not really a test but a tool to create new disk +# image tarballs for the upgrade tests. +# +# Disk image tarballs should be created on single-node +# clusters by running this test with default configurations +# plus a few mandatory environment settings that are verified +# at the beginning of the test. +# +test_32newtarball() { + local version + local dst=. 
+ local src=/etc/rc.d + local tmp=$TMP/t32_image_create + + if [ $FSNAME != t32fs -o $MDSCOUNT -ne 1 -o \ + \( -z "$MDSDEV" -a -z "$MDSDEV1" \) -o $OSTCOUNT -ne 1 -o \ + -z "$OSTDEV1" ]; then + error "Needs FSNAME=t32fs MDSCOUNT=1 MDSDEV1=" \ + "(or MDSDEV, in the case of b1_8) OSTCOUNT=1" \ + "OSTDEV1=" + fi + + mkdir $tmp || { + echo "Found stale $tmp" + return 1 + } + + mkdir $tmp/src + tar cf - -C $src . | tar xf - -C $tmp/src + dd if=/dev/zero of=$tmp/src/t32_qf_old bs=1M \ + count=$(($T32_BLIMIT / 1024 / 2)) + chown $T32_QID.$T32_QID $tmp/src/t32_qf_old + + # format ost with comma-separated NIDs to verify LU-4460 + local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)" + MGSNID="$MGSNID,$MGSNID" OSTOPT="--failnode=$failnid" formatall + + setupall + + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] && + $LFS quotacheck -ug /mnt/$FSNAME + $LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \ + /mnt/$FSNAME + + tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME + stopall + + mkdir $tmp/img + + setupall + pushd /mnt/$FSNAME + ls -Rni --time-style=+%s >$tmp/img/list + find . ! -name .lustre -type f -exec sha1sum {} \; | + sort -k 2 >$tmp/img/sha1sums + popd + $LCTL get_param -n version | head -n 1 | + sed -e 's/^lustre: *//' >$tmp/img/commit + + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ] && + $LFS quotaon -ug /mnt/$FSNAME + $LFS quota -u $T32_QID -v /mnt/$FSNAME + $LFS quota -v -u $T32_QID /mnt/$FSNAME | + awk 'BEGIN { num='1' } { if ($1 == "'/mnt/$FSNAME'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*" > $tmp/img/bspace + $LFS quota -v -u $T32_QID /mnt/$FSNAME | + awk 'BEGIN { num='5' } { if ($1 == "'/mnt/$FSNAME'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*" > $tmp/img/ispace + + stopall + + pushd $tmp/src + find -type f -exec sha1sum {} \; | sort -k 2 >$tmp/sha1sums.src + popd + + if ! diff -u $tmp/sha1sums.src $tmp/img/sha1sums; then + echo "Data verification failed" + fi + + uname -r >$tmp/img/kernel + uname -m >$tmp/img/arch + + mv ${MDSDEV1:-$MDSDEV} $tmp/img + mv $OSTDEV1 $tmp/img + + version=$(sed -e 's/\(^[0-9]\+\.[0-9]\+\)\(.*$\)/\1/' $tmp/img/commit | + sed -e 's/\./_/g') # E.g., "1.8.7" -> "1_8" + dst=$(cd $dst; pwd) + pushd $tmp/img + tar cjvf $dst/disk$version-$(facet_fstype $SINGLEMDS).tar.bz2 -S * + popd + + rm -r $tmp +} +#run_test 32newtarball "Create a new test_32 disk image tarball for this version" + +# +# The list of applicable tarballs is returned via the caller's +# variable "tarballs". +# +t32_check() { + local node=$(facet_active_host $SINGLEMDS) + local r="do_node $node" + + if [ "$CLIENTONLY" ]; then + skip "Client-only testing" + exit 0 + fi + + if ! $r which $TUNEFS; then + skip_env "tunefs.lustre required on $node" + exit 0 + fi + + local IMGTYPE=$(facet_fstype $SINGLEMDS) + + tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\') + + if [ -z "$tarballs" ]; then + skip "No applicable tarballs found" + exit 0 + fi +} + +t32_test_cleanup() { + local tmp=$TMP/t32 + local fstype=$(facet_fstype $SINGLEMDS) + local rc=$? + + if $shall_cleanup_lustre; then + umount $tmp/mnt/lustre || rc=$? + fi + if $shall_cleanup_mdt; then + $r umount -d $tmp/mnt/mdt || rc=$? + fi + if $shall_cleanup_mdt1; then + $r umount -d $tmp/mnt/mdt1 || rc=$? + fi + if $shall_cleanup_ost; then + $r umount -d $tmp/mnt/ost || rc=$? + fi + + $r rm -rf $tmp + rm -rf $tmp + if [ $fstype == "zfs" ]; then + $r $ZPOOL destroy t32fs-mdt1 || rc=$? 
+ $r $ZPOOL destroy t32fs-ost1 || rc=$? + fi + return $rc } -cleanup_nocli32 () { - stop32 mds1 -f - stop32 ost1 -f - wait_exit_ST client +t32_bits_per_long() { + # + # Yes, this is not meant to be perfect. + # + case $1 in + ppc64|x86_64) + echo -n 64;; + i*86) + echo -n 32;; + esac } -cleanup_32() { - trap 0 - echo "Cleanup test_32 umount $MOUNT ..." - umount -f $MOUNT || true - echo "Cleanup local mds ost1 ..." - cleanup_nocli32 - combined_mgs_mds || start_mgs - unload_modules_conf +t32_reload_modules() { + local node=$1 + local all_removed=false + local i=0 + + while ((i < 20)); do + echo "Unloading modules on $node: Attempt $i" + do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) && + all_removed=true + do_rpc_nodes $node check_mem_leak || return 1 + if $all_removed; then + do_rpc_nodes $node load_modules + return 0 + fi + sleep 5 + i=$((i + 1)) + done + echo "Unloading modules on $node: Given up" + return 1 } -test_32a() { - client_only && skip "client only testing" && return 0 - [ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; } - [ -z "$TUNEFS" ] && skip_env "No tunefs" && return 0 +t32_wait_til_devices_gone() { + local node=$1 + local devices + local loops + local i=0 + + echo wait for devices to go + while ((i < 20)); do + devices=$(do_rpc_nodes $node $LCTL device_list | wc -l) + loops=$(do_rpc_nodes $node losetup -a | grep -c t32) + ((devices == 0 && loops == 0)) && return 0 + sleep 5 + i=$((i + 1)) + done + echo "waiting for dev on $node: dev $devices loop $loops given up" + do_rpc_nodes $node "losetup -a" + do_rpc_nodes $node "$LCTL devices_list" + return 1 +} - local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2 - [ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0 - local tmpdir=$TMP/conf32a - mkdir -p $tmpdir - tar xjvf $DISK1_8 -C $tmpdir || \ - { skip_env "Cannot untar $DISK1_8" && return 0; } +t32_verify_quota() { + local node=$1 + local fsname=$2 + local mnt=$3 + local fstype=$(facet_fstype $SINGLEMDS) + local qval + local cmd + + $LFS quota -u $T32_QID -v $mnt + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='1' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $img_bspace ] || { + echo "bspace, act:$qval, exp:$img_bspace" + return 1 + } + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='5' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $img_ispace ] || { + echo "ispace, act:$qval, exp:$img_ispace" + return 1 + } + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='3' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $T32_BLIMIT ] || { + echo "blimit, act:$qval, exp:$T32_BLIMIT" + return 1 + } + + qval=$($LFS quota -v -u $T32_QID $mnt | + awk 'BEGIN { num='7' } { if ($1 == "'$mnt'") \ + { if (NF == 1) { getline } else { num++ } ; print $num;} }' \ + | tr -d "*") + [ $qval -eq $T32_ILIMIT ] || { + echo "ilimit, act:$qval, exp:$T32_ILIMIT" + return 1 + } - load_modules - $LCTL set_param debug="$PTLDEBUG" + do_node $node $LCTL conf_param $fsname.quota.mdt=ug + cmd="$LCTL get_param -n osd-$fstype.$fsname-MDT0000" + cmd=$cmd.quota_slave.enabled + wait_update $node "$cmd" "ug" || { + echo "Enable mdt quota failed" + return 1 + } - $TUNEFS $tmpdir/mds || error "tunefs failed" + do_node $node $LCTL conf_param $fsname.quota.ost=ug + cmd="$LCTL get_param -n osd-$fstype.$fsname-OST0000" + 
cmd=$cmd.quota_slave.enabled + wait_update $node "$cmd" "ug" || { + echo "Enable ost quota failed" + return 1 + } - combined_mgs_mds || stop mgs + chmod 0777 $mnt + runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \ + bs=1M count=$(($T32_BLIMIT / 1024)) oflag=sync && { + echo "Write succeed, but expect -EDQUOT" + return 1 + } + rm -f $mnt/t32_qf_new - # nids are wrong, so client wont work, but server should start - start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \ - trap cleanup_32 EXIT INT || return 3 + runas -u $T32_QID -g $T32_QID createmany -m $mnt/t32_qf_ \ + $T32_ILIMIT && { + echo "Create succeed, but expect -EDQUOT" + return 1 + } + unlinkmany $mnt/t32_qf_ $T32_ILIMIT - local UUID=$($LCTL get_param -n mdt.lustre-MDT0000.uuid) - echo MDS uuid $UUID - [ "$UUID" == "lustre-MDT0000_UUID" ] || error "UUID is wrong: $UUID" + return 0 +} - $TUNEFS --mgsnode=$HOSTNAME $tmpdir/ost1 || error "tunefs failed" - start32 ost1 $tmpdir/ost1 "-o loop" || return 5 - UUID=$($LCTL get_param -n obdfilter.lustre-OST0000.uuid) - echo OST uuid $UUID - [ "$UUID" == "lustre-OST0000_UUID" ] || error "UUID is wrong: $UUID" +t32_test() { + local tarball=$1 + local writeconf=$2 + local dne_upgrade=${dne_upgrade:-"no"} + local ff_convert=${ff_convert:-"no"} + local shall_cleanup_mdt=false + local shall_cleanup_mdt1=false + local shall_cleanup_ost=false + local shall_cleanup_lustre=false + local node=$(facet_active_host $SINGLEMDS) + local r="do_node $node" + local node2=$(facet_active_host mds2) + local tmp=$TMP/t32 + local img_commit + local img_kernel + local img_arch + local img_bspace + local img_ispace + local fsname=t32fs + local nid=$($r $LCTL list_nids | head -1) + local mopts + local uuid + local nrpcs_orig + local nrpcs + local list + local fstype=$(facet_fstype $SINGLEMDS) + local mdt_dev=$tmp/mdt + local ost_dev=$tmp/ost + + trap 'trap - RETURN; t32_test_cleanup' RETURN + + mkdir -p $tmp/mnt/lustre + $r mkdir -p $tmp/mnt/{mdt,ost} + $r tar xjvf $tarball -S -C $tmp || { + error_noexit "Unpacking the disk image tarball" + return 1 + } + img_commit=$($r cat $tmp/commit) + img_kernel=$($r cat $tmp/kernel) + img_arch=$($r cat $tmp/arch) + img_bspace=$($r cat $tmp/bspace) + img_ispace=$($r cat $tmp/ispace) + echo "Upgrading from $(basename $tarball), created with:" + echo " Commit: $img_commit" + echo " Kernel: $img_kernel" + echo " Arch: $img_arch" + + local version=$(version_code $img_commit) + [[ $version -ge $(version_code 2.5.0) ]] && ff_convert="no" + + if [ $fstype == "zfs" ]; then + # import pool first + $r $ZPOOL import -f -d $tmp t32fs-mdt1 + $r $ZPOOL import -f -d $tmp t32fs-ost1 + mdt_dev=t32fs-mdt1/mdt1 + ost_dev=t32fs-ost1/ost1 + wait_update_facet $SINGLEMDS "$ZPOOL list | + awk '/^t32fs-mdt1/ { print \\\$1 }'" "t32fs-mdt1" || { + error_noexit "import zfs pool failed" + return 1 + } + fi - local NID=$($LCTL list_nids | head -1) + $r $LCTL set_param debug="$PTLDEBUG" - echo "OSC changes should succeed:" - $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7 - $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8 - echo "ok." 
+ $r $TUNEFS --dryrun $mdt_dev || { + $r losetup -a + error_noexit "tunefs.lustre before mounting the MDT" + return 1 + } + if [ "$writeconf" ]; then + mopts=writeconf + if [ $fstype == "ldiskfs" ]; then + mopts="loop,$mopts" + $r $TUNEFS --quota $mdt_dev || { + $r losetup -a + error_noexit "Enable mdt quota feature" + return 1 + } + fi + else + if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then + [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] || + { skip "LU-2200: Cannot run over Inifiniband w/o lctl replace_nids " + "(Need MGS version at least 2.3.59)"; return 0; } + + local osthost=$(facet_active_host ost1) + local ostnid=$(do_node $osthost $LCTL list_nids | head -1) + + mopts=nosvc + if [ $fstype == "ldiskfs" ]; then + mopts="loop,$mopts" + fi + $r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt + $r lctl replace_nids $fsname-OST0000 $ostnid + $r lctl replace_nids $fsname-MDT0000 $nid + $r umount -d $tmp/mnt/mdt + fi - echo "MDC changes should succeed:" - $LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9 - $LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10 - echo "ok." + mopts=exclude=$fsname-OST0000 + if [ $fstype == "ldiskfs" ]; then + mopts="loop,$mopts" + fi + fi - echo "LOV changes should succeed:" - $LCTL pool_new lustre.interop || return 11 - $LCTL conf_param lustre-MDT0000.lov.stripesize=4M || return 12 - echo "ok." + t32_wait_til_devices_gone $node - cleanup_32 + $r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt || { + $r losetup -a + error_noexit "Mounting the MDT" + return 1 + } + shall_cleanup_mdt=true - # mount a second time to make sure we didnt leave upgrade flag on - load_modules - $TUNEFS --dryrun $tmpdir/mds || error "tunefs failed" + if [ "$dne_upgrade" != "no" ]; then + local fs2mdsdev=$(mdsdevname 1_2) + local fs2mdsvdev=$(mdsvdevname 1_2) - combined_mgs_mds || stop mgs + echo "mkfs new MDT on ${fs2mdsdev}...." + if [ $(facet_fstype mds1) == ldiskfs ]; then + mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" + fi - start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \ - trap cleanup_32 EXIT INT || return 12 + add fs2mds $(mkfs_opts mds2 $fs2mdsdev $fsname) --reformat \ + $mkfsoptions $fs2mdsdev $fs2mdsvdev > /dev/null || { + error_noexit "Mkfs new MDT failed" + return 1 + } - cleanup_32 + $r $TUNEFS --dryrun $fs2mdsdev || { + error_noexit "tunefs.lustre before mounting the MDT" + return 1 + } - rm -rf $tmpdir || true # true is only for TMP on NFS -} -run_test 32a "Upgrade from 1.8 (not live)" + echo "mount new MDT....$fs2mdsdev" + $r mkdir -p $tmp/mnt/mdt1 + $r $MOUNT_CMD -o $mopts $fs2mdsdev $tmp/mnt/mdt1 || { + error_noexit "mount mdt1 failed" + return 1 + } -test_32b() { - client_only && skip "client only testing" && return 0 - [ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; } - [ -z "$TUNEFS" ] && skip_env "No tunefs" && return + $r $LCTL set_param -n mdt.${fsname}*.enable_remote_dir=1 || + error_noexit "enable remote dir create failed" - local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2 - [ ! 
-r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0 - local tmpdir=$TMP/conf32b - mkdir -p $tmpdir - tar xjvf $DISK1_8 -C $tmpdir || \ - { skip_env "Cannot untar $DISK1_8" && return ; } + shall_cleanup_mdt1=true + fi - load_modules - $LCTL set_param debug="+config" - local NEWNAME=lustre + uuid=$($r $LCTL get_param -n mdt.$fsname-MDT0000.uuid) || { + error_noexit "Getting MDT UUID" + return 1 + } + if [ "$uuid" != $fsname-MDT0000_UUID ]; then + error_noexit "Unexpected MDT UUID: \"$uuid\"" + return 1 + fi + + $r $TUNEFS --dryrun $ost_dev || { + error_noexit "tunefs.lustre before mounting the OST" + return 1 + } + if [ "$writeconf" ]; then + mopts=mgsnode=$nid,$writeconf + if [ $fstype == "ldiskfs" ]; then + mopts="loop,$mopts" + $r $TUNEFS --quota $ost_dev || { + $r losetup -a + error_noexit "Enable ost quota feature" + return 1 + } + fi + else + mopts=mgsnode=$nid + if [ $fstype == "ldiskfs" ]; then + mopts="loop,$mopts" + fi + fi + $r $MOUNT_CMD -o $mopts $ost_dev $tmp/mnt/ost || { + error_noexit "Mounting the OST" + return 1 + } + shall_cleanup_ost=true + + uuid=$($r $LCTL get_param -n obdfilter.$fsname-OST0000.uuid) || { + error_noexit "Getting OST UUID" + return 1 + } + if [ "$uuid" != $fsname-OST0000_UUID ]; then + error_noexit "Unexpected OST UUID: \"$uuid\"" + return 1 + fi + + $r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || { + error_noexit "Setting \"max_dirty_mb\"" + return 1 + } + $r $LCTL conf_param $fsname-OST0000.failover.node=$nid || { + error_noexit "Setting OST \"failover.node\"" + return 1 + } + $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || { + error_noexit "Setting \"max_rpcs_in_flight\"" + return 1 + } + $r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || { + error_noexit "Setting MDT \"failover.node\"" + return 1 + } + $r $LCTL pool_new $fsname.interop || { + error_noexit "Setting \"interop\"" + return 1 + } + $r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || { + error_noexit "Setting \"lov.stripesize\"" + return 1 + } + + if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then + $r $LCTL lfsck_start -M $fsname-OST0000 || { + error_noexit "Start OI scrub on OST0" + return 1 + } + + # The oi_scrub should be on ost1, but for test_32(), + # all on the SINGLEMDS. 
+ wait_update_facet $SINGLEMDS "$LCTL get_param -n \ + osd-ldiskfs.$fsname-OST0000.oi_scrub | + awk '/^status/ { print \\\$2 }'" "completed" 30 || { + error_noexit "Failed to get the expected 'completed'" + return 1 + } + + local UPDATED=$($r $LCTL get_param -n \ + osd-ldiskfs.$fsname-OST0000.oi_scrub | + awk '/^updated/ { print $2 }') + [ $UPDATED -ge 1 ] || { + error_noexit "Only $UPDATED objects have been converted" + return 1 + } + fi + + if [ "$dne_upgrade" != "no" ]; then + $r $LCTL conf_param \ + $fsname-MDT0001.mdc.max_rpcs_in_flight=9 || { + error_noexit "Setting MDT1 \"max_rpcs_in_flight\"" + return 1 + } + $r $LCTL conf_param $fsname-MDT0001.failover.node=$nid || { + error_noexit "Setting MDT1 \"failover.node\"" + return 1 + } + $r $LCTL conf_param $fsname-MDT0001.lov.stripesize=4M || { + error_noexit "Setting MDT1 \"lov.stripesize\"" + return 1 + } - # writeconf will cause servers to register with their current nids - $TUNEFS --writeconf --erase-params \ - --param mdt.identity_upcall=$L_GETIDENTITY \ - --fsname=$NEWNAME $tmpdir/mds || error "tunefs failed" - combined_mgs_mds || stop mgs + fi + + if [ "$writeconf" ]; then + $MOUNT_CMD $nid:/$fsname $tmp/mnt/lustre || { + error_noexit "Mounting the client" + return 1 + } + shall_cleanup_lustre=true + $LCTL set_param debug="$PTLDEBUG" + + t32_verify_quota $node $fsname $tmp/mnt/lustre || { + error_noexit "verify quota failed" + return 1 + } + + if [ "$dne_upgrade" != "no" ]; then + $LFS mkdir -i 1 $tmp/mnt/lustre/remote_dir || { + error_noexit "set remote dir failed" + return 1 + } + + pushd $tmp/mnt/lustre + tar -cf - . --exclude=./remote_dir | + tar -xvf - -C remote_dir 1>/dev/null || { + error_noexit "cp to remote dir failed" + return 1 + } + popd + fi - start32 mds1 $tmpdir/mds "-o loop" && \ - trap cleanup_32 EXIT INT || return 3 + dd if=/dev/zero of=$tmp/mnt/lustre/tmp_file bs=10k count=10 || { + error_noexit "dd failed" + return 1 + } + rm -rf $tmp/mnt/lustre/tmp_file || { + error_noexit "rm failed" + return 1 + } + + if $r test -f $tmp/sha1sums; then + # LU-2393 - do both sorts on same node to ensure locale + # is identical + $r cat $tmp/sha1sums | sort -k 2 >$tmp/sha1sums.orig + if [ "$dne_upgrade" != "no" ]; then + pushd $tmp/mnt/lustre/remote_dir + else + pushd $tmp/mnt/lustre + fi + + find ! -name .lustre -type f -exec sha1sum {} \; | + sort -k 2 >$tmp/sha1sums || { + error_noexit "sha1sum" + return 1 + } + popd + if ! diff -ub $tmp/sha1sums.orig $tmp/sha1sums; then + error_noexit "sha1sum verification failed" + return 1 + fi + else + echo "sha1sum verification skipped" + fi - local UUID=$($LCTL get_param -n mdt.${NEWNAME}-MDT0000.uuid) - echo MDS uuid $UUID - [ "$UUID" == "${NEWNAME}-MDT0000_UUID" ] || error "UUID is wrong: $UUID" + if [ "$dne_upgrade" != "no" ]; then + rm -rf $tmp/mnt/lustre/remote_dir || { + error_noexit "remove remote dir failed" + return 1 + } + fi - $TUNEFS --writeconf --erase-params \ - --mgsnode=$HOSTNAME --fsname=$NEWNAME $tmpdir/ost1 ||\ - error "tunefs failed" - start32 ost1 $tmpdir/ost1 "-o loop" || return 5 - UUID=$($LCTL get_param -n obdfilter.${NEWNAME}-OST0000.uuid) - echo OST uuid $UUID - [ "$UUID" == "${NEWNAME}-OST0000_UUID" ] || error "UUID is wrong: $UUID" + if $r test -f $tmp/list; then + # + # There is not a Test Framework API to copy files to or + # from a remote node. 
+ # + # LU-2393 - do both sorts on same node to ensure locale + # is identical + $r cat $tmp/list | sort -k 6 >$tmp/list.orig + pushd $tmp/mnt/lustre + ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || { + error_noexit "ls" + return 1 + } + popd + # + # 32-bit and 64-bit clients use different algorithms to + # convert FIDs into inode numbers. Hence, remove the inode + # numbers from the lists, if the original list was created + # on an architecture with different number of bits per + # "long". + # + if [ $(t32_bits_per_long $(uname -m)) != \ + $(t32_bits_per_long $img_arch) ]; then + echo "Different number of bits per \"long\" from the disk image" + for list in list.orig list; do + sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list + done + fi + if ! diff -ub $tmp/list.orig $tmp/list; then + error_noexit "list verification failed" + return 1 + fi + else + echo "list verification skipped" + fi - local NID=$($LCTL list_nids | head -1) + # + # When adding new data verification tests, please check for + # the presence of the required reference files first, like + # the "sha1sums" and "list" tests above, to avoid the need to + # regenerate every image for each test addition. + # + + nrpcs_orig=$($LCTL get_param \ + -n mdc.*MDT0000*.max_rpcs_in_flight) || { + error_noexit "Getting \"max_rpcs_in_flight\"" + return 1 + } + nrpcs=$((nrpcs_orig + 5)) + $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || { + error_noexit "Changing \"max_rpcs_in_flight\"" + return 1 + } + wait_update $HOSTNAME "$LCTL get_param \ + -n mdc.*MDT0000*.max_rpcs_in_flight" $nrpcs || { + error_noexit "Verifying \"max_rpcs_in_flight\"" + return 1 + } + + umount $tmp/mnt/lustre || { + error_noexit "Unmounting the client" + return 1 + } + shall_cleanup_lustre=false + else + if [ "$dne_upgrade" != "no" ]; then + $r umount -d $tmp/mnt/mdt1 || { + error_noexit "Unmounting the MDT2" + return 1 + } + shall_cleanup_mdt1=false + fi - echo "OSC changes should succeed:" - $LCTL conf_param ${NEWNAME}-OST0000.osc.max_dirty_mb=15 || return 7 - $LCTL conf_param ${NEWNAME}-OST0000.failover.node=$NID || return 8 - echo "ok." + $r umount -d $tmp/mnt/mdt || { + error_noexit "Unmounting the MDT" + return 1 + } + shall_cleanup_mdt=false + + $r umount -d $tmp/mnt/ost || { + error_noexit "Unmounting the OST" + return 1 + } + shall_cleanup_ost=false + + t32_reload_modules $node || { + error_noexit "Reloading modules" + return 1 + } + + # mount a second time to make sure we didnt leave upgrade flag on + $r $TUNEFS --dryrun $mdt_dev || { + $r losetup -a + error_noexit "tunefs.lustre before remounting the MDT" + return 1 + } + + mopts=exclude=$fsname-OST0000 + if [ $fstype == "ldiskfs" ]; then + mopts="loop,$mopts" + fi + $r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt || { + error_noexit "Remounting the MDT" + return 1 + } + shall_cleanup_mdt=true + fi +} - echo "MDC changes should succeed:" - $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=9 || return 9 - $LCTL conf_param ${NEWNAME}-MDT0000.failover.node=$NID || return 10 - echo "ok." +test_32a() { + local tarballs + local tarball + local rc=0 - echo "LOV changes should succeed:" - $LCTL pool_new ${NEWNAME}.interop || return 11 - $LCTL conf_param ${NEWNAME}-MDT0000.lov.stripesize=4M || return 12 - echo "ok." + t32_check + for tarball in $tarballs; do + t32_test $tarball || let "rc += $?" 
+ done + return $rc +} +run_test 32a "Upgrade (not live)" - # MDT and OST should have registered with new nids, so we should have - # a fully-functioning client - echo "Check client and old fs contents" +test_32b() { + local tarballs + local tarball + local rc=0 - local device=`h2$NETTYPE $HOSTNAME`:/$NEWNAME - echo "Starting local client: $HOSTNAME: $device $MOUNT" - mount -t lustre $device $MOUNT || return 1 + t32_check + for tarball in $tarballs; do + t32_test $tarball writeconf || let "rc += $?" + done + return $rc +} +run_test 32b "Upgrade with writeconf" - local old=$($LCTL get_param -n mdc.*.max_rpcs_in_flight) - local new=$((old + 5)) - $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new - wait_update $HOSTNAME "$LCTL get_param -n mdc.*.max_rpcs_in_flight" $new || return 11 +test_32c() { + local tarballs + local tarball + local rc=0 - [ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "94306271 1478" ] || return 12 - echo "ok." + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + t32_check + for tarball in $tarballs; do + dne_upgrade=yes t32_test $tarball writeconf || rc=$? + done + return $rc +} +run_test 32c "dne upgrade test" - cleanup_32 +test_32d() { + local tarballs + local tarball + local rc=0 - rm -rf $tmpdir || true # true is only for TMP on NFS + t32_check + for tarball in $tarballs; do + ff_convert=yes t32_test $tarball || rc=$? + done + return $rc } -run_test 32b "Upgrade from 1.8 with writeconf" +run_test 32d "convert ff test" test_33a() { # bug 12333, was test_33 local rc=0 @@ -1361,16 +2074,17 @@ test_33a() { # bug 12333, was test_33 mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931. fi - add fs2mds $(mkfs_opts mds1) --fsname=${FSNAME2} --reformat \ - $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10 - add fs2ost $(mkfs_opts ost1) --mgsnode=$MGSNID --fsname=${FSNAME2} \ - --index=8191 --reformat $fs2ostdev $fs2ostvdev || exit 10 + add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \ + --reformat $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10 + add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \ + --fsname=${FSNAME2} --index=8191 --reformat $fs2ostdev \ + $fs2ostvdev || exit 10 - start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT + start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT start fs2ost $fs2ostdev $OST_MOUNT_OPTS do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || rc=1 mkdir -p $MOUNT2 - mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || rc=2 + $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || rc=2 echo "ok." 
cp /etc/hosts $MOUNT2/ || rc=3 @@ -1389,7 +2103,7 @@ test_33b() { # was test_34 do_facet client dd if=/dev/zero of=$MOUNT/24 bs=1024k count=1 # Drop lock cancelation reply during umount - #define OBD_FAIL_LDLM_CANCEL 0x304 + #define OBD_FAIL_LDLM_CANCEL_NET 0x304 do_facet client lctl set_param fail_loc=0x80000304 #lctl set_param debug=-1 umount_client $MOUNT @@ -1452,8 +2166,10 @@ test_35a() { # bug 12459 log "Set up a fake failnode for the MDS" FAKENID="127.0.0.2" - local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) - do_facet mgs $LCTL conf_param ${device}.failover.node=$FAKENID || return 4 + local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) + do_facet mgs "$LCTL conf_param \ + ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 4 log "Wait for RECONNECT_INTERVAL seconds (10s)" sleep 10 @@ -1461,13 +2177,13 @@ test_35a() { # bug 12459 MSG="conf-sanity.sh test_35a `date +%F%kh%Mm%Ss`" $LCTL clear log "$MSG" - log "Stopping the MDT:" - stop_mds || return 5 + log "Stopping the MDT: $device" + stop_mdt 1 || return 5 df $MOUNT > /dev/null 2>&1 & DFPID=$! - log "Restarting the MDT:" - start_mds || return 6 + log "Restarting the MDT: $device" + start_mdt 1 || return 6 log "Wait for df ($DFPID) ... " wait $DFPID log "done" @@ -1489,7 +2205,7 @@ test_35a() { # bug 12459 [ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7 cleanup # remove nid settings - writeconf + writeconf_or_reformat } run_test 35a "Reconnect to the last active server first" @@ -1505,10 +2221,10 @@ test_35b() { # bug 18674 log "Set up a fake failnode for the MDS" FAKENID="127.0.0.2" - local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | \ - awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) - do_facet mgs "$LCTL conf_param ${device}.failover.node=$FAKENID" || \ - return 1 + local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | + awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1) + do_facet mgs "$LCTL conf_param \ + ${device}.failover.node=$(h2$NETTYPE $FAKENID)" || return 1 local at_max_saved=0 # adaptive timeouts may prevent seeing the issue @@ -1569,7 +2285,7 @@ test_35b() { # bug 18674 cleanup # remove nid settings - writeconf + writeconf_or_reformat } run_test 35b "Continue reconnection retries, if the active server is busy" @@ -1598,20 +2314,20 @@ test_36() { # 12743 local fs2ostvdev=$(ostvdevname 1_2) local fs3ostvdev=$(ostvdevname 2_2) - add fs2mds $(mkfs_opts mds1) --fsname=${FSNAME2} --reformat \ - $fs2mdsdev $fs2mdsvdev || exit 10 + add fs2mds $(mkfs_opts mds1 ${fs2mdsdev}) --mgs --fsname=${FSNAME2} \ + --reformat $fs2mdsdev $fs2mdsvdev || exit 10 # XXX after we support non 4K disk blocksize in ldiskfs, specify a # different one than the default value here. 
- add fs2ost $(mkfs_opts ost1) --mgsnode=$MGSNID --fsname=${FSNAME2} \ - --reformat $fs2ostdev $fs2ostvdev || exit 10 - add fs3ost $(mkfs_opts ost1) --mgsnode=$MGSNID --fsname=${FSNAME2} \ - --reformat $fs3ostdev $fs3ostvdev || exit 10 + add fs2ost $(mkfs_opts ost1 ${fs2ostdev}) --mgsnode=$MGSNID \ + --fsname=${FSNAME2} --reformat $fs2ostdev $fs2ostvdev || exit 10 + add fs3ost $(mkfs_opts ost2 ${fs3ostdev}) --mgsnode=$MGSNID \ + --fsname=${FSNAME2} --reformat $fs3ostdev $fs3ostvdev || exit 10 start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS start fs2ost $fs2ostdev $OST_MOUNT_OPTS start fs3ost $fs3ostdev $OST_MOUNT_OPTS mkdir -p $MOUNT2 - mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1 + $MOUNT_CMD $MGSNID:/${FSNAME2} $MOUNT2 || return 1 sleep 5 # until 11778 fixed @@ -1695,26 +2411,30 @@ test_37() { run_test 37 "verify set tunables works for symlink device" test_38() { # bug 14222 + local fstype=$(facet_fstype $SINGLEMDS) + local mntpt=$(facet_mntpt $SINGLEMDS) + setup # like runtests - COUNT=10 - SRC="/etc /bin" - FILES=`find $SRC -type f -mtime +1 | head -n $COUNT` + local COUNT=10 + local SRC="/etc /bin" + local FILES=$(find $SRC -type f -mtime +1 | head -n $COUNT) log "copying $(echo $FILES | wc -w) files to $DIR/$tdir" mkdir -p $DIR/$tdir - tar cf - $FILES | tar xf - -C $DIR/$tdir || \ + tar cf - $FILES | tar xf - -C $DIR/$tdir || error "copying $SRC to $DIR/$tdir" sync umount_client $MOUNT + do_facet $SINGLEMDS "$LCTL get_param osp.*.prealloc_next_id" stop_mds - log "rename lov_objid file on MDS" - rm -f $TMP/lov_objid.orig + log "delete lov_objid file on MDS" - local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) - do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV" - do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV" + mount_fstype $SINGLEMDS || error "mount MDS failed (1)" + + do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid; rm $mntpt/lov_objid" + + unmount_fstype $SINGLEMDS || error "umount failed (1)" - do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.orig" # check create in mds_lov_connect start_mds mount_client $MOUNT @@ -1722,17 +2442,28 @@ test_38() { # bug 14222 [ $V ] && log "verifying $DIR/$tdir/$f" diff -q $f $DIR/$tdir/$f || ERROR=y done - do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new\\\" $MDSDEV" - do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new" - [ "$ERROR" = "y" ] && error "old and new files are different after connect" || true + do_facet $SINGLEMDS "$LCTL get_param osp.*.prealloc_next_id" + if [ "$ERROR" = "y" ]; then + # check it's updates in sync + umount_client $MOUNT + stop_mds + mount_fstype $SIGNLEMDS + do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid" + unmount_fstype $SINGLEMDS + error "old and new files are different after connect" || true + fi + touch $DIR/$tdir/f2 || error "f2 file create failed" # check it's updates in sync umount_client $MOUNT stop_mds - do_facet $SINGLEMDS dd if=/dev/zero of=$TMP/lov_objid.clear bs=4096 count=1 - do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV" - do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"write $TMP/lov_objid.clear lov_objid\\\" $MDSDEV " + mount_fstype $SINGLEMDS || error "mount MDS failed (3)" + + do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid" + do_facet $SINGLEMDS dd if=/dev/zero of=$mntpt/lov_objid.clear count=8 + + unmount_fstype $SINGLEMDS || error "umount failed (3)" start_mds mount_client $MOUNT @@ -1740,11 +2471,16 @@ test_38() { # bug 14222 [ $V ] && log "verifying $DIR/$tdir/$f" diff -q $f $DIR/$tdir/$f || ERROR=y 
 	done
-	do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new1\\\" $MDSDEV"
-	do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new1"
+	touch $DIR/$tdir/f3 || error "f3 file create failed"
+	do_facet $SINGLEMDS "$LCTL get_param osp.*.prealloc_next_id"
 	umount_client $MOUNT
 	stop_mds
-	[ "$ERROR" = "y" ] && error "old and new files are different after sync" || true
+	mount_fstype $SINGLEMDS || error "mount MDS failed (4)"
+	do_facet $SINGLEMDS "od -Ax -td8 $mntpt/lov_objid"
+	unmount_fstype $SINGLEMDS || error "umount failed (4)"
+
+	[ "$ERROR" = "y" ] &&
+		error "old and new files are different after sync" || true
 	log "files compared the same"
 	cleanup
@@ -1829,29 +2565,142 @@ test_41b() {
 }
 run_test 41b "mount mds with --nosvc and --nomgs on first mount"
-test_42() { #bug 14693
-	setup
-	check_mount || return 2
-	do_facet mgs $LCTL conf_param lustre.llite.some_wrong_param=10
-	umount_client $MOUNT
-	mount_client $MOUNT || return 1
-	cleanup
-	return 0
-}
-run_test 42 "invalid config param should not prevent client from mounting"
-
-test_43() {
-	[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
-	setup
-	chmod ugo+x $DIR || error "chmod 0 failed"
-	set_and_check mds \
-		"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
-		"$FSNAME.mdt.root_squash" \
-		"0:0"
-	set_and_check mds \
-		"lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
-		"$FSNAME.mdt.nosquash_nids" \
-		"NONE"
+test_41c() {
+	cleanup
+	# MDT concurrent start
+	#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
+	do_facet $SINGLEMDS "lctl set_param fail_loc=0x703"
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
+	local pid=$!
+	sleep 2
+	do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
+	local pid2=$!
+	wait $pid2
+	local rc2=$?
+	wait $pid
+	local rc=$?
+	if [ $rc == 0 ] && [ $rc2 == 114 ]; then
+		echo "1st MDT start succeeded"
+		echo "2nd MDT start failed with EALREADY"
+	elif [ $rc2 == 0 ] && [ $rc == 114 ]; then
+		echo "1st MDT start failed with EALREADY"
+		echo "2nd MDT start succeeded"
+	else
+		stop mds1 -f
+		error "unexpected concurrent MDT mounts result, rc=$rc rc2=$rc2"
+	fi
+
+	# OST concurrent start
+	#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
+	do_facet ost1 "lctl set_param fail_loc=0x703"
+	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
+	pid=$!
+	sleep 2
+	do_facet ost1 "lctl set_param fail_loc=0x0"
+	start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
+	pid2=$!
+	wait $pid2
+	rc2=$?
+	wait $pid
+	rc=$?
+	if [ $rc == 0 ] && [ $rc2 == 114 ]; then
+		echo "1st OST start succeeded"
+		echo "2nd OST start failed with EALREADY"
+	elif [ $rc2 == 0 ] && [ $rc == 114 ]; then
+		echo "1st OST start failed with EALREADY"
+		echo "2nd OST start succeeded"
+	else
+		stop mds1 -f
+		stop ost1 -f
+		error "unexpected concurrent OST mounts result, rc=$rc rc2=$rc2"
+	fi
+	# cleanup
+	stop mds1 -f
+	stop ost1 -f
+
+	# verify everything ok
+	start_mds
+	if [ $? != 0 ]
+	then
+		stop mds1 -f
+		error "MDT(s) start failed"
+	fi
+
+	start_ost
+	if [ $? != 0 ]
+	then
+		stop mds1 -f
+		stop ost1 -f
+		error "OST(s) start failed"
+	fi
+
+	mount_client $MOUNT
+	if [ $? != 0 ]
+	then
+		stop mds1 -f
+		stop ost1 -f
+		error "client start failed"
+	fi
+	check_mount
+	if [ $? != 0 ]
+	then
+		stop mds1 -f
+		stop ost1 -f
+		error "client mount failed"
+	fi
+	cleanup
+}
+run_test 41c "concurrent mounts of MDT/OST should all fail but one"
+
+test_42() { #bug 14693
+	setup
+	check_mount || error "client was not mounted"
+
+	do_facet mgs $LCTL conf_param $FSNAME.llite.some_wrong_param=10
+	umount_client $MOUNT ||
+		error "unmounting client failed with invalid llite param"
+	mount_client $MOUNT ||
+		error "mounting client failed with invalid llite param"
+
+	do_facet mgs $LCTL conf_param $FSNAME.sys.some_wrong_param=20
+	cleanup || error "stopping $FSNAME failed with invalid sys param"
+	load_modules
+	setup
+	check_mount || error "client was not mounted with invalid sys param"
+	cleanup || error "stopping $FSNAME failed with invalid sys param"
+	return 0
+}
+run_test 42 "allow client/server mount/unmount with invalid config param"
+
+test_43() {
+	[[ $(lustre_version_code mgs) -ge $(version_code 2.5.58) ]] ||
+		{ skip "Need MDS version at least 2.5.58" && return 0; }
+	[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
+
+	ID1=${ID1:-501}
+	USER1=$(cat /etc/passwd | grep :$ID1:$ID1: | cut -d: -f1)
+	[ -z "$USER1" ] && skip_env "missing user with uid=$ID1 gid=$ID1" &&
+		return
+
+	setup
+	chmod ugo+x $DIR || error "chmod 0 failed"
+	set_conf_param_and_check mds \
+		"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+		"$FSNAME.mdt.root_squash" \
+		"0:0"
+	wait_update $HOSTNAME \
+		"lctl get_param -n llite.${FSNAME}*.root_squash" \
+		"0:0" ||
+		error "check llite root_squash failed!"
+	set_conf_param_and_check mds \
+		"lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+		"$FSNAME.mdt.nosquash_nids" \
+		"NONE"
+	wait_update $HOSTNAME \
+		"lctl get_param -n llite.${FSNAME}*.nosquash_nids" \
+		"NONE" ||
+		error "check llite nosquash_nids failed!"
 	#
 	# create set of test files
@@ -1867,15 +2716,23 @@ test_43() {
 	chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
 	touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
-	#
-	# check root_squash:
-	#   set root squash UID:GID to RUNAS_ID
-	#   root should be able to access only files owned by RUNAS_ID
-	#
-	set_and_check mds \
-		"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
-		"$FSNAME.mdt.root_squash" \
-		"$RUNAS_ID:$RUNAS_ID"
+	echo "777" > $DIR/$tfile-user1file || error "write 7 failed"
+	chmod go-rw $DIR/$tfile-user1file || error "chmod 7 failed"
+	chown $ID1.$ID1 $DIR/$tfile-user1file || error "chown failed"
+
+	#
+	# check root_squash:
+	#   set root squash UID:GID to RUNAS_ID
+	#   root should be able to access only files owned by RUNAS_ID
+	#
+	set_conf_param_and_check mds \
+		"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+		"$FSNAME.mdt.root_squash" \
+		"$RUNAS_ID:$RUNAS_ID"
+	wait_update $HOSTNAME \
+		"lctl get_param -n llite.${FSNAME}*.root_squash" \
+		"$RUNAS_ID:$RUNAS_ID" ||
+		error "check llite root_squash failed!"
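+	# with root now squashed to RUNAS_ID:RUNAS_ID (see comment above),
+	# the checks below verify that root can still access the file owned
+	# by RUNAS_ID but is denied access to the root-owned files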
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile) dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \ @@ -1883,7 +2740,7 @@ test_43() { echo "$ST: root read permission is granted - ok" echo "444" | \ - dd conv=notrunc if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \ + dd conv=notrunc of=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \ error "$ST: root write permission is denied" echo "$ST: root write permission is granted - ok" @@ -1906,18 +2763,47 @@ test_43() { error "$ST: root create permission is granted" echo "$ST: root create permission is denied - ok" - # - # check nosquash_nids: - # put client's NID into nosquash_nids list, - # root should be able to access root file after that - # - local NIDLIST=$(lctl list_nids all | tr '\n' ' ') - NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp" - NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ') - set_and_check mds \ - "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \ - "$FSNAME-MDTall.mdt.nosquash_nids" \ - "$NIDLIST" + + # LU-1778 + # check root_squash is enforced independently + # of client cache content + # + # access file by USER1, keep access open + # root should be denied access to user file + + runas -u $ID1 tail -f $DIR/$tfile-user1file 1>/dev/null 2>&1 & + pid=$! + sleep 1 + + ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-user1file) + dd if=$DIR/$tfile-user1file 1>/dev/null 2>&1 && + { kill $pid; error "$ST: root read permission is granted"; } + echo "$ST: root read permission is denied - ok" + + echo "777" | \ + dd conv=notrunc of=$DIR/$tfile-user1file 1>/dev/null 2>&1 && + { kill $pid; error "$ST: root write permission is granted"; } + echo "$ST: root write permission is denied - ok" + + kill $pid + wait $pid + + # + # check nosquash_nids: + # put client's NID into nosquash_nids list, + # root should be able to access root file after that + # + local NIDLIST=$(lctl list_nids all | tr '\n' ' ') + NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp" + NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ') + set_conf_param_and_check mds \ + "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \ + "$FSNAME-MDTall.mdt.nosquash_nids" \ + "$NIDLIST" + wait_update $HOSTNAME \ + "lctl get_param -n llite.${FSNAME}*.nosquash_nids" \ + "$NIDLIST" || + error "check llite nosquash_nids failed!" ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile) dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \ @@ -1995,7 +2881,7 @@ cleanup_46a() { stop_mds || rc=$? cleanup_nocli || rc=$? #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat return $rc } @@ -2115,7 +3001,7 @@ test_48() { # bug 17636 run_test 48 "too many acls on file" # check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE -test_49() { # bug 17710 +test_49a() { # bug 17710 local timeout_orig=$TIMEOUT local ldlm_timeout_orig=$LDLM_TIMEOUT local LOCAL_TIMEOUT=20 @@ -2125,49 +3011,60 @@ test_49() { # bug 17710 reformat setup_noconfig - check_mount || return 1 + check_mount || error "client mount failed" echo "check ldlm_timout..." 
- LDLM_MDS="`do_facet $SINGLEMDS lctl get_param -n ldlm_timeout`" - LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`" - LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`" + local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)" + local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)" + local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)" - if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then + if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT" fi if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT / 3)) ]; then - error "LDLM_TIMEOUT($LDLM_MDS) is not correct" + error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT / 3))" fi umount_client $MOUNT - stop_ost || return 2 - stop_mds || return 3 + stop_ost || error "problem stopping OSS" + stop_mds || error "problem stopping MDS" + + LDLM_TIMEOUT=$ldlm_timeout_orig + TIMEOUT=$timeout_orig +} +run_test 49a "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre" + +test_49b() { # bug 17710 + local timeout_orig=$TIMEOUT + local ldlm_timeout_orig=$LDLM_TIMEOUT + local LOCAL_TIMEOUT=20 LDLM_TIMEOUT=$((LOCAL_TIMEOUT - 1)) + TIMEOUT=$LOCAL_TIMEOUT reformat setup_noconfig - check_mount || return 7 + check_mount || error "client mount failed" - LDLM_MDS="`do_facet $SINGLEMDS lctl get_param -n ldlm_timeout`" - LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`" - LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`" + local LDLM_MDS="$(do_facet $SINGLEMDS lctl get_param -n ldlm_timeout)" + local LDLM_OST1="$(do_facet ost1 lctl get_param -n ldlm_timeout)" + local LDLM_CLIENT="$(do_facet client lctl get_param -n ldlm_timeout)" - if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then + if [ $LDLM_MDS -ne $LDLM_OST1 -o $LDLM_MDS -ne $LDLM_CLIENT ]; then error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT" fi if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then - error "LDLM_TIMEOUT($LDLM_MDS) is not correct" + error "LDLM_TIMEOUT($LDLM_MDS) is not $((LOCAL_TIMEOUT - 1))" fi - cleanup || return $? 
+ cleanup || error "cleanup failed" LDLM_TIMEOUT=$ldlm_timeout_orig TIMEOUT=$timeout_orig } -run_test 49 "check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE" +run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre" lazystatfs() { # Test both statfs and lfs df and fail if either one fails @@ -2237,7 +3134,7 @@ test_50c() { stop_ost2 || error "Unable to stop OST2" stop_mds || error "Unable to stop MDS" #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 50c "lazystatfs one server down ==========================" @@ -2259,7 +3156,7 @@ test_50d() { stop_ost2 || error "Unable to stop OST2" stop_mds || error "Unable to stop MDS" #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 50d "lazystatfs client/server conn race ==========================" @@ -2341,7 +3238,7 @@ test_50f() { stop_ost || error "Unable to stop OST1" stop_mds || error "Unable to stop MDS" #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 50f "normal statfs one server in down ==========================" @@ -2369,10 +3266,48 @@ test_50g() { stop_ost || error "Unable to stop OST1" stop_mds || error "Unable to stop MDS" #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 50g "deactivated OST should not cause panic=====================" +# LU-642 +test_50h() { + # prepare MDT/OST, make OSC inactive for OST1 + [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return + + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 + do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" || + error "tunefs OST1 failed" + start_mds || error "Unable to start MDT" + start_ost || error "Unable to start OST1" + start_ost2 || error "Unable to start OST2" + mount_client $MOUNT || error "client start failed" + + mkdir -p $DIR/$tdir + + # activatate OSC for OST1 + local TEST="$LCTL get_param -n osc.${FSNAME}-OST0000-osc-[!M]*.active" + set_conf_param_and_check client \ + "$TEST" "${FSNAME}-OST0000.osc.active" 1 || + error "Unable to activate OST1" + + mkdir -p $DIR/$tdir/2 + $LFS setstripe -c -1 -i 0 $DIR/$tdir/2 + sleep 1 && echo "create a file after OST1 is activated" + # create some file + createmany -o $DIR/$tdir/2/$tfile-%d 1 + + # check OSC import is working + stat $DIR/$tdir/2/* >/dev/null 2>&1 || + error "some OSC imports are still not connected" + + # cleanup + umount_client $MOUNT || error "Unable to umount client" + stop_ost2 || error "Unable to stop OST2" + cleanup_nocli +} +run_test 50h "LU-642: activate deactivated OST ===" + test_51() { local LOCAL_TIMEOUT=20 @@ -2392,7 +3327,7 @@ test_51() { stop_ost2 || return 3 cleanup #writeconf to remove all ost2 traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added" @@ -2445,6 +3380,11 @@ diff_files_xattrs() } test_52() { + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi + start_mds [ $? 
-eq 0 ] || { error "Unable to start MDS"; return 1; } start_ost @@ -2501,16 +3441,23 @@ test_52() { # backup objects echo backup objects to $ost1tmp/objects - local objects=$(do_node $ost1node 'find '$ost1mnt'/O/0 -type f -size +0'\ - '-newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"') - copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects + local objects=$(do_node $ost1node 'find '$ost1mnt'/O/[0-9]* -type f'\ + '-size +0 -newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"') + copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs \ + $objects [ $? -eq 0 ] || { error "Unable to copy objects"; return 13; } # move objects to lost+found do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found' [ $? -eq 0 ] || { error "Unable to move objects"; return 14; } + # recover objects dry-run + echo "ll_recover_lost_found_objs dry_run" + do_node $ost1node "ll_recover_lost_found_objs -n -d $ost1mnt/O" + [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; } + # recover objects + echo "ll_recover_lost_found_objs fix run" do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found" [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; } @@ -2547,6 +3494,7 @@ thread_sanity() { local facet=$2 local parampat=$3 local opts=$4 + local basethr=$5 local tmin local tmin2 local tmax @@ -2554,10 +3502,10 @@ thread_sanity() { local tstarted local paramp local msg="Insane $modname thread counts" - local ncpts=$(check_cpt_number) + local ncpts=$(check_cpt_number $facet) + local nthrs shift 4 - setup check_mount || return 41 # We need to expand $parampat, but it may match multiple parameters, so @@ -2576,17 +3524,23 @@ thread_sanity() { tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0) lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $? lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $? + nthrs=$(expr $tmax - $tmin) + if [ $nthrs -lt $ncpts ]; then + nthrs=0 + else + nthrs=$ncpts + fi [ $tmin -eq $tmax -a $tmin -eq $tstarted ] && skip_env "module parameter forced $facet thread count" && tmin=3 && tmax=$((3 * tmax)) # Check that we can change min/max - do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + ncpts))" - do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - ncpts))" + do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + nthrs))" + do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - nthrs))" tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0) tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0) - lassert 25 "$msg" '(($tmin2 == ($tmin + $ncpts) && $tmax2 == ($tmax - $ncpts)))' || return $? + lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) && $tmax2 == ($tmax - $nthrs)))' || return $? 
# Check that we can set min/max to the same value tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0) @@ -2606,7 +3560,8 @@ thread_sanity() { LOAD_MODULES_REMOTE=true cleanup local oldvalue - setmodopts -a $modname "$opts" oldvalue + local newvalue="${opts}=$(expr $basethr \* $ncpts)" + setmodopts -a $modname "$newvalue" oldvalue load_modules setup @@ -2619,36 +3574,40 @@ thread_sanity() { tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min") tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max") tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started") - lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $? + lassert 28 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $? cleanup - # Workaround a YALA bug where YALA expects that modules will remain - # loaded on the servers - LOAD_MODULES_REMOTE=false load_modules setup - cleanup } test_53a() { - local ncpts=$(check_cpt_number) - local nthrs - - nthrs=`expr 16 \* $ncpts` - thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads='$nthrs + setup + thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16' + cleanup } run_test 53a "check OSS thread count params" test_53b() { - local ncpts=$(check_cpt_number) - local nthrs - - nthrs=`expr 16 \* $ncpts` - thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads='$nthrs + setup + local mds=$(do_facet $SINGLEMDS "lctl get_param -N mds.*.*.threads_max \ + 2>/dev/null") + if [ -z "$mds" ]; then + #running this on an old MDT + thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' 16 + else + thread_sanity MDT $SINGLEMDS 'mds.*.*.' 'mds_num_threads' 16 + fi + cleanup } -run_test 53b "check MDT thread count params" +run_test 53b "check MDS thread count params" test_54a() { + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi + do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1) -p [ $? -eq 0 ] || error "llverdev failed!" reformat_and_config @@ -2656,6 +3615,11 @@ test_54a() { run_test 54a "test llverdev and partial verify of device" test_54b() { + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi + setup run_llverfs $MOUNT -p [ $? -eq 0 ] || error "llverfs failed!" 
@@ -2670,15 +3634,20 @@ lov_objid_size() } test_55() { + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi + local mdsdev=$(mdsdevname 1) local mdsvdev=$(mdsvdevname 1) for i in 1023 2048 do - add mds1 $(mkfs_opts mds1) --reformat $mdsdev $mdsvdev || - exit 10 - add ost1 $(mkfs_opts ost1) --index=$i --reformat \ - $(ostdevname 1) $(ostvdevname 1) + add mds1 $(mkfs_opts mds1 ${mdsdev}) --reformat $mdsdev \ + $mdsvdev || exit 10 + add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \ + --reformat $(ostdevname 1) $(ostvdevname 1) setup_noconfig stopall setup_noconfig @@ -2702,10 +3671,14 @@ test_56() { local mds_journal_size_orig=$MDSJOURNALSIZE MDSJOURNALSIZE=16 - add mds1 $(mkfs_opts mds1) --reformat $(mdsdevname 1) $(mdsvdevname 1) - add ost1 $(mkfs_opts ost1) --index=1000 --reformat \ + + for num in $(seq 1 $MDSCOUNT); do + add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \ + --reformat $(mdsdevname $num) $(mdsvdevname $num) + done + add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=1000 --reformat \ $(ostdevname 1) $(ostvdevname 1) - add ost2 $(mkfs_opts ost2) --index=10000 --reformat \ + add ost2 $(mkfs_opts ost2 $(ostdevname 2)) --index=10000 --reformat \ $(ostdevname 2) $(ostvdevname 2) start_mgsmds @@ -2714,7 +3687,6 @@ test_56() { mount_client $MOUNT || error "Unable to mount client" echo ok $LFS osts - [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; } stopall MDSJOURNALSIZE=$mds_journal_size_orig reformat @@ -2723,7 +3695,8 @@ run_test 56 "check big indexes" test_57a() { # bug 22656 local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}') - writeconf + writeconf_or_reformat + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed" start_mgsmds start_ost && error "OST registration from failnode should fail" @@ -2733,7 +3706,8 @@ run_test 57a "initial registration from failnode should fail (should return errs test_57b() { local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}') - writeconf + writeconf_or_reformat + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 do_facet ost1 "$TUNEFS --servicenode=$NID `ostdevname 1`" || error "tunefs failed" start_mgsmds start_ost || error "OST registration from servicenode should not fail" @@ -2746,23 +3720,24 @@ count_osts() { } test_58() { # bug 22658 - if [ $(facet_fstype mds) == zfs ]; then - skip "Does not work with ZFS-based MDTs yet" - return - fi setup_noconfig mkdir -p $DIR/$tdir createmany -o $DIR/$tdir/$tfile-%d 100 # make sure that OSTs do not cancel llog cookies before we unmount the MDS #define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601 - do_facet mds "lctl set_param fail_loc=0x601" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x601" unlinkmany $DIR/$tdir/$tfile-%d 100 - stop mds - local MNTDIR=$(facet_mntpt mds) + stop_mds + + local MNTDIR=$(facet_mntpt $SINGLEMDS) + local devname=$(mdsdevname ${SINGLEMDS//mds/}) + # remove all files from the OBJECTS dir - do_facet mds "mount -t ldiskfs $MDSDEV $MNTDIR" - do_facet mds "find $MNTDIR/OBJECTS -type f -delete" - do_facet mds "umount $MNTDIR" + mount_fstype $SINGLEMDS + + do_facet $SINGLEMDS "find $MNTDIR/O/1/d* -type f -delete" + + unmount_fstype $SINGLEMDS # restart MDS with missing llog files start_mds do_facet mds "lctl set_param fail_loc=0" @@ -2798,19 +3773,24 @@ test_59() { stop_ost2 >> /dev/null cleanup_nocli >> /dev/null #writeconf to remove all ost2 
traces for subsequent tests - writeconf + writeconf_or_reformat } run_test 59 "writeconf mount option" test_60() { # LU-471 + local num + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then skip "Only applicable to ldiskfs-based MDTs" return fi - add mds1 $(mkfs_opts mds1) \ - --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' --reformat \ - $(mdsdevname 1) $(mdsvdevname 1) || exit 10 + for num in $(seq $MDSCOUNT); do + add mds${num} $(mkfs_opts mds${num} $(mdsdevname $num)) \ + --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' \ + --reformat $(mdsdevname $num) $(mdsvdevname $num) || + exit 10 + done dump=$(do_facet $SINGLEMDS dumpe2fs $(mdsdevname 1)) rc=${PIPESTATUS[0]} @@ -2835,10 +3815,13 @@ test_61() { # LU-80 if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] && ! large_xattr_enabled; then reformat=true - local mds_dev=$(mdsdevname ${SINGLEMDS//mds/}) LDISKFS_MKFS_OPTS+=" -O large_xattr" - add $SINGLEMDS $(mkfs_opts $SINGLEMDS) --reformat $mds_dev || - error "reformatting $mds_dev failed" + + for num in $(seq $MDSCOUNT); do + add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \ + --reformat $(mdsdevname $num) $(mdsvdevname $num) || + error "add mds $num failed" + done fi setup_noconfig || error "setting up the filesystem failed" @@ -2894,26 +3877,982 @@ test_61() { # LU-80 run_test 61 "large xattr" test_62() { - # MRP-118 - local mdsdev=$(mdsdevname 1) - local ostdev=$(ostdevname 1) - - echo "disable journal for mds" - do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed" - start_mds && error "MDT start should fail" - echo "disable journal for ost" - do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed" - start_ost && error "OST start should fail" - cleanup || return $? - reformat_and_config + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi + + # MRP-118 + local mdsdev=$(mdsdevname 1) + local ostdev=$(ostdevname 1) + + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] || + { skip "Need MDS version at least 2.2.51"; return 0; } + + echo "disable journal for mds" + do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed" + start_mds && error "MDT start should fail" + echo "disable journal for ost" + do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed" + start_ost && error "OST start should fail" + cleanup || return $? + reformat_and_config } run_test 62 "start with disabled journal" +test_63() { + if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then + skip "Only applicable to ldiskfs-based MDTs" + return + fi + + local inode_slab=$(do_facet $SINGLEMDS \ + "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo") + if [ -z "$inode_slab" ]; then + skip "ldiskfs module has not been loaded" + return + fi + + echo "$inode_slab ldisk inodes per page" + [ "$inode_slab" -ge "3" ] || + error "ldisk inode size is too big, $inode_slab objs per page" + return +} +run_test 63 "Verify each page can at least hold 3 ldisk inodes" + +test_64() { + start_mds + start_ost + start_ost2 || error "Unable to start second ost" + mount_client $MOUNT || error "Unable to mount client" + stop_ost2 || error "Unable to stop second ost" + echo "$LFS df" + $LFS df --lazy || error "lfs df failed" + cleanup || return $? 
+ #writeconf to remove all ost2 traces for subsequent tests + writeconf_or_reformat +} +run_test 64 "check lfs df --lazy " + +test_65() { # LU-2237 + # Currently, the test is only valid for ldiskfs backend + [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && + skip "non-ldiskfs backend" && return + + local devname=$(mdsdevname ${SINGLEMDS//mds/}) + local brpt=$(facet_mntpt brpt) + local opts="" + + if ! do_facet $SINGLEMDS "test -b $devname"; then + opts="-o loop" + fi + + stop_mds + local obj=$(do_facet $SINGLEMDS \ + "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | + grep Inode) + if [ -z "$obj" ]; then + # The MDT may be just re-formatted, mount the MDT for the + # first time to guarantee the "last_rcvd" file is there. + start_mds || error "fail to mount the MDS for the first time" + stop_mds + fi + + # remove the "last_rcvd" file + do_facet $SINGLEMDS "mkdir -p $brpt" + do_facet $SINGLEMDS \ + "mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt" + do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd" + do_facet $SINGLEMDS "umount -d $brpt" + + # restart MDS, the "last_rcvd" file should be recreated. + start_mds || error "fail to restart the MDS" + stop_mds + obj=$(do_facet $SINGLEMDS \ + "$DEBUGFS -c -R \\\"stat last_rcvd\\\" $devname" | grep Inode) + [ -n "$obj" ] || error "fail to re-create the last_rcvd" +} +run_test 65 "re-create the lost last_rcvd file when server mount" + +test_66() { + [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] || + { skip "Need MGS version at least 2.3.59"; return 0; } + + setup + local OST1_NID=$(do_facet ost1 $LCTL list_nids | head -1) + local MDS_NID=$(do_facet $SINGLEMDS $LCTL list_nids | head -1) + + echo "replace_nids should fail if MDS, OSTs and clients are UP" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && + error "replace_nids fail" + + umount_client $MOUNT || error "unmounting client failed" + echo "replace_nids should fail if MDS and OSTs are UP" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && + error "replace_nids fail" + + stop_ost + echo "replace_nids should fail if MDS is UP" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID && + error "replace_nids fail" + + stop_mds || error "stopping mds failed" + + if combined_mgs_mds; then + start_mdt 1 "-o nosvc" || + error "starting mds with nosvc option failed" + fi + + echo "command should accept two parameters" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 && + error "command should accept two params" + + echo "correct device name should be passed" + do_facet mgs $LCTL replace_nids $FSNAME-WRONG0000 $OST1_NID && + error "wrong devname" + + echo "wrong nids list should not destroy the system" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 "wrong nids list" && + error "wrong parse" + + echo "replace OST nid" + do_facet mgs $LCTL replace_nids $FSNAME-OST0000 $OST1_NID || + error "replace nids failed" + + echo "command should accept two parameters" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 && + error "command should accept two params" + + echo "wrong nids list should not destroy the system" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 "wrong nids list" && + error "wrong parse" + + echo "replace MDS nid" + do_facet mgs $LCTL replace_nids $FSNAME-MDT0000 $MDS_NID || + error "replace nids failed" + + if ! 
combined_mgs_mds ; then + stop_mgs + else + stop_mds + fi + + setup_noconfig + check_mount || error "error after nid replace" + cleanup || error "cleanup failed" + reformat +} +run_test 66 "replace nids" + +test_67() { #LU-2950 + local legacy="$TMP/legacy_lnet_config" + local new="$TMP/new_routes_test" + local out="$TMP/config_out_file" + local verify="$TMP/conv_verify" + local verify_conf="$TMP/conf_verify" + + # Create the legacy file that will be run through the + # lustre_routes_conversion script + cat <<- LEGACY_LNET_CONFIG > $legacy + tcp1 23 192.168.213.1@tcp:1; tcp5 34 193.30.4.3@tcp:4; + tcp2 54 10.1.3.2@tcp; + tcp3 10.3.4.3@tcp:3; + tcp4 10.3.3.4@tcp; + LEGACY_LNET_CONFIG + + # Create the verification file to verify the output of + # lustre_routes_conversion script against. + cat <<- VERIFY_LNET_CONFIG > $verify + tcp1: { gateway: 192.168.213.1@tcp, hop: 23, priority: 1 } + tcp5: { gateway: 193.30.4.3@tcp, hop: 34, priority: 4 } + tcp2: { gateway: 10.1.3.2@tcp, hop: 54 } + tcp3: { gateway: 10.3.4.3@tcp, priority: 3 } + tcp4: { gateway: 10.3.3.4@tcp } + VERIFY_LNET_CONFIG + + # Create the verification file to verify the output of + # lustre_routes_config script against + cat <<- VERIFY_LNET_CONFIG > $verify_conf + lctl --net tcp1 add_route 192.168.213.1@tcp 23 1 + lctl --net tcp5 add_route 193.30.4.3@tcp 34 4 + lctl --net tcp2 add_route 10.1.3.2@tcp 54 4 + lctl --net tcp3 add_route 10.3.4.3@tcp 1 3 + lctl --net tcp4 add_route 10.3.3.4@tcp 1 3 + VERIFY_LNET_CONFIG + + lustre_routes_conversion $legacy $new > /dev/null + if [ -f $new ]; then + # verify the conversion output + cmp -s $new $verify > /dev/null + if [ $? -eq 1 ]; then + error "routes conversion failed" + fi + + lustre_routes_config --dry-run --verbose $new > $out + # check that the script succeeded + cmp -s $out $verify_conf > /dev/null + if [ $? -eq 1 ]; then + error "routes config failed" + fi + else + error "routes conversion test failed" + fi + # remove generated files + rm -f $new $legacy $verify $verify_conf $out +} +run_test 67 "test routes conversion and configuration" + +test_68() { + local fid + local seq + local START + local END + + [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.53) ] || + { skip "Need MDS version at least 2.4.53"; return 0; } + + umount_client $MOUNT || error "umount client failed" + + start_mdt 1 || error "MDT start failed" + start_ost + + # START-END - the sequences we'll be reserving + START=$(do_facet $SINGLEMDS \ + lctl get_param -n seq.ctl*.space | awk -F'[[ ]' '{print $2}') + END=$((START + (1 << 30))) + do_facet $SINGLEMDS \ + lctl set_param seq.ctl*.fldb="[$START-$END\):0:mdt" + + # reset the sequences MDT0000 has already assigned + do_facet $SINGLEMDS \ + lctl set_param seq.srv*MDT0000.space=clear + + # remount to let the client allocate new sequence + mount_client $MOUNT || error "mount client failed" + + touch $DIR/$tfile + do_facet $SINGLEMDS \ + lctl get_param seq.srv*MDT0000.space + $LFS path2fid $DIR/$tfile + + local old_ifs="$IFS" + IFS='[:]' + fid=($($LFS path2fid $DIR/$tfile)) + IFS="$old_ifs" + let seq=${fid[1]} + + if [[ $seq < $END ]]; then + error "used reserved sequence $seq?" + fi + cleanup || return $? 
+} +run_test 68 "be able to reserve specific sequences in FLDB" + +test_69() { + local server_version=$(lustre_version_code $SINGLEMDS) + + [[ $server_version -lt $(version_code 2.4.2) ]] && + skip "Need MDS version at least 2.4.2" && return + + [[ $server_version -ge $(version_code 2.4.50) ]] && + [[ $server_version -lt $(version_code 2.5.0) ]] && + skip "Need MDS version at least 2.5.0" && return + + setup + + # use OST0000 since it probably has the most creations + local OSTNAME=$(ostname_from_index 0) + local mdtosc_proc1=$(get_mdtosc_proc_path mds1 $OSTNAME) + local last_id=$(do_facet mds1 lctl get_param -n \ + osc.$mdtosc_proc1.prealloc_last_id) + + # Want to have OST LAST_ID over 1.5 * OST_MAX_PRECREATE to + # verify that the LAST_ID recovery is working properly. If + # not, then the OST will refuse to allow the MDS connect + # because the LAST_ID value is too different from the MDS + #define OST_MAX_PRECREATE=20000 + local num_create=$((20000 * 5)) + + mkdir -p $DIR/$tdir + $LFS setstripe -i 0 $DIR/$tdir + createmany -o $DIR/$tdir/$tfile- $num_create || + error "createmany: failed to create $num_create files: $?" + # delete all of the files with objects on OST0 so the + # filesystem is not inconsistent later on + $LFS find $MOUNT --ost 0 | xargs rm + + stop_ost || error "OST0 stop failure" + add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --reformat --replace \ + $(ostdevname 1) $(ostvdevname 1) || + error "reformat and replace $ostdev failed" + start_ost || error "OST0 restart failure" + wait_osc_import_state mds ost FULL + + touch $DIR/$tdir/$tfile-last || error "create file after reformat" + local idx=$($LFS getstripe -i $DIR/$tdir/$tfile-last) + [ $idx -ne 0 ] && error "$DIR/$tdir/$tfile-last on $idx not 0" || true + + cleanup +} +run_test 69 "replace an OST with the same index" + +test_70a() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + local MDTIDX=1 + + cleanup + + start_mdt 1 || error "MDT0 start fail" + + start_ost || error "OST0 start fail" + + start_mdt 2 || error "MDT1 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "create dir fail" + + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir fail" + + rm -rf $DIR/$tdir || error "delete dir fail" + cleanup || return $? +} +run_test 70a "start MDT0, then OST, then MDT1" + +test_70b() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + local MDTIDX=1 + + start_ost || error "OST0 start fail" + + start_mdt 1 || error "MDT0 start fail" + start_mdt 2 || error "MDT1 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "create dir fail" + + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir fail" + + rm -rf $DIR/$tdir || error "delete dir fail" + + cleanup || return $? +} +run_test 70b "start OST, MDT1, MDT0" + +test_70c() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + local MDTIDX=1 + + start_mdt 1 || error "MDT0 start fail" + start_mdt 2 || error "MDT1 start fail" + start_ost || error "OST0 start fail" + + mount_client $MOUNT || error "mount client fails" + stop_mdt 1 || error "MDT1 start fail" + + local mdc_for_mdt1=$($LCTL dl | grep MDT0000-mdc | awk '{print $4}') + echo "deactivate $mdc_for_mdt1" + $LCTL --device $mdc_for_mdt1 deactivate || return 1 + + mkdir -p $DIR/$tdir && error "mkdir succeed" + + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir && + error "create remote dir succeed" + + cleanup || return $? 
+} +run_test 70c "stop MDT0, mkdir fail, create remote dir fail" + +test_70d() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + local MDTIDX=1 + + start_mdt 1 || error "MDT0 start fail" + start_mdt 2 || error "MDT1 start fail" + start_ost || error "OST0 start fail" + + mount_client $MOUNT || error "mount client fails" + + stop_mdt 2 || error "MDT1 start fail" + + local mdc_for_mdt2=$($LCTL dl | grep MDT0001-mdc | + awk '{print $4}') + echo "deactivate $mdc_for_mdt2" + $LCTL --device $mdc_for_mdt2 deactivate || + error "set $mdc_for_mdt2 deactivate failed" + + mkdir -p $DIR/$tdir || error "mkdir fail" + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir && + error "create remote dir succeed" + + rm -rf $DIR/$tdir || error "delete dir fail" + + cleanup || return $? +} +run_test 70d "stop MDT1, mkdir succeed, create remote dir fail" + +test_71a() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + if combined_mgs_mds; then + skip "needs separate MGS/MDT" && return + fi + local MDTIDX=1 + + start_mdt 1 || error "MDT0 start fail" + start_ost || error "OST0 start fail" + start_mdt 2 || error "MDT1 start fail" + start_ost2 || error "OST1 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "mkdir fail" + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir succeed" + + mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed" + rm -rf $DIR/$tdir || error "delete dir fail" + + umount_client $MOUNT + stop_mdt 1 || error "MDT0 stop fail" + stop_mdt 2 || error "MDT1 stop fail" + stop_ost || error "OST0 stop fail" + stop_ost2 || error "OST1 stop fail" +} +run_test 71a "start MDT0 OST0, MDT1, OST1" + +test_71b() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + if combined_mgs_mds; then + skip "needs separate MGS/MDT" && return + fi + local MDTIDX=1 + + start_mdt 2 || error "MDT1 start fail" + start_ost || error "OST0 start fail" + start_mdt 1 || error "MDT0 start fail" + start_ost2 || error "OST1 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "mkdir fail" + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir succeed" + + mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed" + rm -rf $DIR/$tdir || error "delete dir fail" + + umount_client $MOUNT + stop_mdt 1 || error "MDT0 stop fail" + stop_mdt 2 || error "MDT1 stop fail" + stop_ost || error "OST0 stop fail" + stop_ost2 || error "OST1 stop fail" +} +run_test 71b "start MDT1, OST0, MDT0, OST1" + +test_71c() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + if combined_mgs_mds; then + skip "needs separate MGS/MDT" && return + fi + local MDTIDX=1 + + start_ost || error "OST0 start fail" + start_ost2 || error "OST1 start fail" + start_mdt 2 || error "MDT1 start fail" + start_mdt 1 || error "MDT0 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "mkdir fail" + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir succeed" + + mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed" + rm -rf $DIR/$tdir || error "delete dir fail" + + umount_client $MOUNT + stop_mdt 1 || error "MDT0 stop fail" + stop_mdt 2 || error "MDT1 stop fail" + stop_ost || error "OST0 stop fail" + stop_ost2 || error "OST1 stop fail" + +} +run_test 71c "start OST0, OST1, MDT1, MDT0" + +test_71d() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + if combined_mgs_mds; then + skip "needs separate 
MGS/MDT" && return + fi + local MDTIDX=1 + + start_ost || error "OST0 start fail" + start_mdt 2 || error "MDT0 start fail" + start_mdt 1 || error "MDT0 start fail" + start_ost2 || error "OST1 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "mkdir fail" + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir succeed" + + mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed" + rm -rf $DIR/$tdir || error "delete dir fail" + + umount_client $MOUNT + stop_mdt 1 || error "MDT0 stop fail" + stop_mdt 2 || error "MDT1 stop fail" + stop_ost || error "OST0 stop fail" + stop_ost2 || error "OST1 stop fail" + +} +run_test 71d "start OST0, MDT1, MDT0, OST1" + +test_71e() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + if combined_mgs_mds; then + skip "needs separate MGS/MDT" && return + fi + local MDTIDX=1 + + start_ost || error "OST0 start fail" + start_mdt 2 || error "MDT1 start fail" + start_ost2 || error "OST1 start fail" + start_mdt 1 || error "MDT0 start fail" + + mount_client $MOUNT || error "mount client fails" + + mkdir -p $DIR/$tdir || error "mkdir fail" + $LFS mkdir -i $MDTIDX $DIR/$tdir/remote_dir || + error "create remote dir succeed" + + mcreate $DIR/$tdir/remote_dir/$tfile || error "create file failed" + rm -rf $DIR/$tdir || error "delete dir fail" + + umount_client $MOUNT + stop_mdt 1 || error "MDT0 stop fail" + stop_mdt 2 || error "MDT1 stop fail" + stop_ost || error "OST0 stop fail" + stop_ost2 || error "OST1 stop fail" + +} +run_test 71e "start OST0, MDT1, OST1, MDT0" + +test_72() { #LU-2634 + local mdsdev=$(mdsdevname 1) + local ostdev=$(ostdevname 1) + local cmd="$E2FSCK -fnvd $mdsdev" + local fn=3 + + [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] && + skip "ldiskfs only test" && return + + #tune MDT with "-O extents" + + for num in $(seq $MDSCOUNT); do + add mds${num} $(mkfs_opts mds$num $(mdsdevname $num)) \ + --reformat $(mdsdevname $num) $(mdsvdevname $num) || + error "add mds $num failed" + $TUNE2FS -O extents $(mdsdevname $num) + done + + add ost1 $(mkfs_opts ost1 $ostdev) --reformat $ostdev || + error "add $ostdev failed" + start_mgsmds || error "start mds failed" + start_ost || error "start ost failed" + mount_client $MOUNT || error "mount client failed" + + #create some short symlinks + mkdir -p $DIR/$tdir + createmany -o $DIR/$tdir/$tfile-%d $fn + echo "create $fn short symlinks" + for i in $(seq -w 1 $fn); do + ln -s $DIR/$tdir/$tfile-$i $MOUNT/$tfile-$i + done + ls -al $MOUNT + + #umount + umount_client $MOUNT || error "umount client failed" + stop_mds || error "stop mds failed" + stop_ost || error "stop ost failed" + + #run e2fsck + run_e2fsck $(facet_active_host $SINGLEMDS) $mdsdev "-n" +} +run_test 72 "test fast symlink with extents flag enabled" + +test_73() { #LU-3006 + load_modules + [ $(facet_fstype ost1) == zfs ] && import_zpool ost1 + do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" || + error "1st tunefs failed" + start_mgsmds || error "start mds failed" + start_ost || error "start ost failed" + mount_client $MOUNT || error "mount client failed" + lctl get_param -n osc.*OST0000-osc-[^M]*.import | grep failover_nids | + grep 1.2.3.4@$NETTYPE || error "failover nids haven't changed" + umount_client $MOUNT || error "umount client failed" + stopall + reformat +} +run_test 73 "failnode to update from mountdata properly" + +test_75() { # LU-2374 + [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.1) ]] && + skip "Need MDS version at least 
2.4.1" && return + + local index=0 + local opts_mds="$(mkfs_opts mds1 $(mdsdevname 1)) \ + --reformat $(mdsdevname 1) $(mdsvdevname 1)" + local opts_ost="$(mkfs_opts ost1 $(ostdevname 1)) \ + --reformat $(ostdevname 1) $(ostvdevname 1)" + + #check with default parameters + add mds1 $opts_mds || error "add mds1 failed for default params" + add ost1 $opts_ost || error "add ost1 failed for default params" + + opts_mds=$(echo $opts_mds | sed -e "s/--mdt//") + opts_mds=$(echo $opts_mds | + sed -e "s/--index=$index/--index=$index --mdt/") + opts_ost=$(echo $opts_ost | sed -e "s/--ost//") + opts_ost=$(echo $opts_ost | + sed -e "s/--index=$index/--index=$index --ost/") + + add mds1 $opts_mds || error "add mds1 failed for new params" + add ost1 $opts_ost || error "add ost1 failed for new params" + return 0 +} +run_test 75 "The order of --index should be irrelevant" + +test_76a() { + [[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] || + { skip "Need MDS version at least 2.4.52" && return 0; } + setup + local MDMB_PARAM="osc.*.max_dirty_mb" + echo "Change MGS params" + local MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | + head -1) + echo "max_dirty_mb: $MAX_DIRTY_MB" + local NEW_MAX_DIRTY_MB=$((MAX_DIRTY_MB + MAX_DIRTY_MB)) + echo "new_max_dirty_mb: $NEW_MAX_DIRTY_MB" + do_facet mgs $LCTL set_param -P $MDMB_PARAM=$NEW_MAX_DIRTY_MB + wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM | + head -1" $NEW_MAX_DIRTY_MB + MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1) + echo "$MAX_DIRTY_MB" + [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] || + error "error while apply max_dirty_mb" + + echo "Check the value is stored after remount" + stopall + setupall + wait_update $HOSTNAME "lctl get_param -n $MDMB_PARAM | + head -1" $NEW_MAX_DIRTY_MB + MAX_DIRTY_MB=$($LCTL get_param -n $MDMB_PARAM | head -1) + [ $MAX_DIRTY_MB = $NEW_MAX_DIRTY_MB ] || + error "max_dirty_mb is not saved after remount" + + echo "Change OST params" + CLIENT_PARAM="obdfilter.*.client_cache_count" + local CLIENT_CACHE_COUNT + CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | + head -1) + echo "client_cache_count: $CLIENT_CACHE_COUNT" + NEW_CLIENT_CACHE_COUNT=$((CLIENT_CACHE_COUNT+CLIENT_CACHE_COUNT)) + echo "new_client_cache_count: $NEW_CLIENT_CACHE_COUNT" + do_facet mgs $LCTL set_param -P $CLIENT_PARAM=$NEW_CLIENT_CACHE_COUNT + wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM | + head -1" $NEW_CLIENT_CACHE_COUNT + CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | + head -1) + echo "$CLIENT_CACHE_COUNT" + [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] || + error "error while apply client_cache_count" + + echo "Check the value is stored after remount" + stopall + setupall + wait_update $(facet_host ost1) "lctl get_param -n $CLIENT_PARAM | + head -1" $NEW_CLIENT_CACHE_COUNT + CLIENT_CACHE_COUNT=$(do_facet ost1 $LCTL get_param -n $CLIENT_PARAM | + head -1) + echo "$CLIENT_CACHE_COUNT" + [ $CLIENT_CACHE_COUNT = $NEW_CLIENT_CACHE_COUNT ] || + error "client_cache_count is not saved after remount" + stopall +} +run_test 76a "set permanent params set_param -P" + +test_76b() { # LU-4783 + [[ $(lustre_version_code mgs) -ge $(version_code 2.5.57) ]] || + { skip "Need MGS version at least 2.5.57" && return 0; } + stopall + setupall + do_facet mgs $LCTL get_param mgs.MGS.live.params || + error "start params log failed" + stopall +} +run_test 76b "verify params log setup correctly" + +test_77() { # LU-3445 + local server_version=$(lustre_version_code $SINGLEMDS) + + [[ $server_version 
-ge $(version_code 2.2.60) ]] && + [[ $server_version -le $(version_code 2.4.0) ]] && + skip "Need MDS version < 2.2.60 or > 2.4.0" && return + + if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then + is_blkdev $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) && + skip_env "mixed loopback and real device not working" && return + fi + + local fs2mdsdev=$(mdsdevname 1_2) + local fs2ostdev=$(ostdevname 1_2) + local fs2mdsvdev=$(mdsvdevname 1_2) + local fs2ostvdev=$(ostvdevname 1_2) + local fsname=test1234 + local mgsnid + local failnid="$(h2$NETTYPE 1.2.3.4),$(h2$NETTYPE 4.3.2.1)" + + add fs2mds $(mkfs_opts mds1 $fs2mdsdev) --mgs --fsname=$fsname \ + --reformat $fs2mdsdev $fs2mdsvdev || error "add fs2mds failed" + start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_fs2 EXIT INT || + error "start fs2mds failed" + + mgsnid=$(do_facet fs2mds $LCTL list_nids | xargs | tr ' ' ,) + [[ $mgsnid = *,* ]] || mgsnid+=",$mgsnid" + + add fs2ost $(mkfs_opts ost1 $fs2ostdev) --mgsnode=$mgsnid \ + --failnode=$failnid --fsname=$fsname \ + --reformat $fs2ostdev $fs2ostvdev || + error "add fs2ost failed" + start fs2ost $fs2ostdev $OST_MOUNT_OPTS || error "start fs2ost failed" + + mkdir -p $MOUNT2 + $MOUNT_CMD $mgsnid:/$fsname $MOUNT2 || error "mount $MOUNT2 failed" + DIR=$MOUNT2 MOUNT=$MOUNT2 check_mount || error "check $MOUNT2 failed" + cleanup_fs2 +} +run_test 77 "comma-separated MGS NIDs and failover node NIDs" + +test_78() { + [[ $(facet_fstype $SINGLEMDS) != ldiskfs || + $(facet_fstype ost1) != ldiskfs ]] && + skip "only applicable to ldiskfs-based MDTs and OSTs" && return + + # reformat the Lustre filesystem with a smaller size + local saved_MDSSIZE=$MDSSIZE + local saved_OSTSIZE=$OSTSIZE + MDSSIZE=$((MDSSIZE - 20000)) + OSTSIZE=$((OSTSIZE - 20000)) + reformat || error "(1) reformat Lustre filesystem failed" + MDSSIZE=$saved_MDSSIZE + OSTSIZE=$saved_OSTSIZE + + # mount the Lustre filesystem + setup_noconfig || error "(2) setup Lustre filesystem failed" + + # create some files + log "create test files" + local i + local file + local num_files=100 + mkdir -p $MOUNT/$tdir || error "(3) mkdir $MOUNT/$tdir failed" + for i in $(seq $num_files); do + file=$MOUNT/$tdir/$tfile-$i + dd if=/dev/urandom of=$file count=1 bs=1M || + error "(4) create $file failed" + done + + # unmount the Lustre filesystem + cleanup || error "(5) cleanup Lustre filesystem failed" + + # run e2fsck on the MDT and OST devices + local mds_host=$(facet_active_host $SINGLEMDS) + local ost_host=$(facet_active_host ost1) + local mds_dev=$(mdsdevname ${SINGLEMDS//mds/}) + local ost_dev=$(ostdevname 1) + + run_e2fsck $mds_host $mds_dev "-y" + run_e2fsck $ost_host $ost_dev "-y" + + # get the original block count of the MDT and OST filesystems + local mds_orig_blks=$(get_block_count $SINGLEMDS $mds_dev) + local ost_orig_blks=$(get_block_count ost1 $ost_dev) + + # expand the MDT and OST filesystems to the device size + run_resize2fs $SINGLEMDS $mds_dev "" || error "expand $SINGLEMDS failed" + run_resize2fs ost1 $ost_dev "" || error "expand ost1 failed" + + # run e2fsck on the MDT and OST devices again + run_e2fsck $mds_host $mds_dev "-y" + run_e2fsck $ost_host $ost_dev "-y" + + # mount the Lustre filesystem + setup + + # check the files + log "check files after expanding the MDT and OST filesystems" + for i in $(seq $num_files); do + file=$MOUNT/$tdir/$tfile-$i + $CHECKSTAT -t file -s 1048576 $file || + error "(6) checkstat $file failed" + done + + # create more files + log "create more files after expanding the MDT and OST filesystems" + for i in 
$(seq $((num_files + 1)) $((num_files + 10))); do + file=$MOUNT/$tdir/$tfile-$i + dd if=/dev/urandom of=$file count=1 bs=1M || + error "(7) create $file failed" + done + + # unmount the Lustre filesystem + cleanup || error "(8) cleanup Lustre filesystem failed" + + # run e2fsck on the MDT and OST devices + run_e2fsck $mds_host $mds_dev "-y" + run_e2fsck $ost_host $ost_dev "-y" + + # get the maximum block count of the MDT and OST filesystems + local mds_max_blks=$(get_block_count $SINGLEMDS $mds_dev) + local ost_max_blks=$(get_block_count ost1 $ost_dev) + + # get the minimum block count of the MDT and OST filesystems + local mds_min_blks=$(run_resize2fs $SINGLEMDS $mds_dev "" "-P" 2>&1 | + grep minimum | sed -e 's/^.*filesystem: //g') + local ost_min_blks=$(run_resize2fs ost1 $ost_dev "" "-P" 2>&1 | + grep minimum | sed -e 's/^.*filesystem: //g') + + # shrink the MDT and OST filesystems to a smaller size + local shrunk=false + local new_blks + local base_blks + if [[ $mds_max_blks -gt $mds_min_blks && + $mds_max_blks -gt $mds_orig_blks ]]; then + [[ $mds_orig_blks -gt $mds_min_blks ]] && + base_blks=$mds_orig_blks || base_blks=$mds_min_blks + new_blks=$(( (mds_max_blks - base_blks) / 2 + base_blks )) + run_resize2fs $SINGLEMDS $mds_dev $new_blks || + error "shrink $SINGLEMDS to $new_blks failed" + shrunk=true + fi + + if [[ $ost_max_blks -gt $ost_min_blks && + $ost_max_blks -gt $ost_orig_blks ]]; then + [[ $ost_orig_blks -gt $ost_min_blks ]] && + base_blks=$ost_orig_blks || base_blks=$ost_min_blks + new_blks=$(( (ost_max_blks - base_blks) / 2 + base_blks )) + run_resize2fs ost1 $ost_dev $new_blks || + error "shrink ost1 to $new_blks failed" + shrunk=true + fi + + # check whether the MDT or OST filesystem was shrunk or not + if ! $shrunk; then + combined_mgs_mds || stop_mgs || error "(9) stop mgs failed" + reformat || error "(10) reformat Lustre filesystem failed" + return 0 + fi + + # run e2fsck on the MDT and OST devices again + run_e2fsck $mds_host $mds_dev "-y" + run_e2fsck $ost_host $ost_dev "-y" + + # mount the Lustre filesystem again + setup + + # check the files + log "check files after shrinking the MDT and OST filesystems" + for i in $(seq $((num_files + 10))); do + file=$MOUNT/$tdir/$tfile-$i + $CHECKSTAT -t file -s 1048576 $file || + error "(11) checkstat $file failed" + done + + # unmount and reformat the Lustre filesystem + cleanup || error "(12) cleanup Lustre filesystem failed" + combined_mgs_mds || stop_mgs || error "(13) stop mgs failed" + reformat || error "(14) reformat Lustre filesystem failed" +} +run_test 78 "run resize2fs on MDT and OST filesystems" + +test_79() { # LU-4227 + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.5.59) ]] || + { skip "Need MDS version at least 2.5.59"; return 0; } + + local mdsdev1=$(mdsdevname 1) + local mdsvdev1=$(mdsvdevname 1) + local mdsdev2=$(mdsdevname 2) + local mdsvdev2=$(mdsvdevname 2) + local ostdev1=$(ostdevname 1) + local ostvdev1=$(ostvdevname 1) + local opts_mds1="$(mkfs_opts mds1 $mdsdev1) --reformat" + local opts_mds2="$(mkfs_opts mds2 $mdsdev2) --reformat" + local opts_ost1="$(mkfs_opts ost1 $ostdev1) --reformat" + local mgsnode_opt + + # remove --mgs/--mgsnode from mkfs.lustre options + opts_mds1=$(echo $opts_mds1 | sed -e "s/--mgs//") + + mgsnode_opt=$(echo $opts_mds2 | + awk '{ for ( i = 1; i < NF; i++ ) + if ( $i ~ "--mgsnode" ) { print $i; break } }') + [ -n $mgsnode_opt ] && + opts_mds2=$(echo $opts_mds2 | sed -e "s/$mgsnode_opt//") + + mgsnode_opt=$(echo $opts_ost1 | + awk '{ for ( i = 1; i < NF; i++ ) + if 
( $i ~ "--mgsnode" ) { print $i; break } }')
+	[ -n $mgsnode_opt ] &&
+		opts_ost1=$(echo $opts_ost1 | sed -e "s/$mgsnode_opt//")
+
+	# -MGS, format a mdt without --mgs option
+	add mds1 $opts_mds1 $mdsdev1 $mdsvdev1 &&
+		error "Must specify --mgs when formatting mdt combined with mgs"
+
+	# +MGS, format a mdt/ost without --mgsnode option
+	add mds1 $(mkfs_opts mds1 $mdsdev1) --reformat $mdsdev1 $mdsvdev1 \
+		> /dev/null || error "start mds1 failed"
+	add mds2 $opts_mds2 $mdsdev2 $mdsvdev2 &&
+		error "Must specify --mgsnode when formatting a mdt"
+	add ost1 $opts_ost1 $ostdev1 $ostvdev1 &&
+		error "Must specify --mgsnode when formatting an ost"
+
+	return 0
+}
+run_test 79 "format MDT/OST without mgs option (should return errors)"
+
+test_80() {
+	start_mds
+	start_ost
+	uuid=$(do_facet ost1 lctl get_param -n mgc.*.uuid)
+#define OBD_FAIL_MGS_PAUSE_TARGET_CON 0x906
+	do_facet ost1 "lctl set_param fail_val=10 fail_loc=0x906"
+	do_facet mgs "lctl set_param fail_val=10 fail_loc=0x906"
+	do_facet mgs "lctl set_param -n mgs/MGS/evict_client $uuid"
+	sleep 30
+	start_ost2
+
+	do_facet ost1 "lctl set_param fail_loc=0"
+	stopall
+}
+run_test 80 "mgc import reconnect race"
+
 if ! combined_mgs_mds ; then
 	stop mgs
 fi
 cleanup_gss
-complete $(basename $0) $SECONDS
+# restore the values of MDSSIZE and OSTSIZE
+MDSSIZE=$STORED_MDSSIZE
+OSTSIZE=$STORED_OSTSIZE
+reformat
+
+complete $SECONDS
 exit_status