X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fconf-sanity.sh;h=a6e690bc1fc69c834b2368f9291152368ed37dbd;hp=303d37cd6be15e264e346f930eb40b10025e828e;hb=938a12769147505c7e42908df89733be19982205;hpb=d9872851247f32423b6ee9abb70d95f7eb037493

diff --git a/lustre/tests/conf-sanity.sh b/lustre/tests/conf-sanity.sh
index 303d37c..a6e690b 100644
--- a/lustre/tests/conf-sanity.sh
+++ b/lustre/tests/conf-sanity.sh
@@ -13,9 +13,15 @@ ONLY=${ONLY:-"$*"}
 # bug number for skipped test:
 #               15977
-ALWAYS_EXCEPT=" 39    $CONF_SANITY_EXCEPT"
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
 
+if [ "$FAILURE_MODE" = "HARD" ]; then
+	CONFIG_EXCEPTIONS="24a " && \
+	echo "Except the tests: $CONFIG_EXCEPTIONS for FAILURE_MODE=$FAILURE_MODE, bug 23573" && \
+	ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
+fi
+
 SRCDIR=`dirname $0`
 PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
 
@@ -26,20 +32,26 @@ RLUSTRE=${RLUSTRE:-$LUSTRE}
 
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
+
+# use small MDS + OST size to speed formatting time
+# do not use too small MDSSIZE/OSTSIZE, which affects the default journal size
+MDSSIZE=200000
+OSTSIZE=200000
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+
 # STORED_MDSSIZE is used in test_18
 if [ -n "$MDSSIZE" ]; then
     STORED_MDSSIZE=$MDSSIZE
 fi
 
-# use small MDS + OST size to speed formatting time
-MDSSIZE=40000
-OSTSIZE=40000
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-
-remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
+init_logging
 
 #
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="0 1 2 3 6 7 15 18 24b 25 30 31 32 33 34a 45"
+require_dsh_mds || exit 0
+require_dsh_ost || exit 0
+#
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45"
+
 
 assert_DIR
 
@@ -47,16 +59,25 @@ reformat() {
         formatall
 }
 
-writeconf() {
-    local facet=$SINGLEMDS
-    local dev=${facet}_dev
-    shift
+writeconf1() {
+	local facet=$1
+	local dev=$2
+
 	stop ${facet} -f
 	rm -f ${facet}active
 	# who knows if/where $TUNEFS is installed?  Better reformat if it fails...
-    do_facet ${facet} "$TUNEFS --writeconf ${!dev}" || echo "tunefs failed, reformatting instead" && reformat
+	do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" ||
+		{ echo "tunefs failed, reformatting instead" && reformat_and_config && return 1; }
+	return 0
+}
 
-    gen_config
+writeconf() {
+	# we need ldiskfs
+	load_modules
+	# if writeconf fails anywhere, we reformat everything
+	writeconf1 mds `mdsdevname 1` || return 0
+	writeconf1 ost1 `ostdevname 1` || return 0
+	writeconf1 ost2 `ostdevname 2` || return 0
 }
 
 gen_config() {
@@ -71,16 +92,31 @@ gen_config() {
 
 reformat_and_config() {
 	reformat
+	if ! combined_mgs_mds ; then
+		start_mgs
+	fi
 	gen_config
 }
 
+start_mgs () {
+	echo "start mgs"
+	start mgs $MGSDEV $MGS_MOUNT_OPTS
+}
+
 start_mds() {
 	local facet=$SINGLEMDS
 	# we can not use MDSDEV1 here because SINGLEMDS could be set not to mds1 only
 	local num=$(echo $facet | tr -d "mds")
 	local dev=$(mdsdevname $num)
 	echo "start mds service on `facet_active_host $facet`"
-	start $facet ${dev} $MDS_MOUNT_OPTS || return 94
+	start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94
+}
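+
+# Note: the trailing $@ above (and in start_ost/start_ost2 below) forwards
+# extra mount options from the caller, e.g. "start_mgsmds -o writeconf" and
+# "start_ost -o writeconf" as used by test_59.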
+
+start_mgsmds() {
+	if ! combined_mgs_mds ; then
+		start_mgs
+	fi
+	start_mds $@
 }
 
 stop_mds() {
@@ -91,7 +127,7 @@
 
 start_ost() {
 	echo "start ost1 service on `facet_active_host ost1`"
-	start ost1 `ostdevname 1` $OST_MOUNT_OPTS || return 95
+	start ost1 `ostdevname 1` $OST_MOUNT_OPTS $@ || return 95
 }
 
 stop_ost() {
@@ -102,7 +138,7 @@
 
 start_ost2() {
 	echo "start ost2 service on `facet_active_host ost2`"
-	start ost2 `ostdevname 2` $OST_MOUNT_OPTS || return 92
+	start ost2 `ostdevname 2` $OST_MOUNT_OPTS $@ || return 92
 }
 
 stop_ost2() {
@@ -142,20 +178,24 @@ manual_umount_client(){
 }
 
 setup() {
-	start_ost
-	start_mds
-	mount_client $MOUNT
+	start_mds || error "MDT start failed"
+	start_ost || error "OST start failed"
+	mount_client $MOUNT || error "client start failed"
 }
 
 setup_noconfig() {
+	if ! combined_mgs_mds ; then
+		start_mgs
+	fi
+
 	start_mds
 	start_ost
 	mount_client $MOUNT
 }
 
 cleanup_nocli() {
-	stop_mds || return 201
 	stop_ost || return 202
+	stop_mds || return 201
 	unload_modules || return 203
 }
 
@@ -207,19 +247,21 @@ test_0() {
 run_test 0 "single mount setup"
 
 test_1() {
+	start_mds || error "MDT start failed"
 	start_ost
 	echo "start ost second time..."
-	setup
+	start_ost && error "2nd OST start should fail"
+	mount_client $MOUNT || error "client start failed"
 	check_mount || return 42
 	cleanup || return $?
 }
 run_test 1 "start up ost twice (should return errors)"
 
 test_2() {
-	start_ost
 	start_mds
 	echo "start mds second time.."
-	start_mds
+	start_mds && error "2nd MDT start should fail"
+	start_ost
 	mount_client $MOUNT
 	check_mount || return 43
 	cleanup || return $?
 }
@@ -229,7 +271,7 @@ run_test 2 "start up mds twice (should return err)"
 
 test_3() {
 	setup
 	#mount.lustre returns an error if already in mtab
-	mount_client $MOUNT && return $?
+	mount_client $MOUNT && error "2nd client mount should fail"
 	check_mount || return 44
 	cleanup || return $?
 }
@@ -312,8 +354,8 @@ test_5b() {
 run_test 5b "mds down, cleanup after failed mount (bug 2712) (should return errs)"
 
 test_5c() {
-	start_ost
 	start_mds
+	start_ost
 	[ -d $MOUNT ] || mkdir -p $MOUNT
 	grep " $MOUNT " /etc/mtab && echo "test 5c: mtab before mount" && return 10
 	local oldfs="${FSNAME}"
@@ -339,8 +381,8 @@ test_5d() {
 run_test 5d "mount with ost down"
 
 test_5e() {
-	start_ost
 	start_mds
+	start_ost
 
 	#define OBD_FAIL_PTLRPC_DELAY_SEND       0x506
 	do_facet client "lctl set_param fail_loc=0x80000506"
@@ -401,48 +443,16 @@ test_9() {
         fi
 	stop_ost || return $?
 }
-
 run_test 9 "test ptldebug and subsystem for mkfs"
 
-# LOGS/PENDING do not exist anymore since CMD3
-test_16() {
-        local TMPMTPT="${TMP}/conf16"
-        local dev=${SINGLEMDS}_dev
-        local MDSDEV=${!dev}
-        if [ ! -e "$MDSDEV" ]; then
-            log "no $MDSDEV existing, so mount Lustre to create one"
-            setup
-            check_mount || return 41
-            cleanup || return $?
-        fi
-
-        [ -f "$MDSDEV" ] && LOOPOPT="-o loop"
-
-        log "change the mode of $MDSDEV/OBJECTS to 555"
-        do_facet $SINGLEMDS "mkdir -p $TMPMTPT &&
-                      mount $LOOPOPT -t $FSTYPE $MDSDEV $TMPMTPT &&
-                      chmod 555 $TMPMTPT/OBJECTS &&
-                      umount $TMPMTPT" || return $?
-
-        log "mount Lustre to change the mode of OBJECTS, then umount Lustre"
-        setup
-        check_mount || return 41
-        cleanup || return $?
- - log "read the mode of OBJECTS and check if they has been changed properly" - EXPECTEDOBJECTSMODE=`do_facet $SINGLEMDS "$DEBUGFS -R 'stat OBJECTS' $MDSDEV 2> /dev/null" | grep 'Mode: ' | sed -e "s/.*Mode: *//" -e "s/ *Flags:.*//"` - - if [ "$EXPECTEDOBJECTSMODE" = "0777" ]; then - log "Success:Lustre change the mode of OBJECTS correctly" - else - error "Lustre does not change mode of OBJECTS properly" - fi -} -run_test 16 "verify that lustre will correct the mode of OBJECTS" +# +# Test 16 was to "verify that lustre will correct the mode of OBJECTS". +# But with new MDS stack we don't care about the mode of local objects +# anymore, so this test is removed. See bug 22944 for more details. +# test_17() { - local dev=${SINGLEMDS}_dev - local MDSDEV=${!dev} + local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) if [ ! -e "$MDSDEV" ]; then echo "no $MDSDEV existing, so mount Lustre to create one" @@ -463,8 +473,7 @@ run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should ret test_18() { [ "$FSTYPE" != "ldiskfs" ] && skip "not needed for FSTYPE=$FSTYPE" && return - local dev=${SINGLEMDS}_dev - local MDSDEV=${!dev} + local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) local MIN=2000000 @@ -533,8 +542,8 @@ run_test 19b "start/stop OSTs without MDS" test_20() { # first format the ost/mdt - start_ost start_mds + start_ost mount_client $MOUNT check_mount || return 43 rm -f $DIR/$tfile @@ -617,8 +626,8 @@ test_22() { run_test 22 "start a client before osts (should return errs)" test_23a() { # was test_23 - setup - # fail mds + setup + # fail mds stop $SINGLEMDS # force down client so that recovering mds waits for reconnect local running=$(grep -c $MOUNT /proc/mounts) || true @@ -645,7 +654,7 @@ test_23a() { # was test_23 local PID1 local PID2 local WAIT=0 - local MAX_WAIT=20 + local MAX_WAIT=30 local sleep=1 while [ "$WAIT" -lt "$MAX_WAIT" ]; do sleep $sleep @@ -657,9 +666,11 @@ test_23a() { # was test_23 echo "waiting for mount to finish ... 
" WAIT=$(( WAIT + sleep)) done - [ "$WAIT" -eq "$MAX_WAIT" ] && error "MOUNT_PID $MOUNT_PID and "\ + if [ "$WAIT" -eq "$MAX_WAIT" ]; then + error "MOUNT_PID $MOUNT_PID and "\ "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs" - ps -ef | grep mount + ps -ef | grep mount + fi stop_mds || error stop_ost || error } @@ -669,8 +680,8 @@ umount_client $MOUNT cleanup_nocli test_23b() { # was test_23 - start_ost start_mds + start_ost # Simulate -EINTR during mount OBD_FAIL_LDLM_CLOSE_THREAD lctl set_param fail_loc=0x80000313 mount_client $MOUNT @@ -694,7 +705,10 @@ cleanup_24a() { test_24a() { #set up fs1 gen_config + #set up fs2 + local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) + [ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \ @@ -742,6 +756,8 @@ test_24a() { run_test 24a "Multiple MDTs on a single node" test_24b() { + local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/}) + if [ -z "$fs2mds_DEV" ]; then do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \ skip_env "mixed loopback and real device not working" && return @@ -865,7 +881,9 @@ test_29() { fi # check MDT too - local MPROC="osc.$FSNAME-OST0001-osc-[M]*.active" + local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $FSNAME-OST0001) + mdtosc=${mdtosc/-MDT*/-MDT\*} + local MPROC="osc.$mdtosc.active" local MAX=30 local WAIT=0 while [ 1 ]; do @@ -906,9 +924,10 @@ test_29() { } run_test 29 "permanently remove an OST" -test_30() { +test_30a() { setup + echo Big config llog TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb" ORIG=$($TEST) LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5) @@ -918,11 +937,54 @@ test_30() { # make sure client restart still works umount_client $MOUNT mount_client $MOUNT || return 4 - [ "$($TEST)" -ne "$i" ] && return 5 - set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $ORIG || return 6 + [ "$($TEST)" -ne "$i" ] && error "Param didn't stick across restart $($TEST) != $i" + pass + + echo Erase parameter setting + do_facet mgs "$LCTL conf_param -d $FSNAME.llite.max_read_ahead_whole_mb" || return 6 + umount_client $MOUNT + mount_client $MOUNT || return 6 + FINAL=$($TEST) + echo "deleted (default) value=$FINAL, orig=$ORIG" + # assumes this parameter started at the default value + [ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG" + + cleanup +} +run_test 30a "Big config llog and conf_param deletion" + +test_30b() { + setup + + # Make a fake nid. Use the OST nid, and add 20 to the least significant + # numerical part of it. Hopefully that's not already a failover address for + # the server. 
+	OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
+	ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
+	NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
+	NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
+	echo "Using fake nid $NEW"
+
+	TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
+	set_and_check client "$TEST" "$FSNAME-OST0000.failover.node" $NEW || error "didn't add failover nid $NEW"
+	NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+	echo $NIDS
+	NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
+	echo "should have 2 failover nids: $NIDCOUNT"
+	[ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
+	do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || error "conf_param delete failed"
+	umount_client $MOUNT
+	mount_client $MOUNT || return 3
+
+	NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+	echo $NIDS
+	NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
+	echo "only 1 final nid should remain: $NIDCOUNT"
+	[ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
+	cleanup
 }
-run_test 30 "Big config llog"
+run_test 30b "Remove failover nids"
 
 test_31() { # bug 10734
 	# ipaddr must not exist
@@ -1110,6 +1172,8 @@ run_test 32b "Upgrade from 1.8 with writeconf"
 
 test_33a() { # bug 12333, was test_33
 	local rc=0
 	local FSNAME2=test-123
+	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
 	[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
 
 	if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
@@ -1245,6 +1309,8 @@ test_35a() { # bug 12459
 	}" $TMP/lustre-log-$TESTNAME.log`
 	[ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
 	cleanup
+	# remove nid settings
+	writeconf
 }
 run_test 35a "Reconnect to the last active server first"
 
@@ -1312,6 +1378,8 @@ test_35b() { # bug 18674
 		return 5
 
 	cleanup
+	# remove nid settings
+	writeconf
 }
 run_test 35b "Continue reconnection retries, if the active server is busy"
 
@@ -1319,6 +1387,7 @@ test_36() { # 12743
 	local rc
 	local FSNAME2=test1234
 	local fs3ost_HOST=$ost_HOST
+	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
 
 	[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
 	rc=0
@@ -1431,8 +1500,7 @@ test_38() { # bug 14222
 	log "rename lov_objid file on MDS"
 	rm -f $TMP/lov_objid.orig
-	local dev=${SINGLEMDS}_dev
-	local MDSDEV=${!dev}
+	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
 	do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
 	do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
 
@@ -1493,8 +1561,7 @@ run_test 40 "race during service thread startup"
 
 test_41() { #bug 14134
 	local rc
-	local dev=${SINGLEMDS}_dev
-	local MDSDEV=${!dev}
+	local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
 
 	start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
 	start ost1 `ostdevname 1` $OST_MOUNT_OPTS
@@ -1518,7 +1585,7 @@ run_test 41 "mount mds with --nosvc and --nomgs"
 
 test_42() { #bug 14693
 	setup
 	check_mount || return 2
-	do_facet client lctl conf_param lustre.llite.some_wrong_param=10
+	do_facet mgs $LCTL conf_param lustre.llite.some_wrong_param=10
 	umount_client $MOUNT
 	mount_client $MOUNT || return 1
 	cleanup
@@ -1679,16 +1746,15 @@ cleanup_46a() {
 		let count=count-1
 	done
 	stop_mds || rc=$?
-	# writeconf is needed after the test, otherwise,
-	# we might end up with extra OSTs
-	writeconf || rc=$?
 	cleanup_nocli || rc=$?
+	#writeconf to remove all ost2 traces for subsequent tests
+	writeconf
 	return $rc
 }
 
 test_46a() {
 	echo "Testing with $OSTCOUNT OSTs"
-	reformat
+	reformat_and_config
 	start_mds || return 1
 	#first client should see only one ost
 	start_ost || return 2
@@ -1811,9 +1877,7 @@ test_49() { # bug 17710
 	OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$LOCAL_TIMEOUT $MKFSOPT $OSTOPT"
 
 	reformat
-	start_mds
-	start_ost
-	mount_client $MOUNT
+	setup_noconfig
 	check_mount || return 1
 
 	echo "check ldlm_timout..."
@@ -1836,9 +1900,7 @@ test_49() { # bug 17710
 	OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$((LOCAL_TIMEOUT - 1)) $MKFSOPT $OSTOPT"
 
 	reformat
-	start_mds || return 4
-	start_ost || return 5
-	mount_client $MOUNT || return 6
+	setup_noconfig
 	check_mount || return 7
 
 	LDLM_MDS="`do_facet mds lctl get_param -n ldlm_timeout`"
@@ -2027,6 +2089,7 @@ test_50f() {
 	umount_client $MOUNT || error "Unable to unmount client"
 	stop_ost || error "Unable to stop OST1"
 	stop_mds || error "Unable to stop MDS"
+	#writeconf to remove all ost2 traces for subsequent tests
 	writeconf
 }
 run_test 50f "normal statfs one server in down =========================="
 
@@ -2035,6 +2098,8 @@ test_50g() {
 	[ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
 	setup
 	start_ost2 || error "Unable to start OST2"
+	wait_osc_import_state mds ost2 FULL
+	wait_osc_import_state client ost2 FULL
 
 	local PARAM="${FSNAME}-OST0001.osc.active"
@@ -2052,6 +2117,7 @@ test_50g() {
 	stop_ost2 || error "Unable to stop OST2"
 	stop_ost || error "Unable to stop OST1"
 	stop_mds || error "Unable to stop MDS"
+	#writeconf to remove all ost2 traces for subsequent tests
 	writeconf
 }
 run_test 50g "deactivated OST should not cause panic====================="
 
@@ -2060,9 +2126,7 @@ test_51() {
 	local LOCAL_TIMEOUT=20
 
 	reformat
-	start_mds
-	start_ost
-	mount_client $MOUNT
+	setup_noconfig
 	check_mount || return 1
 
 	mkdir $MOUNT/d1
@@ -2144,7 +2208,9 @@ test_52() {
 	[ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
 	touch $TMP/modified_first
 	[ $? -eq 0 ] || { error "Unable to create temporary file"; return 5; }
-	do_node $ost1node "mkdir -p $ost1tmp && touch $ost1tmp/modified_first"
+	local mtime=$(stat -c %Y $TMP/modified_first)
+	do_node $ost1node "mkdir -p $ost1tmp && touch -m -d @$mtime $ost1tmp/modified_first"
+	[ $? -eq 0 ] || { error "Unable to create temporary file"; return 6; }
 	sleep 1
 
@@ -2212,6 +2278,232 @@ test_52() {
 }
 run_test 52 "check recovering objects from lost+found"
 
+# Checks threads_min/max/started for some service
+#
+# Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), and a
+# parameter pattern prefix like 'ost.*.ost'.
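+#
+# Example invocation, as in test_53a below:
+#   thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads=64'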
+thread_sanity() {
+	local modname=$1
+	local facet=$2
+	local parampat=$3
+	local opts=$4
+	local tmin
+	local tmin2
+	local tmax
+	local tmax2
+	local tstarted
+	local paramp
+	local msg="Insane $modname thread counts"
+	shift 4
+
+	setup
+	check_mount || return 41
+
+	# We need to expand $parampat, but it may match multiple parameters, so
+	# we'll pick the first one
+	if ! paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1); then
+		error "Couldn't expand ${parampat}.threads_min parameter name"
+		return 22
+	fi
+
+	# Remove the .threads_min part
+	paramp=${paramp%.threads_min}
+
+	# Check for sanity in defaults
+	tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+	tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+	tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
+	lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
+	lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= tmax ))' || return $?
+
+	# Check that we can lower min/max
+	do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin - 1))"
+	do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - 10))"
+	tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+	tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+	lassert 25 "$msg" '(($tmin2 == ($tmin - 1) && $tmax2 == ($tmax -10)))' || return $?
+
+	# Check that we can set min/max to the same value
+	do_facet $facet "lctl set_param ${paramp}.threads_min=$tmin"
+	do_facet $facet "lctl set_param ${paramp}.threads_max=$tmin"
+	tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+	tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+	lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $?
+
+	# Check that we can't set max < min
+	do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmin - 1))"
+	tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+	tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+	lassert 27 "$msg" '(($tmin <= $tmax2))' || return $?
+
+	# We need to ensure that we get the module options desired; to do this
+	# we set LOAD_MODULES_REMOTE=true and we call setmodopts below.
+	LOAD_MODULES_REMOTE=true
+	cleanup
+	local oldvalue
+	setmodopts -a $modname "$opts" oldvalue
+
+	load_modules
+	setup
+	check_mount || return 41
+
+	# Restore previous setting of MODOPTS_*
+	setmodopts $modname "$oldvalue"
+
+	# Check that $opts took
+	tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min")
+	tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max")
+	tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started")
+	lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $?
+	cleanup
+
+	# Workaround a YALA bug where YALA expects that modules will remain
+	# loaded on the servers
+	LOAD_MODULES_REMOTE=false
+	load_modules
+	setup
+	cleanup
+}
+
+test_53a() {
+	thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads=64'
+}
+run_test 53a "check OSS thread count params"
+
+test_53b() {
+	thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads=64'
+}
+run_test 53b "check MDT thread count params"
+
+run_llverfs()
+{
+	local dir=$1
+	local partial_arg=""
+	local size=$(df -B G $dir | tail -1 | awk '{print $2}' | sed 's/G//') # Gb
+
+	# Run in partial (fast) mode if the size
+	# of a partition > 10 GB
+	[ $size -gt 10 ] && partial_arg="-p"
+
+	llverfs $partial_arg $dir
+}
+
+test_54a() {
+	do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1)
+	[ $? -eq 0 ] || error "llverdev failed!"
+	reformat_and_config
+}
+run_test 54a "llverdev"
+
+test_54b() {
+	setup
+	run_llverfs $MOUNT
+	[ $? -eq 0 ] || error "llverfs failed!"
+	cleanup
+}
+run_test 54b "llverfs"
+
+lov_objid_size()
+{
+	local max_ost_index=$1
+	echo -n $(((max_ost_index + 1) * 8))
+}
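+
+# For example, lov_objid_size 1023 returns (1023 + 1) * 8 = 8192: the
+# lov_objid file holds one 8-byte entry per OST index, 0 through 1023.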
+
+test_55() {
+	local mdsdev=$(mdsdevname 1)
+	local ostdev=$(ostdevname 1)
+	local saved_opts=$OST_MKFS_OPTS
+
+	for i in 0 1023 2048
+	do
+		OST_MKFS_OPTS="$saved_opts --index $i"
+		reformat
+
+		setup_noconfig
+		stopall
+
+		setup
+		sync
+		echo checking size of lov_objid for ost index $i
+		LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
+		if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
+			error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
+		else
+			echo ok, lov_objid size is correct: $LOV_OBJID_SIZE
+		fi
+		stopall
+	done
+
+	OST_MKFS_OPTS=$saved_opts
+	reformat
+}
+run_test 55 "check lov_objid size"
+
+test_56() {
+	add mds1 $MDS_MKFS_OPTS --mkfsoptions='\"-J size=16\"' --reformat $(mdsdevname 1)
+	add ost1 $OST_MKFS_OPTS --index=1000 --reformat $(ostdevname 1)
+	add ost2 $OST_MKFS_OPTS --index=10000 --reformat $(ostdevname 2)
+
+	start_mds
+	start_ost
+	start_ost2 || error "Unable to start second ost"
+	mount_client $MOUNT || error "Unable to mount client"
+	echo ok
+	$LFS osts
+	stopall
+	reformat
+}
+run_test 56 "check big indexes"
+
+test_57() { # bug 22656
+	local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
+	writeconf
+	do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed"
+	start_mgsmds
+	start_ost && error "OST registration from failnode should fail"
+	stop_mds
+	reformat
+}
+run_test 57 "initial registration from failnode should fail (should return errs)"
+
+count_osts() {
+	do_facet mgs $LCTL get_param mgs.MGS.live.$FSNAME | grep OST | wc -l
+}
+
+test_59() {
+	start_mgsmds >> /dev/null
+	local C1=$(count_osts)
+	if [ $C1 -eq 0 ]; then
+		start_ost >> /dev/null
+		C1=$(count_osts)
+	fi
+	stopall
+	echo "original ost count: $C1 (expect > 0)"
+	[ $C1 -gt 0 ] || error "No OSTs in $FSNAME log"
+	start_mgsmds -o writeconf >> /dev/null || error "MDT start failed"
+	local C2=$(count_osts)
+	echo "after mdt writeconf count: $C2 (expect 0)"
+	[ $C2 -gt 0 ] && error "MDT writeconf should erase OST logs"
+	echo "OST start without writeconf should fail:"
+	start_ost >> /dev/null && error "OST start without writeconf didn't fail"
+	echo "OST start with writeconf should succeed:"
+	start_ost -o writeconf >> /dev/null || error "OST1 start failed"
+	local C3=$(count_osts)
+	echo "after ost writeconf count: $C3 (expect 1)"
+	[ $C3 -eq 1 ] || error "new OST writeconf should add log"
+	start_ost2 -o writeconf >> /dev/null || error "OST2 start failed"
+	local C4=$(count_osts)
+	echo "after ost2 writeconf count: $C4 (expect 2)"
+	[ $C4 -eq 2 ] || error "OST2 writeconf should add log"
+	stop_ost2 >> /dev/null
+	cleanup_nocli >> /dev/null
+}
+run_test 59 "writeconf mount option"
+
+
+if ! combined_mgs_mds ; then
+	stop mgs
+fi
 
 cleanup_gss
 equals_msg `basename $0`: test complete
 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true