# bug number for skipped test:
-# 15977
-ALWAYS_EXCEPT=" 39 $CONF_SANITY_EXCEPT"
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=`dirname $0`
. $LUSTRE/tests/test-framework.sh
init_test_env $@
-init_logging
+
+# use small MDS + OST size to speed formatting time
+# do not make MDSSIZE/OSTSIZE too small, since that affects the default journal size
+MDSSIZE=200000
+OSTSIZE=200000
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+
# STORED_MDSSIZE is used in test_18
if [ -n "$MDSSIZE" ]; then
STORED_MDSSIZE=$MDSSIZE
fi
-# use small MDS + OST size to speed formatting time
-MDSSIZE=40000
-OSTSIZE=40000
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
+
+#
require_dsh_mds || exit 0
require_dsh_ost || exit 0
+#
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="30a 31 45"
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="0 1 2 3 6 7 15 18 24b 25 30 31 32 33 34a 45"
assert_DIR
formatall
}
-writeconf() {
- local facet=$SINGLEMDS
- local dev=${facet}_dev
- shift
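+# Run tunefs --writeconf on one facet; if tunefs fails, fall back to a full
+# reformat and return 1 so the caller knows the config was rebuilt.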
+writeconf1() {
+ local facet=$1
+ local dev=$2
+
stop ${facet} -f
rm -f ${facet}active
# who knows if/where $TUNEFS is installed? Better reformat if it fails...
-	do_facet ${facet} "$TUNEFS --writeconf ${!dev}" || echo "tunefs failed, reformatting instead" && reformat
-	gen_config
+	do_facet ${facet} "$TUNEFS --writeconf $dev" ||
+		{ echo "tunefs failed, reformatting instead" && reformat_and_config && return 1; }
+	return 0
+}
+
+writeconf() {
+	# if writeconf1 fails it has already reformatted, so treat that as success
+	writeconf1 mds $MDSDEV || return 0
+	writeconf1 ost1 `ostdevname 1` || return 0
+	writeconf1 ost2 `ostdevname 2` || return 0
}
gen_config() {
reformat_and_config() {
reformat
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
gen_config
}
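+# Start a standalone MGS; only needed when the MGS is not combined with the MDS.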
+start_mgs () {
+ echo "start mgs"
+ start mgs $MGSDEV $mgs_MOUNT_OPTS
+}
+
start_mds() {
local facet=$SINGLEMDS
-	# we can not use MDSDEV1 here because SINGLEMDS could be set not to mds1 only
+	# we cannot use MDSDEV1 here because SINGLEMDS may be set to a facet other than mds1
local num=$(echo $facet | tr -d "mds")
local dev=$(mdsdevname $num)
echo "start mds service on `facet_active_host $facet`"
- start $facet ${dev} $MDS_MOUNT_OPTS || return 94
+ start $facet ${dev} $MDS_MOUNT_OPTS $@ || return 94
+}
+
+start_mgsmds() {
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
+ start_mds $@
}
stop_mds() {
start_ost() {
echo "start ost1 service on `facet_active_host ost1`"
- start ost1 `ostdevname 1` $OST_MOUNT_OPTS || return 95
+ start ost1 `ostdevname 1` $OST_MOUNT_OPTS $@ || return 95
}
stop_ost() {
start_ost2() {
echo "start ost2 service on `facet_active_host ost2`"
- start ost2 `ostdevname 2` $OST_MOUNT_OPTS || return 92
+ start ost2 `ostdevname 2` $OST_MOUNT_OPTS $@ || return 92
}
stop_ost2() {
}
setup() {
- start_ost
- start_mds
- mount_client $MOUNT
+ start_mds || error "MDT start failed"
+ start_ost || error "OST start failed"
+ mount_client $MOUNT || error "client start failed"
}
setup_noconfig() {
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
+
start_mds
start_ost
mount_client $MOUNT
}
cleanup_nocli() {
- stop_mds || return 201
stop_ost || return 202
+ stop_mds || return 201
unload_modules || return 203
}
run_test 0 "single mount setup"
test_1() {
+ start_mds || error "MDT start failed"
start_ost
echo "start ost second time..."
- setup
+ start_ost && error "2nd OST start should fail"
+ mount_client $MOUNT || error "client start failed"
check_mount || return 42
cleanup || return $?
}
run_test 1 "start up ost twice (should return errors)"
test_2() {
- start_ost
start_mds
echo "start mds second time.."
- start_mds
+ start_mds && error "2nd MDT start should fail"
+ start_ost
mount_client $MOUNT
check_mount || return 43
cleanup || return $?
test_3() {
setup
#mount.lustre returns an error if already in mtab
- mount_client $MOUNT && return $?
+ mount_client $MOUNT && error "2nd client mount should fail"
check_mount || return 44
cleanup || return $?
}
run_test 5b "mds down, cleanup after failed mount (bug 2712) (should return errs)"
test_5c() {
- start_ost
start_mds
+ start_ost
[ -d $MOUNT ] || mkdir -p $MOUNT
grep " $MOUNT " /etc/mtab && echo "test 5c: mtab before mount" && return 10
local oldfs="${FSNAME}"
run_test 5d "mount with ost down"
test_5e() {
- start_ost
start_mds
+ start_ost
#define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
do_facet client "lctl set_param fail_loc=0x80000506"
fi
stop_ost || return $?
}
-
run_test 9 "test ptldebug and subsystem for mkfs"
-# LOGS/PENDING do not exist anymore since CMD3
-test_16() {
- local TMPMTPT="${TMP}/conf16"
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
- if [ ! -e "$MDSDEV" ]; then
- log "no $MDSDEV existing, so mount Lustre to create one"
- setup
- check_mount || return 41
- cleanup || return $?
- fi
-
- [ -f "$MDSDEV" ] && LOOPOPT="-o loop"
-
- log "change the mode of $MDSDEV/OBJECTS to 555"
- do_facet $SINGLEMDS "mkdir -p $TMPMTPT &&
- mount $LOOPOPT -t $FSTYPE $MDSDEV $TMPMTPT &&
- chmod 555 $TMPMTPT/OBJECTS &&
- umount $TMPMTPT" || return $?
-
- log "mount Lustre to change the mode of OBJECTS, then umount Lustre"
- setup
- check_mount || return 41
- cleanup || return $?
-
- log "read the mode of OBJECTS and check if they has been changed properly"
- EXPECTEDOBJECTSMODE=`do_facet $SINGLEMDS "$DEBUGFS -R 'stat OBJECTS' $MDSDEV 2> /dev/null" | grep 'Mode: ' | sed -e "s/.*Mode: *//" -e "s/ *Flags:.*//"`
-
- if [ "$EXPECTEDOBJECTSMODE" = "0777" ]; then
- log "Success:Lustre change the mode of OBJECTS correctly"
- else
- error "Lustre does not change mode of OBJECTS properly"
- fi
-}
-run_test 16 "verify that lustre will correct the mode of OBJECTS"
+#
+# Test 16 was to "verify that lustre will correct the mode of OBJECTS".
+# But with new MDS stack we don't care about the mode of local objects
+# anymore, so this test is removed. See bug 22944 for more details.
+#
test_17() {
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
if [ ! -e "$MDSDEV" ]; then
echo "no $MDSDEV existing, so mount Lustre to create one"
test_18() {
[ "$FSTYPE" != "ldiskfs" ] && skip "not needed for FSTYPE=$FSTYPE" && return
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
local MIN=2000000
test_20() {
# first format the ost/mdt
- start_ost
start_mds
+ start_ost
mount_client $MOUNT
check_mount || return 43
rm -f $DIR/$tfile
run_test 22 "start a client before osts (should return errs)"
test_23a() { # was test_23
- setup
- # fail mds
+ setup
+ # fail mds
stop $SINGLEMDS
# force down client so that recovering mds waits for reconnect
local running=$(grep -c $MOUNT /proc/mounts) || true
local PID1
local PID2
local WAIT=0
- local MAX_WAIT=20
+ local MAX_WAIT=30
local sleep=1
while [ "$WAIT" -lt "$MAX_WAIT" ]; do
sleep $sleep
echo "waiting for mount to finish ... "
WAIT=$(( WAIT + sleep))
done
- [ "$WAIT" -eq "$MAX_WAIT" ] && error "MOUNT_PID $MOUNT_PID and "\
+ if [ "$WAIT" -eq "$MAX_WAIT" ]; then
+ error "MOUNT_PID $MOUNT_PID and "\
"MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
- ps -ef | grep mount
+ ps -ef | grep mount
+ fi
stop_mds || error
stop_ost || error
}
cleanup_nocli
test_23b() { # was test_23
- start_ost
start_mds
+ start_ost
# Simulate -EINTR during mount OBD_FAIL_LDLM_CLOSE_THREAD
lctl set_param fail_loc=0x80000313
mount_client $MOUNT
test_24a() {
#set up fs1
gen_config
+
#set up fs2
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
run_test 24a "Multiple MDTs on a single node"
test_24b() {
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
if [ -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
skip_env "mixed loopback and real device not working" && return
}
run_test 27b "Reacquire MGS lock after failover"
-test_28() {
+test_28a() {
setup
TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
- PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
+ PARAM="llite.$FSNAME.max_read_ahead_whole_mb"
ORIG=$($TEST)
FINAL=$(($ORIG + 1))
set_and_check client "$TEST" "$PARAM" $FINAL || return 3
set_and_check client "$TEST" "$PARAM" $ORIG || return 5
cleanup
}
-run_test 28 "permanent parameter setting"
+run_test 28a "permanent parameter setting"
+
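+# check_28b <node> <target> <param> <value>: permanently set <target>.<param>
+# via conf_param and verify the live value on <node>.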
+check_28b() {
+ local NODE=$1
+ shift
+ set_and_check $NODE "$LCTL get_param -n $1*.$2 | head -1" "$1.$2" "$3" || \
+ error "conf_param $1.$2 failed"
+}
+
+test_28b() {
+ setup > /dev/null
+ # should error
+ do_facet mgs "$LCTL conf_param foo=1 2>/dev/null" && \
+ error "Bad format should fail"
+ do_facet mgs "$LCTL conf_param osc.notanfs-OST0000.active=0 2>/dev/null" && \
+ error "Setting on unknown fs should fail"
+ do_facet mgs "$LCTL conf_param osc.$FSNAME-OST00000.active=0 2>/dev/null" && \
+ error "Bad target name should fail"
+ # should succeed
+ check_28b mds mdt.$FSNAME-MDT0000 capa_timeout 1500
+ check_28b mds mdt.$FSNAME-MDT* identity_expire 150
+ check_28b mds mdd.$FSNAME-MDT0000 atime_diff 15
+ check_28b mds mdd.$FSNAME-MDT* sync_permission 0
+ check_28b ost1 obdfilter.$FSNAME-OST0000 client_cache_seconds 15
+ check_28b ost1 obdfilter.$FSNAME-OST* client_cache_count 15
+ check_28b mds lov.$FSNAME-MDT0000 qos_maxage "15 Sec"
+ check_28b mds lov.$FSNAME-MDT0000 qos_prio_free "15%"
+ check_28b client mdc.$FSNAME-MDT0000 max_rpcs_in_flight 15
+ check_28b client osc.$FSNAME-OST0000 active 0
+ check_28b client osc.$FSNAME-OST0000 active 1
+ check_28b client osc.$FSNAME-OST0000 max_dirty_mb 15
+ check_28b client llite.$FSNAME max_read_ahead_mb 15
+ set_and_check client "$LCTL get_param -n at_max" "sys.$FSNAME.at_max" 1500 || \
+ error "conf_param sys.fsname.at_max failed"
+ cleanup > /dev/null
+}
+run_test 28b "permanent parameter setting, set_param syntax"
test_29() {
[ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
- setup > /dev/null 2>&1
+ setup > /dev/null 2>&1
start_ost2
sleep 10
- local PARAM="$FSNAME-OST0001.osc.active"
- local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
- local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
+ local PARAM="osc.$FSNAME-OST0001.active"
+ local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
+ local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
- ACTV=$(lctl get_param -n $PROC_ACT)
+ ACTV=$(lctl get_param -n $PROC_ACT)
DEAC=$((1 - $ACTV))
set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
- # also check ost_server_uuid status
+ # also check ost_server_uuid status
RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV)
if [ -z "$RESULT" ]; then
echo "Live client not deactivated: $(lctl get_param -n $PROC_UUID)"
fi
# check MDT too
- local MPROC="osc.$FSNAME-OST0001-osc-[M]*.active"
+ local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $FSNAME-OST0001)
+ mdtosc=${mdtosc/-MDT*/-MDT\*}
+ local MPROC="osc.$mdtosc.active"
local MAX=30
local WAIT=0
while [ 1 ]; do
}
run_test 29 "permanently remove an OST"
-test_30() {
+test_30a() {
setup
+ echo Big config llog
TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
ORIG=$($TEST)
LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
for i in ${LIST[@]}; do
- set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
+ set_and_check client "$TEST" "llite.$FSNAME.max_read_ahead_whole_mb" $i || return 3
done
# make sure client restart still works
umount_client $MOUNT
mount_client $MOUNT || return 4
- [ "$($TEST)" -ne "$i" ] && return 5
- set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $ORIG || return 6
+ [ "$($TEST)" -ne "$i" ] && error "Param didn't stick across restart $($TEST) != $i"
+ pass
+
+ echo Erase parameter setting
+ do_facet mgs "$LCTL conf_param -d llite.$FSNAME.max_read_ahead_whole_mb" || return 6
+ umount_client $MOUNT
+ mount_client $MOUNT || return 6
+ FINAL=$($TEST)
+ echo "deleted (default) value=$FINAL, orig=$ORIG"
+ # assumes this parameter started at the default value
+	[ "$FINAL" -eq "$ORIG" ] || error "Deleted value=$FINAL, orig=$ORIG"
+
+ cleanup
+}
+run_test 30a "Big config llog and conf_param deletion"
+
+test_30b() {
+ setup
+
+ # Make a fake nid. Use the OST nid, and add 20 to the least significant
+ # numerical part of it. Hopefully that's not already a failover address for
+ # the server.
+ OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
+ ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
+ NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
+ NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
+ echo "Using fake nid $NEW"
+
+ TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
+ set_and_check client "$TEST" "osc.$FSNAME-OST0000.failover.node" $NEW || error "didn't add failover nid $NEW"
+ NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+ echo $NIDS
+ NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
+ echo "should have 2 failover nids: $NIDCOUNT"
+ [ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
+ do_facet mgs "$LCTL conf_param -d osc.$FSNAME-OST0000.failover.node" || error "conf_param delete failed"
+ umount_client $MOUNT
+ mount_client $MOUNT || return 3
+
+ NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+ echo $NIDS
+ NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
+ echo "only 1 final nid should remain: $NIDCOUNT"
+ [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
+
cleanup
}
-run_test 30 "Big config llog"
+run_test 30b "Remove failover nids"
test_31() { # bug 10734
# ipaddr must not exist
test_33a() { # bug 12333, was test_33
local rc=0
local FSNAME2=test-123
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
- do_facet $SINGLEMDS "$LCTL conf_param $FSNAME2.sys.timeout=200" || rc=1
+ do_facet mgs "$LCTL conf_param sys.$FSNAME2.timeout=200" || rc=1
mkdir -p $MOUNT2
mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || rc=2
echo "ok."
sleep 1
cleanup
}
-run_test 34a "umount with opened file should be fail"
+run_test 34a "umount with opened file should fail"
test_34b() {
log "Set up a fake failnode for the MDS"
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet $SINGLEMDS $LCTL conf_param ${device}.failover.node=$FAKENID || return 4
+ do_facet $SINGLEMDS $LCTL conf_param mdc.${device}.failover.node=$FAKENID || return 4
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
}" $TMP/lustre-log-$TESTNAME.log`
[ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
cleanup
+ # remove nid settings
+ writeconf
}
run_test 35a "Reconnect to the last active server first"
FAKENID="127.0.0.2"
local device=$(do_facet mds "$LCTL get_param -n devices" | \
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mds "$LCTL conf_param ${device}.failover.node=$FAKENID" || \
+ do_facet mds "$LCTL conf_param mdc.${device}.failover.node=$FAKENID" || \
return 1
local at_max_saved=0
at_max_set 0 mds client
fi
- mkdir -p $MOUNT/testdir
- touch $MOUNT/testdir/test
+ mkdir -p $MOUNT/$tdir
log "Injecting EBUSY on MDS"
# Setting OBD_FAIL_MDS_RESEND=0x136
do_facet mds "$LCTL set_param fail_loc=0x80000136" || return 2
- log "Stat on a test file"
- stat $MOUNT/testdir/test
+ $LCTL set_param mdc.${FSNAME}*.stats=clear
+
+ log "Creating a test file and stat it"
+ touch $MOUNT/$tdir/$tfile
+ stat $MOUNT/$tdir/$tfile
log "Stop injecting EBUSY on MDS"
do_facet mds "$LCTL set_param fail_loc=0" || return 3
- rm -f $MOUNT/testdir/test
+ rm -f $MOUNT/$tdir/$tfile
log "done"
# restore adaptive timeout
$LCTL dk $TMP/lustre-log-$TESTNAME.log
+ CONNCNT=`$LCTL get_param mdc.${FSNAME}*.stats | awk '/mds_connect/{print $2}'`
+
# retrieve from the log if the client has ever tried to
# contact the fake server after the loss of connection
FAILCONN=`awk "BEGIN {ret = 0;}
log "ERROR: The client tried to reconnect to the failover server while the primary was busy" && \
return 5
+ # When OBD_FAIL_MDS_RESEND is hit, we sleep for 2 * obd_timeout
+ # Reconnects are supposed to be rate limited to one every 5s
+ [ $CONNCNT -gt $((2 * $TIMEOUT / 5 + 1)) ] && \
+ log "ERROR: Too many reconnects $CONNCNT" && \
+ return 6
+
cleanup
+ # remove nid settings
+ writeconf
}
run_test 35b "Continue reconnection retries, if the active server is busy"
local rc
local FSNAME2=test1234
local fs3ost_HOST=$ost_HOST
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
rc=0
log "rename lov_objid file on MDS"
rm -f $TMP/lov_objid.orig
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
test_41() { #bug 14134
local rc
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
start ost1 `ostdevname 1` $OST_MOUNT_OPTS
test_42() { #bug 14693
setup
check_mount || return 2
- do_facet mgs $LCTL conf_param lustre.llite.some_wrong_param=10
+ do_facet mgs $LCTL conf_param llite.$FSNAME.some_wrong_param=10
umount_client $MOUNT
mount_client $MOUNT || return 1
cleanup
chmod ugo+x $DIR || error "chmod 0 failed"
set_and_check mds \
"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
- "$FSNAME.mdt.root_squash" \
+ "mdt.$FSNAME-MDT*.root_squash" \
"0:0"
set_and_check mds \
"lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
- "$FSNAME.mdt.nosquash_nids" \
+ "mdt.$FSNAME-MDT*.nosquash_nids" \
"NONE"
#
#
set_and_check mds \
"lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
- "$FSNAME.mdt.root_squash" \
+ "mdt.$FSNAME-MDT*.root_squash" \
"$RUNAS_ID:$RUNAS_ID"
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
set_and_check mds \
"lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
- "$FSNAME-MDTall.mdt.nosquash_nids" \
+ "mdt.$FSNAME-MDT*.nosquash_nids" \
"$NIDLIST"
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
let count=count-1
done
stop_mds || rc=$?
- # writeconf is needed after the test, otherwise,
- # we might end up with extra OSTs
- writeconf || rc=$?
cleanup_nocli || rc=$?
+ #writeconf to remove all ost2 traces for subsequent tests
+ writeconf
return $rc
}
test_46a() {
echo "Testing with $OSTCOUNT OSTs"
- reformat
+ reformat_and_config
start_mds || return 1
#first client should see only one ost
start_ost || return 2
OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$LOCAL_TIMEOUT $MKFSOPT $OSTOPT"
reformat
- start_mds
- start_ost
- mount_client $MOUNT
+ setup_noconfig
check_mount || return 1
echo "check ldlm_timout..."
OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$((LOCAL_TIMEOUT - 1)) $MKFSOPT $OSTOPT"
reformat
- start_mds || return 4
- start_ost || return 5
- mount_client $MOUNT || return 6
+ setup_noconfig
check_mount || return 7
LDLM_MDS="`do_facet mds lctl get_param -n ldlm_timeout`"
umount_client $MOUNT || error "Unable to unmount client"
stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
+ #writeconf to remove all ost2 traces for subsequent tests
writeconf
}
run_test 50f "normal statfs one server in down =========================="
stop_ost2 || error "Unable to stop OST2"
stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
+ #writeconf to remove all ost2 traces for subsequent tests
writeconf
}
run_test 50g "deactivated OST should not cause panic====================="
local LOCAL_TIMEOUT=20
reformat
- start_mds
- start_ost
- mount_client $MOUNT
+ setup_noconfig
check_mount || return 1
mkdir $MOUNT/d1
}
run_test 52 "check recovering objects from lost+found"
+# Check the threads_min/max/started parameters of a service.
+#
+# Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), a
+# parameter pattern prefix like 'ost.*.ost', and the module options to apply
+# (e.g., 'oss_num_threads=64').
+thread_sanity() {
+ local modname=$1
+ local facet=$2
+ local parampat=$3
+ local opts=$4
+ local tmin
+ local tmin2
+ local tmax
+ local tmax2
+ local tstarted
+ local paramp
+ local msg="Insane $modname thread counts"
+ shift 4
+
+ setup
+ check_mount || return 41
+
+ # We need to expand $parampat, but it may match multiple parameters, so
+ # we'll pick the first one
+ paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1)
+ if [ -z "$paramp" ]; then
+ error "Couldn't expand ${parampat}.threads_min parameter name"
+ return 22
+ fi
+
+ # Remove the .threads_min part
+ paramp=${paramp%.threads_min}
+
+ # Check for sanity in defaults
+ tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
+ lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
+	lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
+
+ # Check that we can lower min/max
+ do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin - 1))"
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - 10))"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+	lassert 25 "$msg" '(($tmin2 == ($tmin - 1) && $tmax2 == ($tmax - 10)))' || return $?
+
+ # Check that we can set min/max to the same value
+ do_facet $facet "lctl set_param ${paramp}.threads_min=$tmin"
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$tmin"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $?
+
+ # Check that we can't set max < min
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmin - 1))"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ lassert 27 "$msg" '(($tmin <= $tmax2))' || return $?
+
+ # We need to ensure that we get the module options desired; to do this
+ # we set LOAD_MODULES_REMOTE=true and we call setmodopts below.
+ LOAD_MODULES_REMOTE=true
+ cleanup
+ local oldvalue
+ setmodopts -a $modname "$opts" oldvalue
+
+ load_modules
+ setup
+ check_mount || return 41
+
+ # Restore previous setting of MODOPTS_*
+ setmodopts $modname "$oldvalue"
+
+ # Check that $opts took
+ tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min")
+ tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max")
+ tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started")
+ lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $?
+ cleanup
+
+ # Workaround a YALA bug where YALA expects that modules will remain
+ # loaded on the servers
+ LOAD_MODULES_REMOTE=false
+ load_modules
+ setup
+ cleanup
+}
+
+test_53a() {
+ thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads=64'
+}
+run_test 53a "check OSS thread count params"
+
+test_53b() {
+	thread_sanity MDT $SINGLEMDS 'mdt.*.*' 'mdt_num_threads=64'
+}
+run_test 53b "check MDT thread count params"
+
+run_llverfs()
+{
+ local dir=$1
+ local partial_arg=""
+	local size=$(df -B G $dir | tail -1 | awk '{print $2}' | sed 's/G//') # size in GB
+
+ # Run in partial (fast) mode if the size
+ # of a partition > 10 GB
+ [ $size -gt 10 ] && partial_arg="-p"
+
+ llverfs $partial_arg $dir
+}
+
+test_54a() {
+ do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1)
+ [ $? -eq 0 ] || error "llverdev failed!"
+ reformat_and_config
+}
+run_test 54a "llverdev"
+
+test_54b() {
+ setup
+ run_llverfs $MOUNT
+ [ $? -eq 0 ] || error "llverfs failed!"
+ cleanup
+}
+run_test 54b "llverfs"
+
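+# The MDS lov_objid file holds one 8-byte last-object-id entry per OST index,
+# so an OST at index N implies a file size of (N + 1) * 8 bytes.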
+lov_objid_size()
+{
+ local max_ost_index=$1
+ echo -n $(((max_ost_index + 1) * 8))
+}
+
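+# Reformat with an OST at each index and verify that the lov_objid size on
+# the MDS matches what lov_objid_size() predicts.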
+test_55() {
+ local mdsdev=$(mdsdevname 1)
+ local ostdev=$(ostdevname 1)
+ local saved_opts=$OST_MKFS_OPTS
+
+ for i in 0 1023 2048
+ do
+ OST_MKFS_OPTS="$saved_opts --index $i"
+ reformat
+
+ setup_noconfig
+ stopall
+
+ setup
+ sync
+ echo checking size of lov_objid for ost index $i
+ LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
+ if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
+ error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
+ else
+ echo ok, lov_objid size is correct: $LOV_OBJID_SIZE
+ fi
+ stopall
+ done
+
+ OST_MKFS_OPTS=$saved_opts
+ reformat
+}
+run_test 55 "check lov_objid size"
+
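+# Verify that OSTs formatted with large, sparse indexes (1000, 10000) still
+# register and the filesystem mounts.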
+test_56() {
+ add mds1 $MDS_MKFS_OPTS --mkfsoptions='\"-J size=16\"' --reformat $(mdsdevname 1)
+ add ost1 $OST_MKFS_OPTS --index=1000 --reformat $(ostdevname 1)
+ add ost2 $OST_MKFS_OPTS --index=10000 --reformat $(ostdevname 2)
+
+ start_mds
+ start_ost
+ start_ost2 || error "Unable to start second ost"
+ mount_client $MOUNT || error "Unable to mount client"
+ echo ok
+ $LFS osts
+ stopall
+ reformat
+}
+run_test 56 "check big indexes"
+
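+# ost1's own NID is registered as a failover node, so its initial
+# registration with the MGS is expected to be rejected.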
+test_57() { # bug 22656
+ local NID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
+ writeconf
+ do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" || error "tunefs failed"
+ start_mgsmds
+ start_ost && error "OST registration from failnode should fail"
+ stop_mds
+ reformat
+}
+run_test 57 "initial registration from failnode should fail (should return errs)"
+
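+# Count the OSTs present in the MGS live configuration for $FSNAME.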
+count_osts() {
+ do_facet mgs $LCTL get_param mgs.MGS.live.$FSNAME | grep OST | wc -l
+}
+
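+# Mounting a target with -o writeconf regenerates its config llog: an MDT
+# writeconf erases the prior OST entries, after which each OST must also
+# mount with writeconf to re-register.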
+test_59() {
+ start_mgsmds >> /dev/null
+ local C1=$(count_osts)
+ if [ $C1 -eq 0 ]; then
+ start_ost >> /dev/null
+ C1=$(count_osts)
+ fi
+ stopall
+ echo "original ost count: $C1 (expect > 0)"
+ [ $C1 -gt 0 ] || error "No OSTs in $FSNAME log"
+ start_mgsmds -o writeconf >> /dev/null || error "MDT start failed"
+ local C2=$(count_osts)
+ echo "after mdt writeconf count: $C2 (expect 0)"
+ [ $C2 -gt 0 ] && error "MDT writeconf should erase OST logs"
+ echo "OST start without writeconf should fail:"
+ start_ost >> /dev/null && error "OST start without writeconf didn't fail"
+ echo "OST start with writeconf should succeed:"
+ start_ost -o writeconf >> /dev/null || error "OST1 start failed"
+ local C3=$(count_osts)
+ echo "after ost writeconf count: $C3 (expect 1)"
+	[ $C3 -eq 1 ] || error "new OST writeconf should add log"
+ start_ost2 -o writeconf >> /dev/null || error "OST2 start failed"
+ local C4=$(count_osts)
+ echo "after ost2 writeconf count: $C4 (expect 2)"
+ [ $C4 -eq 2 ] || error "OST2 writeconf should add log"
+ stop_ost2 >> /dev/null
+ cleanup_nocli >> /dev/null
+}
+run_test 59 "writeconf mount option"
+
+
+if ! combined_mgs_mds ; then
+ stop mgs
+fi
cleanup_gss
equals_msg `basename $0`: test complete
[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true