ONLY=${ONLY:-"$*"}
# These tests don't apply to mountconf
-# xml xml xml xml xml xml dumb FIXME
-MOUNTCONFSKIP="10 11 12 13 13b 14 15 18"
+MOUNTCONFSKIP="9 10 11 12 13 13b 14 15 18"
# bug number for skipped test:
-ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT $MOUNTCONFSKIP"
+ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT $MOUNTCONFSKIP 16 23"
+# NOTE(review): tests 16 and 23 are excluded above without bug numbers in
+# the "bug number for skipped test" comment -- record them per the rule below.
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=`dirname $0`
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
+# Default debug mask for lnet.debug (-1 = enable all debug subsystems);
+# overridable from the environment.
+PTLDEBUG=${PTLDEBUG:--1}
LUSTRE=${LUSTRE:-`dirname $0`/..}
RLUSTRE=${RLUSTRE:-$LUSTRE}
+# Paths to the lustre mount/mkfs helpers; overridable for non-standard installs.
+MOUNTLUSTRE=${MOUNTLUSTRE:-/sbin/mount.lustre}
+MKFSLUSTRE=${MKFSLUSTRE:-/usr/sbin/mkfs.lustre}
HOSTNAME=`hostname`
. $LUSTRE/tests/test-framework.sh
init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+. ${CONFIG:=$LUSTRE/tests/cfg/local.sh}
reformat() {
formatall
gen_config
+init_krb5_env
test_0() {
setup
test_24a() {
local fs2mds_HOST=$mds_HOST
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --nomgs --mgsnode=$MGSNID --reformat ${MDSDEV}_2 || exit 10
+ # test 8-char fsname as well
+ local FSNAME2=test1234
+ add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --nomgs --mgsnode=$MGSNID --reformat ${MDSDEV}_2 || exit 10
local fs2ost_HOST=$ost_HOST
local fs2ostdev=$(ostdevname 1)_2
- add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME}2 --reformat $fs2ostdev || exit 10
+ add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2ostdev || exit 10
setup
start fs2mds ${MDSDEV}_2 $MDS_MOUNT_OPTS
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
mkdir -p $MOUNT2
- mount -t lustre $MGSNID:/${FSNAME}2 $MOUNT2 || return 1
+ mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
# 1 still works
check_mount || return 2
# files written on 1 should not show up on 2
sleep 10
[ -e $MOUNT2/$tfile ] && error "File bleed" && return 7
# 2 should work
+ sleep 5
cp /etc/passwd $MOUNT2/b || return 3
rm $MOUNT2/b || return 4
# 2 is actually mounted
umount_client $MOUNT
# the MDS must remain up until last MDT
stop_mds
- MDS=$(awk '($3 ~ "mdt" && $4 ~ "MDS") { print $4 }' $LPROC/devices)
- [ -z "$MDS" ] && error "No MDS" && return 8
+ MDS=$(awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' $LPROC/devices)
+ [ -z "$MDS" ] && error "No MDT" && return 8
umount $MOUNT2
stop fs2mds -f
stop fs2ost -f
# we need modules before mount for sysctl, so make sure...
[ -z "$(lsmod | grep lustre)" ] && modprobe lustre
#define OBD_FAIL_MDS_FS_SETUP 0x135
- sysctl -w lustre.fail_loc=0x80000135
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000135"
start_mds && echo MDS started && return 1
cat $LPROC/devices
DEVS=$(cat $LPROC/devices | wc -l)
test_27b() {
setup
facet_failover mds
+ # After MGS failover, conf_param-set values must still be applied; proc
+ # paths move from mds/group_acquire_expire to mdt/identity_acquire_expire.
- set_and_check "cat $LPROC/mds/$FSNAME-MDT0000/group_acquire_expire" "$FSNAME-MDT0000.mdt.group_acquire_expire" || return 3
- set_and_check "cat $LPROC/mdc/$FSNAME-MDT0000-mdc-*/max_rpcs_in_flight" "$FSNAME-MDT0000.mdc.max_rpcs_in_flight" || return 4
+ set_and_check "cat $LPROC/mdt/$FSNAME-MDT0000/identity_acquire_expire" "$FSNAME-MDT0000.mdt.identity_acquire_expire" || return 3
+ set_and_check "cat $LPROC/mdc/$FSNAME-MDT0000-mdc-*/max_rpcs_in_flight" "$FSNAME-MDT0000.mdc.max_rpcs_in_flight" || return 4
cleanup
}
run_test 27b "Reacquire MGS lock after failover"
sleep 10
local PARAM="$FSNAME-OST0001.osc.active"
- local PROC_ACT="$LPROC/osc/$FSNAME-OST0001-osc-*/active"
- local PROC_UUID="$LPROC/osc/$FSNAME-OST0001-osc-*/ost_server_uuid"
+ local PROC_ACT="$LPROC/osc/$FSNAME-OST0001-osc-[^M]*/active"
+ local PROC_UUID="$LPROC/osc/$FSNAME-OST0001-osc-[^M]*/ost_server_uuid"
if [ ! -r $PROC_ACT ]; then
echo "Can't read $PROC_ACT"
ls $LPROC/osc/$FSNAME-*
fi
# check MDT too
- local MPROC="$LPROC/osc/$FSNAME-OST0001-osc/active"
+ local MPROC="$LPROC/osc/$FSNAME-OST0001-osc-[M]*/active"
if [ -r $MPROC ]; then
RESULT=$(cat $MPROC)
if [ $RESULT -ne $DEAC ]; then
}
run_test 31 "Connect to non-existent node (shouldn't crash)"
+# Upgrade test: mount pre-packaged 1.4-format disk images (disk1_4.zip)
+# without writeconf.  Server nids in the old config are stale, so only the
+# servers are expected to start; a client can mount only after a good MDT
+# failover nid is configured, and must exclude the unreachable OST.
+test_32a() {
+ [ -z "$TUNEFS" ] && echo "No tunefs" && return
+ [ ! -r disk1_4.zip ] && echo "Cant find disk1_4.zip, skipping" && return
+ unzip -o -j -d $TMP/$tdir disk1_4.zip || { echo "Cant unzip disk1_4, skipping" && return ; }
+ load_modules
+ sysctl lnet.debug=$PTLDEBUG
+
+ $TUNEFS $TMP/$tdir/mds || error "tunefs failed"
+ # nids are wrong, so client wont work, but server should start
+ start mds $TMP/$tdir/mds "-o loop,exclude=lustre-OST0000" || return 3
+ local UUID=$(cat $LPROC/mds/lustre-MDT0000/uuid)
+ echo MDS uuid $UUID
+ [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
+
+ $TUNEFS --mgsnode=`hostname` $TMP/$tdir/ost1 || error "tunefs failed"
+ start ost1 $TMP/$tdir/ost1 "-o loop" || return 5
+ UUID=$(cat $LPROC/obdfilter/lustre-OST0000/uuid)
+ echo OST uuid $UUID
+ [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
+
+ local NID=$($LCTL list_nids | head -1)
+
+ # Without writeconf the OST never re-registered, so OSC tweaks must fail;
+ # MDC tweaks go through the (running) MDT and must succeed.
+ echo "OSC changes should return err:"
+ $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 && return 7
+ $LCTL conf_param lustre-OST0000.failover.node=$NID && return 8
+ echo "ok."
+ echo "MDC changes should succeed:"
+ $LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
+ $LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
+ echo "ok."
+
+ # With a new good MDT failover nid, we should be able to mount a client
+ # (but it cant talk to OST)
+ local OLDMOUNTOPT=$MOUNTOPT
+ MOUNTOPT="exclude=lustre-OST0000"
+ mount_client $MOUNT
+ MOUNTOPT=$OLDMOUNTOPT
+ set_and_check "cat $LPROC/mdc/*/max_rpcs_in_flight" "lustre-MDT0000.mdc.max_rpcs_in_flight" || return 11
+
+ zconf_umount `hostname` $MOUNT -f
+ cleanup_nocli
+
+ # mount a second time to make sure we didnt leave upgrade flag on
+ $TUNEFS --dryrun $TMP/$tdir/mds || error "tunefs failed"
+ start mds $TMP/$tdir/mds "-o loop,exclude=lustre-OST0000" || return 12
+ cleanup_nocli
+
+ [ -d $TMP/$tdir ] && rm -rf $TMP/$tdir
+}
+run_test 32a "Upgrade from 1.4 (not live)"
+
+# Upgrade test: same 1.4-format images as 32a, but run tunefs --writeconf
+# first so the servers re-register with their current nids.  Both OSC and
+# MDC conf_param changes must then succeed, and a normal client mount must
+# see the old filesystem contents intact.
+test_32b() {
+ [ -z "$TUNEFS" ] && echo "No tunefs" && return
+ [ ! -r disk1_4.zip ] && echo "Cant find disk1_4.zip, skipping" && return
+ unzip -o -j -d $TMP/$tdir disk1_4.zip || { echo "Cant unzip disk1_4, skipping" && return ; }
+ load_modules
+ sysctl lnet.debug=$PTLDEBUG
+
+ # writeconf will cause servers to register with their current nids
+ $TUNEFS --writeconf $TMP/$tdir/mds || error "tunefs failed"
+ start mds $TMP/$tdir/mds "-o loop" || return 3
+ local UUID=$(cat $LPROC/mds/lustre-MDT0000/uuid)
+ echo MDS uuid $UUID
+ [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
+
+ $TUNEFS --mgsnode=`hostname` $TMP/$tdir/ost1 || error "tunefs failed"
+ start ost1 $TMP/$tdir/ost1 "-o loop" || return 5
+ UUID=$(cat $LPROC/obdfilter/lustre-OST0000/uuid)
+ echo OST uuid $UUID
+ [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
+
+ # NID was previously undefined here (it is local to test_32a), so the
+ # failover.node setting below was silently issued with an empty value.
+ local NID=$($LCTL list_nids | head -1)
+
+ echo "OSC changes should succeed:"
+ $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7
+ $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8
+ echo "ok."
+ echo "MDC changes should succeed:"
+ $LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
+ echo "ok."
+
+ # MDT and OST should have registered with new nids, so we should have
+ # a fully-functioning client
+ echo "Check client and old fs contents"
+ mount_client $MOUNT
+ set_and_check "cat $LPROC/mdc/*/max_rpcs_in_flight" "lustre-MDT0000.mdc.max_rpcs_in_flight" || return 11
+ # checksum of /passwd as shipped inside disk1_4.zip -- proves old data survived
+ [ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "2479747619 779" ] || return 12
+ echo "ok."
+
+ cleanup
+ [ -d $TMP/$tdir ] && rm -rf $TMP/$tdir
+}
+run_test 32b "Upgrade from 1.4 with writeconf"
+
+# Format and mount a second filesystem whose OST uses a large index (8191)
+# and whose fsname is 8 characters, then verify a client can mount it.
+test_33() { # bug 12333
+ local FSNAME2=test1234
+ local fs2mds_HOST=$mds_HOST
+ local fs2ost_HOST=$ost_HOST
+ local fs2mdsdev=${MDSDEV}_2
+ local fs2ostdev=$(ostdevname 1)_2
+ add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
+ # --index=8191 exercises the top of the OST index range (bug 12333)
+ add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --index=8191 --mgsnode=`hostname`@tcp --reformat $fs2ostdev || exit 10
+
+ start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
+ start fs2ost $fs2ostdev $OST_MOUNT_OPTS
+ mkdir -p $MOUNT2
+ mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || return 1
+ echo "ok."
+
+ umount -d $MOUNT2
+ stop fs2ost -f
+ stop fs2mds -f
+ rm -rf $MOUNT2 $fs2mdsdev $fs2ostdev
+ cleanup_nocli || return 6
+}
+run_test 33 "Mount ost with a large index number"
+
umount_client $MOUNT
cleanup_nocli
+# tear down kerberos state; pairs with init_krb5_env added in reformat()
+cleanup_krb5_env
equals_msg "Done"
echo "$0: completed"