ONLY=${ONLY:-"$*"}
-# bug number for skipped test: 13739
-HEAD_EXCEPT=" 32a 32b "
-
-# bug number for skipped test:
-ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT $HEAD_EXCEPT"
+# bug number for skipped test:
+ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=`dirname $0`
OSTSIZE=40000
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
+remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
+
#
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="0 1 2 3 6 7 15 18 24b 25 30 31 32 33 34a "
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="0 1 2 3 6 7 15 18 24b 25 30 31 32 33 34a 45"
assert_DIR
}
writeconf() {
- local facet=$SINGLEMDS
- local dev=${facet}_dev
- shift
- stop ${facet} -f
- rm -f ${facet}active
- # who knows if/where $TUNEFS is installed? Better reformat if it fails...
- do_facet ${facet} "$TUNEFS --writeconf ${!dev}" || echo "tunefs failed, reformatting instead" && reformat
+ # Rewrite the config logs on the MDS device and regenerate startup logs.
+ # Falls back to a full reformat if tunefs is unavailable or fails.
+ local facet=$SINGLEMDS
+ local dev=${facet}_dev
+ shift
+ stop ${facet} -f
+ rm -f ${facet}active
+ # who knows if/where $TUNEFS is installed? Better reformat if it fails...
+ # NB: braces are required -- with "|| echo ... && reformat" the reformat
+ # would run even when tunefs succeeded, because && applies to the whole
+ # preceding (successful) list, defeating the purpose of writeconf.
+ do_facet ${facet} "$TUNEFS --writeconf ${!dev}" || { echo "tunefs failed, reformatting instead"; reformat; }
+
+ gen_config
}
gen_config() {
- reformat
- # The MGS must be started before the OSTs for a new fs, so start
- # and stop to generate the startup logs.
+ # Generate fresh startup config logs without reformatting
+ # (see reformat_and_config for the reformat+config combination).
+ # The MGS must be started before the OSTs for a new fs, so start
+ # and stop to generate the startup logs.
start_mds
start_ost
- sleep 5
+ # wait for the MDS osc import to reach FULL instead of a blind sleep
+ wait_osc_import_state mds ost FULL
stop_ost
stop_mds
}
+# Reformat all targets, then regenerate the startup config logs.
+reformat_and_config() {
+ reformat
+ gen_config
+}
+
start_mds() {
local facet=$SINGLEMDS
# we can not use MDSDEV1 here because SINGLEMDS could be set not to mds1 only
}
manual_umount_client(){
+ # Unmount the client mount point directly with umount(8).
+ # $1 (optional): extra umount flag passed through, e.g. --force.
+ # Returns the umount exit status.
+ local rc
+ local FORCE=$1
echo "manual umount lustre on ${MOUNT}...."
- do_facet client "umount -d $MOUNT"
+ do_facet client "umount -d ${FORCE} $MOUNT"
+ rc=$?
+ return $rc
}
setup() {
mount_client $MOUNT
}
+# Start MDS and OST and mount a client without regenerating config logs
+# (callers typically run reformat first).
+setup_noconfig() {
+ start_mds
+ start_ost
+ mount_client $MOUNT
+}
+
cleanup_nocli() {
stop_mds || return 201
stop_ost || return 202
check_mount() {
+ # Sanity-check the single client mount: copy in and remove a file,
+ # then confirm $MOUNT really appears in /proc/mounts.
do_facet client "cp /etc/passwd $DIR/a" || return 71
do_facet client "rm $DIR/a" || return 72
- # make sure lustre is actually mounted (touch will block,
- # but grep won't, so do it after)
+ # make sure lustre is actually mounted (touch will block,
+ # but grep won't, so do it after)
do_facet client "grep $MOUNT' ' /proc/mounts > /dev/null" || return 73
echo "setup single mount lustre success"
}
check_mount2() {
+ # Sanity-check a dual client mount: both $DIR and $DIR2 must accept
+ # file creation and removal.
- do_facet client "touch $DIR/a" || return 71
- do_facet client "rm $DIR/a" || return 72
- do_facet client "touch $DIR2/a" || return 73
- do_facet client "rm $DIR2/a" || return 74
+ do_facet client "touch $DIR/a" || return 71
+ do_facet client "rm $DIR/a" || return 72
+ do_facet client "touch $DIR2/a" || return 73
+ do_facet client "rm $DIR2/a" || return 74
echo "setup double mount lustre success"
}
exit
fi
-#create single point mountpoint
+init_gss
-gen_config
+#create single point mountpoint
-init_gss
+reformat_and_config
test_0() {
setup
do_facet ost1 lctl set_param subsystem_debug=\'mds ost\' || return 1
CHECK_PTLDEBUG="`do_facet ost1 lctl get_param -n debug`"
- if [ "$CHECK_PTLDEBUG" ] && [ "$CHECK_PTLDEBUG" = "trace inode" ];then
+ if [ "$CHECK_PTLDEBUG" ] && { \
+ [ "$CHECK_PTLDEBUG" = "trace inode warning error emerg console" ] ||
+ [ "$CHECK_PTLDEBUG" = "trace inode" ]; }; then
echo "lnet.debug success"
else
echo "lnet.debug: want 'trace inode', have '$CHECK_PTLDEBUG'"
cleanup || return $?
log "read the mode of OBJECTS and check if they has been changed properly"
- EXPECTEDOBJECTSMODE=`do_facet $SINGLEMDS "debugfs -R 'stat OBJECTS' $MDSDEV 2> /dev/null" | grep 'Mode: ' | sed -e "s/.*Mode: *//" -e "s/ *Flags:.*//"`
+ EXPECTEDOBJECTSMODE=`do_facet $SINGLEMDS "$DEBUGFS -R 'stat OBJECTS' $MDSDEV 2> /dev/null" | grep 'Mode: ' | sed -e "s/.*Mode: *//" -e "s/ *Flags:.*//"`
if [ "$EXPECTEDOBJECTSMODE" = "0777" ]; then
log "Success:Lustre change the mode of OBJECTS correctly"
fi
echo "Remove mds config log"
- do_facet $SINGLEMDS "debugfs -w -R 'unlink CONFIGS/$FSNAME-MDT0000' $MDSDEV || return \$?" || return $?
+ do_facet $SINGLEMDS "$DEBUGFS -w -R 'unlink CONFIGS/$FSNAME-MDT0000' $MDSDEV || return \$?" || return $?
start_ost
start_mds && return 42
- gen_config
+ reformat_and_config
}
run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
MDS_MKFS_OPTS="--mgs --mdt --fsname=$FSNAME --device-size=$myMDSSIZE --param sys.timeout=$TIMEOUT $MDSOPT"
- gen_config
+ reformat_and_config
echo "mount lustre system..."
setup
check_mount || return 41
echo "check journal size..."
- local FOUNDSIZE=`do_facet mds "debugfs -c -R 'stat <8>' $MDSDEV" | awk '/Size: / { print $NF; exit;}'`
+ local FOUNDSIZE=`do_facet mds "$DEBUGFS -c -R 'stat <8>' $MDSDEV" | awk '/Size: / { print $NF; exit;}'`
if [ $FOUNDSIZE -gt $((32 * 1024 * 1024)) ]; then
log "Success: mkfs creates large journals. Size: $((FOUNDSIZE >> 20))M"
else
cleanup || return $?
MDS_MKFS_OPTS=$OLD_MDS_MKFS_OPTS
- gen_config
+ reformat_and_config
}
run_test 18 "check mkfs creates large journals"
test_21a() {
start_mds
start_ost
+ wait_osc_import_state mds ost FULL
stop_ost
stop_mds
}
test_21b() {
start_ost
start_mds
+ wait_osc_import_state mds ost FULL
stop_mds
stop_ost
}
start_ost
start_mds
start_ost2
+ wait_osc_import_state mds ost2 FULL
stop_ost
stop_ost2
stop_mds
+ #writeconf to remove all ost2 traces for subsequent tests
+ writeconf
}
run_test 21c "start mds between two osts, stop mds last"
test_22() {
- #reformat to remove all logs
- reformat
start_mds
- echo Client mount before any osts are in the logs
- mount_client $MOUNT
- check_mount && return 41
- pass
echo Client mount with ost in logs, but none running
start_ost
+ # wait until mds connected to ost and open client connection
+ wait_osc_import_state mds ost FULL
stop_ost
mount_client $MOUNT
# check_mount will block trying to contact ost
test_23a() { # was test_23
setup
# fail mds
- stop $SINGLEMDS
+ stop $SINGLEMDS
# force down client so that recovering mds waits for reconnect
local running=$(grep -c $MOUNT /proc/mounts) || true
if [ $running -ne 0 ]; then
}
test_24a() {
- #set up fs1
+ #set up fs1
gen_config
#set up fs2
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
rm $MOUNT2/b || return 4
# 2 is actually mounted
grep $MOUNT2' ' /proc/mounts > /dev/null || return 5
- # failover
+ # failover
facet_failover fs2mds
facet_failover fs2ost
df
- umount_client $MOUNT
+ umount_client $MOUNT
# the MDS must remain up until last MDT
stop_mds
MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --mgs --reformat $fs2mdsdev || exit 10
+ add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --mgs --reformat $fs2mdsdev || exit 10
setup
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
cleanup || return 6
local myfacet=$1
local TEST=$2
local PARAM=$3
- local ORIG=$(do_facet $myfacet "$TEST")
+ local ORIG=$(do_facet $myfacet "$TEST")
if [ $# -gt 3 ]; then
local FINAL=$4
else
FINAL=$(($ORIG + 5))
fi
echo "Setting $PARAM from $ORIG to $FINAL"
- do_facet $SINGLEMDS "$LCTL conf_param $PARAM=$FINAL" || error conf_param failed
- local RESULT
- local MAX=90
- local WAIT=0
- while [ 1 ]; do
- sleep 5
- RESULT=$(do_facet $myfacet "$TEST")
- if [ $RESULT -eq $FINAL ]; then
- echo "Updated config after $WAIT sec (got $RESULT)"
- break
- fi
- WAIT=$((WAIT + 5))
- if [ $WAIT -eq $MAX ]; then
- echo "Config update not seen: wanted $FINAL got $RESULT"
- return 3
- fi
- echo "Waiting $(($MAX - $WAIT)) secs for config update"
- done
+ do_facet $SINGLEMDS "$LCTL conf_param $PARAM='$FINAL'" || error conf_param failed
+
+ wait_update $(facet_host $myfacet) "$TEST" "$FINAL" || error check failed!
}
test_27a() {
start_ost || return 1
start_mds || return 2
- echo "Requeue thread should have started: "
- ps -e | grep ll_cfg_requeue
+ echo "Requeue thread should have started: "
+ ps -e | grep ll_cfg_requeue
set_and_check ost1 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
cleanup_nocli
}
echo "Live client success: got $RESULT"
fi
- # check MDT too
+ # check MDT too
local MPROC="osc.$FSNAME-OST0001-osc-[M]*.active"
local MAX=30
local WAIT=0
cleanup_nocli
#writeconf to remove all ost2 traces for subsequent tests
writeconf
- start_mds
- start_ost
- cleanup
}
run_test 29 "permanently remove an OST"
for i in ${LIST[@]}; do
set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
done
- # make sure client restart still works
+ # make sure client restart still works
umount_client $MOUNT
mount_client $MOUNT || return 4
- [ "$($TEST)" -ne "$i" ] && return 5
+ [ "$($TEST)" -ne "$i" ] && return 5
set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $ORIG || return 6
cleanup
}
}
run_test 31 "Connect to non-existent node (shouldn't crash)"
+# Use these start32/stop32 functions instead of the test-framework
+# start/stop functions for local devices, to skip global facet variable init.
+# Stop a locally mounted facet ($1) under ${MOUNT%/*}/<facet>,
+# passing any remaining args (e.g. -f) to umount.
+stop32 () {
+ local facet=$1
+ shift
+ echo "Stopping local ${MOUNT%/*}/${facet} (opts:$@)"
+ umount -d $@ ${MOUNT%/*}/${facet}
+ # list remaining loop devices for diagnostics
+ losetup -a
+}
+
+# Mount a local lustre target: $1 = facet name, $2 = device, remaining
+# args are mount options. Mount point is ${MOUNT%/*}/<facet>.
+# Returns the mount exit status.
+start32 () {
+ local facet=$1
+ shift
+ local device=$1
+ shift
+ mkdir -p ${MOUNT%/*}/${facet}
+
+ echo "Starting local ${facet}: $@ $device ${MOUNT%/*}/${facet}"
+ mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
+ local RC=$?
+ if [ $RC -ne 0 ]; then
+ # echo the exact failing command to aid debugging
+ echo "mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}"
+ echo "Start of ${device} of local ${facet} failed ${RC}"
+ fi
+ # list loop devices for diagnostics
+ losetup -a
+ return $RC
+}
+
+# Force-stop the locally started mds1/ost1 (see start32) and wait for
+# client threads to exit; does not touch the client mount itself.
+cleanup_nocli32 () {
+ stop32 mds1 -f
+ stop32 ost1 -f
+ wait_exit_ST client
+}
+
+# Full test_32 cleanup: clear the EXIT/INT trap, unmount the client,
+# stop the local servers, and unload modules.
+cleanup_32() {
+ trap 0
+ echo "Cleanup test_32 umount $MOUNT ..."
+ umount -f $MOUNT || true
+ echo "Cleanup local mds ost1 ..."
+ cleanup_nocli32
+ unload_modules
+}
+
test_32a() {
- # XXX - make this test verify 1.8 -> 2.0 upgrade is working
- # XXX - make this run on client-only systems with real hardware on
- # the OST and MDT
- # there appears to be a lot of assumption here about loopback
- # devices
- # or maybe this test is just totally useless on a client-only system
+ # Upgrade a 1.8-format disk image (not live): start the old MDS/OST
+ # locally via start32 and verify UUIDs and conf_param behavior.
+ # this test is totally useless on a client-only system
+ [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ "$mds_HOST" = "`hostname`" ] || { skip "remote MDS" && return 0; }
- [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
- { skip "remote OST" && return 0; }
+ [ -z "$TUNEFS" ] && skip "No tunefs" && return 0
- [ -z "$TUNEFS" ] && skip "No tunefs" && return
- local DISK1_8=$LUSTRE/tests/disk1_8.tgz
+ local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
[ ! -r $DISK1_8 ] && skip "Cannot find $DISK1_8" && return 0
-
- mkdir -p $TMP/$tdir
- tar xjvf $DISK1_8 -C $TMP/$tdir || \
+ local tmpdir=$TMP/conf32a
+ mkdir -p $tmpdir
+ tar xjvf $DISK1_8 -C $tmpdir || \
{ skip "Cannot untar $DISK1_8" && return 0; }
load_modules
- lctl set_param debug=$PTLDEBUG
+ $LCTL set_param debug=$PTLDEBUG
$TUNEFS $tmpdir/mds || error "tunefs failed"
+
# nids are wrong, so client wont work, but server should start
- start mds $tmpdir/mds "-o loop,exclude=lustre-OST0000" || return 3
- local UUID=$(lctl get_param -n mdt.lustre-MDT0000.uuid)
+ start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
+ trap cleanup_32 EXIT INT || return 3
+
+ local UUID=$($LCTL get_param -n mdt.lustre-MDT0000.uuid)
echo MDS uuid $UUID
- [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "lustre-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
- $TUNEFS --mgsnode=`hostname` $tmpdir/ost1 || error "tunefs failed"
- start ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$(lctl get_param -n obdfilter.lustre-OST0000.uuid)
+ $TUNEFS --mgsnode=$HOSTNAME $tmpdir/ost1 || error "tunefs failed"
+ start32 ost1 $tmpdir/ost1 "-o loop" || return 5
+ UUID=$($LCTL get_param -n obdfilter.lustre-OST0000.uuid)
echo OST uuid $UUID
- [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "lustre-OST0000_UUID" ] || error "UUID is wrong: $UUID"
local NID=$($LCTL list_nids | head -1)
- echo "OSC changes should return err:"
- $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 && return 7
- $LCTL conf_param lustre-OST0000.failover.node=$NID && return 8
+ # the "|| return" assertions below expect conf_param to SUCCEED, so
+ # the log message must say so (the old "should return err" text
+ # matched the removed "&& return" form and contradicted the check)
+ echo "OSC changes should succeed:"
+ $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7
+ $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8
+
echo "ok."
- echo "MDC changes should succeed:"
+ echo "MDC changes should succeed:"
$LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
$LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
echo "ok."
- # With a new good MDT failover nid, we should be able to mount a client
- # (but it cant talk to OST)
- local OLDMOUNTOPT=$MOUNTOPT
- MOUNTOPT="exclude=lustre-OST0000"
- mount_client $MOUNT
- MOUNTOPT=$OLDMOUNTOPT
- set_and_check client "lctl get_param -n mdc.*.max_rpcs_in_flight" "lustre-MDT0000.mdc.max_rpcs_in_flight" ||
- return 11
-
- zconf_umount `hostname` $MOUNT -f
- cleanup_nocli
- load_modules
+ cleanup_32
- # mount a second time to make sure we didnt leave upgrade flag on
+ # mount a second time to make sure we didnt leave upgrade flag on
load_modules
$TUNEFS --dryrun $tmpdir/mds || error "tunefs failed"
- load_modules
- start mds $tmpdir/mds "-o loop,exclude=lustre-OST0000" || return 12
- cleanup_nocli
+ start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
+ trap cleanup_32 EXIT INT || return 12
+
+ cleanup_32
rm -rf $tmpdir || true # true is only for TMP on NFS
}
run_test 32a "Upgrade from 1.8 (not live)"
test_32b() {
+ # Upgrade a 1.8-format disk image with --writeconf: servers re-register
+ # with their current nids, so a fully functional client mount and old
+ # file contents can be verified afterwards.
- # XXX - make this test verify 1.8 -> 2.0 upgrade is working
- # XXX - make this run on client-only systems with real hardware on
- # the OST and MDT
- # there appears to be a lot of assumption here about loopback
- # devices
- # or maybe this test is just totally useless on a client-only system
+ # this test is totally useless on a client-only system
+ [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ "$mds_HOST" = "`hostname`" ] || { skip "remote MDS" && return 0; }
- [ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
- { skip "remote OST" && return 0; }
-
[ -z "$TUNEFS" ] && skip "No tunefs" && return
- local DISK1_8=$LUSTRE/tests/disk1_8.tgz
+
+ local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
[ ! -r $DISK1_8 ] && skip "Cannot find $DISK1_8" && return 0
- mkdir -p $TMP/$tdir
- tar xjvf $DISK1_8 -C $TMP/$tdir || \
+ local tmpdir=$TMP/conf32b
+ mkdir -p $tmpdir
+ tar xjvf $DISK1_8 -C $tmpdir || \
{ skip "Cannot untar $DISK1_8" && return ; }
load_modules
- lctl set_param debug=$PTLDEBUG
- NEWNAME=sofia
+ $LCTL set_param debug=$PTLDEBUG
+ local NEWNAME=lustre
# writeconf will cause servers to register with their current nids
$TUNEFS --writeconf --fsname=$NEWNAME $tmpdir/mds || error "tunefs failed"
- start mds $tmpdir/mds "-o loop" || return 3
- local UUID=$(lctl get_param -n mdt.${NEWNAME}-MDT0000.uuid)
+ start32 mds1 $tmpdir/mds "-o loop" && \
+ trap cleanup_32 EXIT INT || return 3
+
+ local UUID=$($LCTL get_param -n mdt.${NEWNAME}-MDT0000.uuid)
echo MDS uuid $UUID
- [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "${NEWNAME}-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
- $TUNEFS --mgsnode=`hostname` --fsname=$NEWNAME --writeconf $tmpdir/ost1 || error "tunefs failed"
- start ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$(lctl get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
+ $TUNEFS --mgsnode=$HOSTNAME --writeconf --fsname=$NEWNAME $tmpdir/ost1 || error "tunefs failed"
+ start32 ost1 $tmpdir/ost1 "-o loop" || return 5
+ UUID=$($LCTL get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
echo OST uuid $UUID
- [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "${NEWNAME}-OST0000_UUID" ] || error "UUID is wrong: $UUID"
+
+ local NID=$($LCTL list_nids | head -1)
+
+ echo "OSC changes should succeed:"
- echo "OSC changes should succeed:"
$LCTL conf_param ${NEWNAME}-OST0000.osc.max_dirty_mb=15 || return 7
$LCTL conf_param ${NEWNAME}-OST0000.failover.node=$NID || return 8
+
echo "ok."
- echo "MDC changes should succeed:"
+ echo "MDC changes should succeed:"
$LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
echo "ok."
# MDT and OST should have registered with new nids, so we should have
# a fully-functioning client
echo "Check client and old fs contents"
- OLDFS=$FSNAME
- FSNAME=$NEWNAME
- mount_client $MOUNT
- FSNAME=$OLDFS
- set_and_check client "lctl get_param -n mdc.*.max_rpcs_in_flight" "${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight" || return 11
- [ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "2479747619 779" ] || return 12
+
+ local device=`h2$NETTYPE $HOSTNAME`:/$NEWNAME
+ echo "Starting local client: $HOSTNAME: $device $MOUNT"
+ mount -t lustre $device $MOUNT || return 1
+
+ # bump max_rpcs_in_flight via conf_param and wait for the live
+ # client to observe the new value
+ local old=$($LCTL get_param -n mdc.*.max_rpcs_in_flight)
+ local new=$((old + 5))
+ $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new
+ wait_update $HOSTNAME "$LCTL get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
+
+ # checksum of the passwd file shipped inside the 1.8 disk image --
+ # presumably updated to match the new disk1_8.tar.bz2 contents
+ [ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "94306271 1478" ] || return 12
echo "ok."
- cleanup
+ cleanup_32
+
rm -rf $tmpdir || true # true is only for TMP on NFS
}
run_test 32b "Upgrade from 1.8 with writeconf"
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
+ add fs2mds $MDS_MKFS_OPTS --mkfsoptions='\"-J size=8\"' --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --index=8191 --mgsnode=$MGSNID --reformat $fs2ostdev || exit 10
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || rc=2
echo "ok."
- cp /etc/hosts $MOUNT2/ || rc=3
+ cp /etc/hosts $MOUNT2/ || rc=3
$LFS getstripe $MOUNT2/hosts
umount -d $MOUNT2
# contact after the connection loss
$LCTL dk $TMP/lustre-log-$TESTNAME.log
NEXTCONN=`awk "/${MSG}/ {start = 1;}
- /import_select_connection.$device-mdc.* using connection/ {
+ /import_select_connection.*$device-mdc.* using connection/ {
if (start) {
if (\\\$NF ~ /$FAKENID/)
print \\\$NF;
ALLOWANCE=$((64 * $OSTCOUNT))
- if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
+ if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
[ $DFTOTAL -gt $(($BKTOTAL + $ALLOWANCE)) ] ; then
echo "**** FAIL: df total($DFTOTAL) mismatch OST total($BKTOTAL)"
rc=1
fi
- if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
+ if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
[ $DFFREE -gt $(($BKFREE + $ALLOWANCE)) ] ; then
echo "**** FAIL: df free($DFFREE) mismatch OST free($BKFREE)"
rc=2
fi
- if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
+ if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
[ $DFAVAIL -gt $(($BKAVAIL + $ALLOWANCE)) ] ; then
echo "**** FAIL: df avail($DFAVAIL) mismatch OST avail($BKAVAIL)"
rc=3
local dev=${SINGLEMDS}_dev
local MDSDEV=${!dev}
- do_facet $SINGLEMDS "debugfs -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
- do_facet $SINGLEMDS "debugfs -w -R \\\"rm lov_objid\\\" $MDSDEV"
+ do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
+ do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.orig"
# check create in mds_lov_connect
[ $V ] && log "verifying $DIR/$tdir/$f"
diff -q $f $DIR/$tdir/$f || ERROR=y
done
- do_facet $SINGLEMDS "debugfs -c -R \\\"dump lov_objid $TMP/lov_objid.new\\\" $MDSDEV"
+ do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new\\\" $MDSDEV"
do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new"
[ "$ERROR" = "y" ] && error "old and new files are different after connect" || true
stop_mds
do_facet $SINGLEMDS dd if=/dev/zero of=$TMP/lov_objid.clear bs=4096 count=1
- do_facet $SINGLEMDS "debugfs -w -R \\\"rm lov_objid\\\" $MDSDEV"
- do_facet $SINGLEMDS "debugfs -w -R \\\"write $TMP/lov_objid.clear lov_objid\\\" $MDSDEV "
+ do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
+ do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"write $TMP/lov_objid.clear lov_objid\\\" $MDSDEV "
start_mds
mount_client $MOUNT
[ $V ] && log "verifying $DIR/$tdir/$f"
diff -q $f $DIR/$tdir/$f || ERROR=y
done
- do_facet $SINGLEMDS "debugfs -c -R \\\"dump lov_objid $TMP/lov_objid.new1\\\" $MDSDEV"
+ do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.new1\\\" $MDSDEV"
do_facet $SINGLEMDS "od -Ax -td8 $TMP/lov_objid.new1"
umount_client $MOUNT
stop_mds
PTLDEBUG=+malloc
setup
cleanup
- perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '*** Leak:' &&
+ perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '*** Leak:' &&
error "memory leak detected" || true
}
run_test 39 "leak_finder recognizes both LUSTRE and LNET malloc messages"
start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
start ost1 `ostdevname 1` $OST_MOUNT_OPTS
- start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs
+ start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
mkdir -p $MOUNT
mount_client $MOUNT || return 1
sleep 5
}
run_test 42 "invalid config param should not prevent client from mounting"
+# Verify MDT root_squash and nosquash_nids enforcement: with root squashed
+# to RUNAS_ID, root may only access RUNAS_ID-owned files; once the client
+# nid is in nosquash_nids, root regains full access.
+test_43() {
+ # must run as root with a non-root RUNAS_ID; "&& return" is required,
+ # otherwise skip() would log and the test body would still execute
+ [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip "run as root" && return
+ setup
+ chmod ugo+x $DIR || error "chmod 0 failed"
+ set_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ "$FSNAME.mdt.root_squash" \
+ "0:0"
+ set_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ "$FSNAME.mdt.nosquash_nids" \
+ "NONE"
+
+ #
+ # create set of test files
+ #
+ echo "111" > $DIR/$tfile-userfile || error "write 1 failed"
+ chmod go-rw $DIR/$tfile-userfile || error "chmod 1 failed"
+ chown $RUNAS_ID.$RUNAS_ID $DIR/$tfile-userfile || error "chown failed"
+
+ echo "222" > $DIR/$tfile-rootfile || error "write 2 failed"
+ chmod go-rw $DIR/$tfile-rootfile || error "chmod 2 failed"
+
+ mkdir $DIR/$tdir-rootdir -p || error "mkdir failed"
+ chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
+ touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
+
+ #
+ # check root_squash:
+ # set root squash UID:GID to RUNAS_ID
+ # root should be able to access only files owned by RUNAS_ID
+ #
+ set_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ "$FSNAME.mdt.root_squash" \
+ "$RUNAS_ID:$RUNAS_ID"
+
+ ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
+ dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
+ error "$ST: root read permission is denied"
+ echo "$ST: root read permission is granted - ok"
+
+ # this is a WRITE check, so dd must use of= (the original "if=" read
+ # the file and ignored the piped data, never testing write access)
+ echo "444" | \
+ dd conv=notrunc of=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
+ error "$ST: root write permission is denied"
+ echo "$ST: root write permission is granted - ok"
+
+ ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
+ dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
+ error "$ST: root read permission is granted"
+ echo "$ST: root read permission is denied - ok"
+
+ echo "555" | \
+ dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null && \
+ error "$ST: root write permission is granted"
+ echo "$ST: root write permission is denied - ok"
+
+ ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
+ rm $DIR/$tdir-rootdir/tfile-1 1>/dev/null 2>/dev/null && \
+ error "$ST: root unlink permission is granted"
+ echo "$ST: root unlink permission is denied - ok"
+
+ # path must expand $tdir ("$DIR/tdir-rootdir" pointed at a nonexistent
+ # directory, so the create "failed" for the wrong reason)
+ touch $DIR/$tdir-rootdir/tfile-2 1>/dev/null 2>/dev/null && \
+ error "$ST: root create permission is granted"
+ echo "$ST: root create permission is denied - ok"
+
+ #
+ # check nosquash_nids:
+ # put client's NID into nosquash_nids list,
+ # root should be able to access root file after that
+ #
+ local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
+ NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
+ NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
+ set_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ "$FSNAME-MDTall.mdt.nosquash_nids" \
+ "$NIDLIST"
+
+ ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
+ dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
+ error "$ST: root read permission is denied"
+ echo "$ST: root read permission is granted - ok"
+
+ echo "666" | \
+ dd conv=notrunc of=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
+ error "$ST: root write permission is denied"
+ echo "$ST: root write permission is granted - ok"
+
+ ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tdir-rootdir)
+ rm $DIR/$tdir-rootdir/tfile-1 || \
+ error "$ST: root unlink permission is denied"
+ echo "$ST: root unlink permission is granted - ok"
+ touch $DIR/$tdir-rootdir/tfile-2 || \
+ error "$ST: root create permission is denied"
+ echo "$ST: root create permission is granted - ok"
+
+ return 0
+}
+run_test 43 "check root_squash and nosquash_nids"
+
umount_client $MOUNT
cleanup_nocli
-cleanup_gss
+# bug 16317: a mounted client's UUID must appear among the MDT's
+# per-export proc entries.
+test_44() { # 16317
+ setup
+ check_mount || return 2
+ # UUID of this client's llite instance
+ UUID=$($LCTL get_param llite.${FSNAME}*.uuid | cut -d= -f2)
+ STATS_FOUND=no
+ UUIDS=$(do_facet mds "$LCTL get_param mdt.${FSNAME}*.exports.*.uuid")
+ # each VAL is "<param path>=<uuid>"; match against the client UUID
+ for VAL in $UUIDS; do
+ NID=$(echo $VAL | cut -d= -f1)
+ CLUUID=$(echo $VAL | cut -d= -f2)
+ [ "$UUID" = "$CLUUID" ] && STATS_FOUND=yes && break
+ done
+ [ "$STATS_FOUND" = "no" ] && error "stats not found for client"
+ cleanup
+ return 0
+}
+run_test 44 "mounted client proc entry exists"
+
+# bug 17310: a forced umount must succeed even while a statfs (df) is
+# stuck waiting on a stopped MDS and bulk unlink is delayed by fail_loc.
+test_45() { #17310
+ setup
+ check_mount || return 2
+ stop_mds
+ # background df blocks against the stopped MDS
+ df -h $MOUNT &
+ log "sleep 60 sec"
+ sleep 60
+#define OBD_FAIL_PTLRPC_LONG_UNLINK 0x50f
+ do_facet client "lctl set_param fail_loc=0x50f"
+ log "sleep 10 sec"
+ sleep 10
+ # forced umount must not hang on the outstanding request
+ manual_umount_client --force || return 3
+ do_facet client "lctl set_param fail_loc=0x0"
+ start_mds
+ mount_client $MOUNT || return 4
+ cleanup
+ return 0
+}
+run_test 45 "long unlink handling in ptlrpcd"
+
+# Cleanup for test_46a: unmount both clients, stop OSTs 5..1 and the MDS,
+# then writeconf so the extra OSTs leave no traces for later tests.
+# Accumulates the first failing status and returns it.
+cleanup_46a() {
+ trap 0
+ local rc=0
+ local count=5
+
+ umount_client $MOUNT2 || rc=$?
+ umount_client $MOUNT || rc=$?
+ while [ $count -gt 0 ]; do
+ stop ost${count} -f || rc=$?
+ let count=count-1
+ done
+ stop_mds || rc=$?
+ # writeconf is needed after the test, otherwise,
+ # we might end up with extra OSTs
+ writeconf || rc=$?
+ cleanup_nocli || rc=$?
+ return $rc
+}
+
+# Add OSTs while one client is already mounted, then access a wide-striped
+# file (with many ACLs) from the first client -- this used to deadlock.
+test_46a() {
+ [ $OSTCOUNT -lt 5 ] && skip "skipping test for too few OSTs" && return
+ reformat
+ start_mds || return 1
+ #first client should see only one ost
+ start_ost || return 2
+ wait_osc_import_state mds ost FULL
+ #start_client
+ mount_client $MOUNT || return 3
+ trap cleanup_46a EXIT ERR
+
+ start_ost2 || return 4
+ start ost3 `ostdevname 3` $OST_MOUNT_OPTS || return 5
+ start ost4 `ostdevname 4` $OST_MOUNT_OPTS || return 6
+ start ost5 `ostdevname 5` $OST_MOUNT_OPTS || return 7
+ # wait until ost2-5 is sync
+ # ping_interval + 1
+ wait_osc_import_state mds ost2 FULL
+ wait_osc_import_state mds ost3 FULL
+ wait_osc_import_state mds ost4 FULL
+ wait_osc_import_state mds ost5 FULL
+ # second client sees all OSTs
+
+ mount_client $MOUNT2 || return 8
+ $LFS setstripe $MOUNT2 -c -1 || return 9
+ $LFS getstripe $MOUNT2 || return 10
+
+ echo "ok" > $MOUNT2/widestripe
+ $LFS getstripe $MOUNT2/widestripe || return 11
+ # fill the ACL buffer so the reply cannot also expand the LSM into it
+ awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do
+ setfacl -m $acl $MOUNT2/widestripe
+ done
+
+ # this stat from the first client used to deadlock
+ stat $MOUNT/widestripe || return 12
+
+ cleanup_46a || { echo "cleanup_46a failed!" && return 13; }
+ return 0
+}
+run_test 46a "handle ost additional - wide striped file"
+
+# bug 17674: client lru_size (LRU resize) settings must survive OST and
+# MDS failover.
+test_47() { #17674
+ reformat
+ setup_noconfig
+ check_mount || return 2
+ $LCTL set_param ldlm.namespaces.$FSNAME-*-*-*.lru_size=100
+
+ # record pre-failover lru_size of every non-MDT namespace.
+ # NB: an empty bash array is declared with (), not []; the original
+ # "local lru_size=[]" assigned the literal string "[]" instead.
+ local lru_size=()
+ local count=0
+ for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
+ if echo $ns | grep "MDT[[:digit:]]*"; then
+ continue
+ fi
+ lrs=$(echo $ns | sed 's/.*lru_size=//')
+ lru_size[count]=$lrs
+ let count=count+1
+ done
+
+ facet_failover ost1
+ facet_failover $SINGLEMDS
+ df -h $MOUNT || return 3
+
+ # after failover, each namespace must report the recorded lru_size
+ count=0
+ for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
+ if echo $ns | grep "MDT[[:digit:]]*"; then
+ continue
+ fi
+ lrs=$(echo $ns | sed 's/.*lru_size=//')
+ if ! test "$lrs" -eq "${lru_size[count]}"; then
+ n=$(echo $ns | sed -e 's/ldlm.namespaces.//' -e 's/.lru_size=.*//')
+ error "$n has lost lru_size: $lrs vs. ${lru_size[count]}"
+ fi
+ let count=count+1
+ done
+
+ cleanup
+ return 0
+}
+run_test 47 "server restart does not make client lose lru_resize settings"
+
+# Cleanup for test_48: a full reformat is required because a failed run
+# can leave an unremovable (wide-striped, many-ACL) file on the fs.
+cleanup_48() {
+ trap 0
+
+ # reformat after this test is needed - if test will failed
+ # we will have unkillable file at FS
+ reformat_and_config
+}
+
+# bug 17636: stat of a wide-striped file carrying a large number of ACL
+# entries must not fail (reply buffer sizing).
+test_48() { # bug 17636
+ reformat
+ setup_noconfig
+ check_mount || return 2
+
+ $LFS setstripe $MOUNT -c -1 || return 9
+ $LFS getstripe $MOUNT || return 10
+
+ echo "ok" > $MOUNT/widestripe
+ $LFS getstripe $MOUNT/widestripe || return 11
+
+ trap cleanup_48 EXIT ERR
+
+ # fill the ACL buffer so the reply cannot also expand the LSM into it
+ getent passwd | awk -F : '{ print "u:"$1":rwx" }' | while read acl; do
+ setfacl -m $acl $MOUNT/widestripe
+ done
+
+ stat $MOUNT/widestripe || return 12
+
+ cleanup_48
+ return 0
+}
+run_test 48 "too many acls on file"
+
+# check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE
+# bug 17710: a sys.ldlm_timeout mkfs parameter must propagate identically
+# to MDS, OST and client; when set equal to sys.timeout the effective
+# value is timeout/3, otherwise the explicit value is used.
+test_49() { # bug 17710
+ local OLD_MDS_MKFS_OPTS=$MDS_MKFS_OPTS
+ local OLD_OST_MKFS_OPTS=$OST_MKFS_OPTS
+ local LOCAL_TIMEOUT=20
+
+ # first pass: ldlm_timeout == timeout
+ OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$LOCAL_TIMEOUT $MKFSOPT $OSTOPT"
+
+ reformat
+ start_mds
+ start_ost
+ mount_client $MOUNT
+ check_mount || return 1
+
+ echo "check ldlm_timeout..."
+ LDLM_MDS="`do_facet mds lctl get_param -n ldlm_timeout`"
+ LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`"
+ LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`"
+
+ if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
+ error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
+ fi
+
+ if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT / 3)) ]; then
+ error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
+ fi
+
+ umount_client $MOUNT
+ stop_ost || return 2
+ stop_mds || return 3
+
+ # second pass: ldlm_timeout != timeout, explicit value must be used
+ OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$((LOCAL_TIMEOUT - 1)) $MKFSOPT $OSTOPT"
+
+ reformat
+ start_mds || return 4
+ start_ost || return 5
+ mount_client $MOUNT || return 6
+ check_mount || return 7
+
+ LDLM_MDS="`do_facet mds lctl get_param -n ldlm_timeout`"
+ LDLM_OST1="`do_facet ost1 lctl get_param -n ldlm_timeout`"
+ LDLM_CLIENT="`do_facet client lctl get_param -n ldlm_timeout`"
+
+ if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
+ error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
+ fi
+
+ if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then
+ error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
+ fi
+
+ cleanup || return $?
+
+ # restore the original mkfs options for subsequent tests
+ MDS_MKFS_OPTS=$OLD_MDS_MKFS_OPTS
+ OST_MKFS_OPTS=$OLD_OST_MKFS_OPTS
+}
+run_test 49 "check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE"
+
+# Exercise lazy statfs on mount point $1 two ways and fail if either hangs:
+# 1) statfs(2) via multiop ("f_"), resumed with SIGUSR1;
+# 2) "lfs df" run in the background -- if it is still alive after 5s it
+#    is considered hung (RC2==0 from "kill -0"), killed, and counted
+#    as a failure.
+# Returns 0 only if the multiop statfs succeeded AND lfs df finished.
+lazystatfs() {
+ # Test both statfs and lfs df and fail if either one fails
+ multiop_bg_pause $1 f_
+ RC1=$?
+ PID=$!
+ killall -USR1 multiop
+ [ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
+ wait $PID || { RC1=$?; log "multiop return error "; }
+
+ $LFS df &
+ PID=$!
+ sleep 5
+ # kill -0 probes whether the df process is still running
+ kill -s 0 $PID
+ RC2=$?
+ if [ $RC2 -eq 0 ]; then
+ kill -s 9 $PID
+ log "lazystatfs df failed"
+ fi
+
+ RC=0
+ [[ $RC1 -ne 0 || $RC2 -eq 0 ]] && RC=1
+ return $RC
+}
+
+# lazystatfs must work when all servers are up.
+test_50a() {
+ setup
+ lctl set_param llite.$FSNAME-*.lazystatfs=1
+ touch $DIR/$tfile
+
+ lazystatfs $MOUNT || error "lazystatfs failed but no down servers"
+
+ cleanup || return $?
+}
+run_test 50a "lazystatfs all servers available =========================="
+
+# test_50b: with lazystatfs enabled, statfs must still succeed (not EIO,
+# not hang) after the only OST has been stopped and the client has seen
+# the disconnect.
+test_50b() {
+        setup
+        lctl set_param llite.$FSNAME-*.lazystatfs=1
+        touch $DIR/$tfile
+
+        # Wait for client to detect down OST
+        stop_ost || error "Unable to stop OST1"
+        # Block until the MDS's OSC import reaches DISCONN so the down state
+        # is actually visible before we issue statfs.
+        wait_osc_import_state mds ost DISCONN
+
+        # NOTE(review): error string reads "should don't have returned EIO";
+        # left as-is since it is a runtime message, not a comment.
+        lazystatfs $MOUNT || error "lazystatfs should don't have returned EIO"
+
+        umount_client $MOUNT || error "Unable to unmount client"
+        stop_mds || error "Unable to stop MDS"
+}
+run_test 50b "lazystatfs all servers down =========================="
+
+# test_50c: lazystatfs with one of two OSTs down -- statfs must succeed
+# using only the surviving OST.
+test_50c() {
+        start_mds || error "Unable to start MDS"
+        start_ost || error "Unable to start OST1"
+        start_ost2 || error "Unable to start OST2"
+        mount_client $MOUNT || error "Unable to mount client"
+        lctl set_param llite.$FSNAME-*.lazystatfs=1
+        touch $DIR/$tfile
+
+        # Wait for client to detect down OST
+        stop_ost || error "Unable to stop OST1"
+        wait_osc_import_state mds ost DISCONN
+        lazystatfs $MOUNT || error "lazystatfs failed with one down server"
+
+        umount_client $MOUNT || error "Unable to unmount client"
+        stop_ost2 || error "Unable to stop OST2"
+        stop_mds || error "Unable to stop MDS"
+}
+run_test 50c "lazystatfs one server down =========================="
+
+# test_50d: race window -- issue statfs right after stopping OST1, i.e.
+# while the client may still believe the OST is up. With lazystatfs the
+# call must not fail (worst case it waits out one timeout interval).
+test_50d() {
+        start_mds || error "Unable to start MDS"
+        start_ost || error "Unable to start OST1"
+        start_ost2 || error "Unable to start OST2"
+        mount_client $MOUNT || error "Unable to mount client"
+        lctl set_param llite.$FSNAME-*.lazystatfs=1
+        touch $DIR/$tfile
+
+        # Issue the statfs during the window where the client still
+        # belives the OST to be available but it is in fact down.
+        # No failure just a statfs which hangs for a timeout interval.
+        # Note: deliberately NO wait_osc_import_state here -- the race is
+        # the point of this test (contrast with test_50c).
+        stop_ost || error "Unable to stop OST1"
+        lazystatfs $MOUNT || error "lazystatfs failed with one down server"
+
+        umount_client $MOUNT || error "Unable to unmount client"
+        stop_ost2 || error "Unable to stop OST2"
+        stop_mds || error "Unable to stop MDS"
+}
+run_test 50d "lazystatfs client/server conn race =========================="
+
+# test_50e: NON-lazy statfs with the only OST down -- the statfs issued by
+# multiop must block (process still alive after TIMEOUT+1s) and then
+# complete successfully once the OST is restarted.
+test_50e() {
+        local RC1
+        local pid
+
+        reformat_and_config
+        start_mds || return 1
+        #first client should see only one ost
+        start_ost || return 2
+        wait_osc_import_state mds ost FULL
+
+        # Wait for client to detect down OST
+        stop_ost || error "Unable to stop OST1"
+        wait_osc_import_state mds ost DISCONN
+
+        mount_client $MOUNT || error "Unable to mount client"
+        # Explicitly disable lazystatfs: the blocking behaviour is the point.
+        lctl set_param llite.$FSNAME-*.lazystatfs=0
+
+        # NOTE(review): opcode "_f" here vs "f_" in lazystatfs() above --
+        # confirm both spellings are valid multiop command strings.
+        multiop_bg_pause $MOUNT _f
+        RC1=$?
+        pid=$!
+
+        if [ $RC1 -ne 0 ]; then
+                log "multiop failed $RC1"
+        else
+                # Resume multiop; its statfs should now block on the dead OST.
+                kill -USR1 $pid
+                sleep $(( $TIMEOUT+1 ))
+                # kill -0 succeeding means multiop is still blocked, as
+                # expected; if it already exited, statfs did not wait.
+                kill -0 $pid
+                [ $? -ne 0 ] && error "process isn't sleep"
+                start_ost || error "Unable to start OST1"
+                wait $pid || error "statfs failed"
+        fi
+
+        umount_client $MOUNT || error "Unable to unmount client"
+        stop_ost || error "Unable to stop OST1"
+        stop_mds || error "Unable to stop MDS"
+}
+run_test 50e "normal statfs all servers down =========================="
+
+# test_50f: NON-lazy statfs with one of two OSTs down -- statfs must block
+# until the down OST (OST2) is restarted, then succeed. Finishes with
+# writeconf to regenerate configuration logs for later tests.
+test_50f() {
+        local RC1
+        local pid
+        # NOTE(review): CONN_PROC is assigned but not referenced below --
+        # possibly leftover; verify before removing.
+        CONN_PROC="osc.$FSNAME-OST0001-osc-[M]*.ost_server_uuid"
+
+        start_mds || error "Unable to start mds"
+        #first client should see only one ost
+        start_ost || error "Unable to start OST1"
+        wait_osc_import_state mds ost FULL
+
+        start_ost2 || error "Unable to start OST2"
+        wait_osc_import_state mds ost2 FULL
+
+        # Wait for client to detect down OST
+        stop_ost2 || error "Unable to stop OST2"
+
+        wait_osc_import_state mds ost2 DISCONN
+        mount_client $MOUNT || error "Unable to mount client"
+        # Disable lazystatfs so statfs blocks on the missing OST.
+        lctl set_param llite.$FSNAME-*.lazystatfs=0
+
+        multiop_bg_pause $MOUNT _f
+        RC1=$?
+        pid=$!
+
+        if [ $RC1 -ne 0 ]; then
+                log "lazystatfs multiop failed $RC1"
+        else
+                kill -USR1 $pid
+                sleep $(( $TIMEOUT+1 ))
+                # Process must still exist (blocked in statfs) after timeout.
+                kill -0 $pid
+                [ $? -ne 0 ] && error "process isn't sleep"
+                start_ost2 || error "Unable to start OST2"
+                wait $pid || error "statfs failed"
+                stop_ost2 || error "Unable to stop OST2"
+        fi
+
+        umount_client $MOUNT || error "Unable to unmount client"
+        stop_ost || error "Unable to stop OST1"
+        stop_mds || error "Unable to stop MDS"
+        writeconf
+}
+run_test 50f "normal statfs one server in down =========================="
+
+# test_51: regression test -- an OST added (start_ost2) while an MDS reint
+# operation is delayed (fail_loc 0x142) must not break RMF_MDT_MD handling
+# for the in-flight create on a fully-striped directory.
+test_51() {
+        # NOTE(review): LOCAL_TIMEOUT appears unused in this test -- possibly
+        # copied from test_49; verify before removing.
+        local LOCAL_TIMEOUT=20
+
+        reformat
+        start_mds
+        start_ost
+        mount_client $MOUNT
+        check_mount || return 1
+
+        mkdir $MOUNT/d1
+        # Stripe across all available OSTs so the create touches striping md.
+        $LFS setstripe -c -1 $MOUNT/d1
+        #define OBD_FAIL_MDS_REINT_DELAY 0x142
+        do_facet $SINGLEMDS "lctl set_param fail_loc=0x142"
+        # Create in the background; it is held up by the injected delay.
+        touch $MOUNT/d1/f1 &
+        local pid=$!
+        sleep 2
+        # Bring a second OST online while the reint is still pending.
+        start_ost2 || return 2
+        wait $pid
+        stop_ost2 || return 3
+        cleanup
+}
+run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added"
+
+# copy_files_xattrs <node> <dest-dir> <xattrs-file> <file>...
+# On <node>: copy the given files into <dest-dir> (via tar pipe, preserving
+# metadata) and dump their extended attributes into <xattrs-file>.
+# Returns 1/2/3 on directory-create / tar / getfattr failure respectively.
+copy_files_xattrs()
+{
+        local node=$1
+        local dest=$2
+        local xattrs=$3
+        shift 3
+
+        do_node $node mkdir -p $dest
+        [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
+
+        # tar-pipe copy; PIPESTATUS check makes sure BOTH tar stages
+        # succeeded (a plain $? would only reflect the extracting tar).
+        # NOTE(review): $@ is interpolated unquoted into the remote command
+        # string -- file names with spaces would break here.
+        do_node $node  'tar cf - '$@' | tar xf - -C '$dest';
+                        [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
+        [ $? -eq 0 ] || { error "Unable to tar files"; return 2; }
+
+        # Record all user-visible xattrs for later comparison.
+        do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs
+        [ $? -eq 0 ] || { error "Unable to read xattrs"; return 3; }
+}
+
+# diff_files_xattrs <node> <backup-dir> <xattrs-file> <file>...
+# On <node>: re-copy the given files into a scratch dir and compare both
+# contents and extended attributes against the backup made earlier by
+# copy_files_xattrs(). Returns a distinct non-zero code per failure stage.
+diff_files_xattrs()
+{
+        local node=$1
+        local backup=$2
+        local xattrs=$3
+        shift 3
+
+        local backup2=${TMP}/backup2
+
+        do_node $node mkdir -p $backup2
+        [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
+
+        # Snapshot the current state of the files (same tar-pipe technique
+        # and PIPESTATUS check as copy_files_xattrs).
+        do_node $node  'tar cf - '$@' | tar xf - -C '$backup2';
+                        [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
+        [ $? -eq 0 ] || { error "Unable to tar files to diff"; return 2; }
+
+        # Byte-compare file contents against the saved backup.
+        do_node $node "diff -rq $backup $backup2"
+        [ $? -eq 0 ] || { error "contents differ"; return 3; }
+
+        local xattrs2=${TMP}/xattrs2
+        do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs2
+        [ $? -eq 0 ] || { error "Unable to read xattrs to diff"; return 4; }
+
+        # Compare the xattr dumps.
+        do_node $node "diff $xattrs $xattrs2"
+        [ $? -eq 0 ] || { error "xattrs differ"; return 5; }
+
+        # Clean up scratch copies only on full success, so failures leave
+        # evidence behind for debugging.
+        do_node $node "rm -rf $backup2 $xattrs2"
+        [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 6; }
+}
+
+# test_52: verify ll_recover_lost_found_objs. Create striped files, back up
+# their contents/xattrs and the corresponding OST objects, move the objects
+# into lost+found on the ldiskfs-mounted OST, run the recovery tool, and
+# check that both objects and client-visible files match the backups.
+test_52() {
+        start_mds
+        [ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
+        start_ost
+        [ $? -eq 0 ] || { error "Unable to start OST1"; return 2; }
+        mount_client $MOUNT
+        [ $? -eq 0 ] || { error "Unable to mount client"; return 3; }
+
+        local nrfiles=8
+        local ost1mnt=${MOUNT%/*}/ost1
+        local ost1node=$(facet_active_host ost1)
+        local ost1tmp=$TMP/conf52
+
+        mkdir -p $DIR/$tdir
+        [ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
+        # Timestamp markers: only files/objects newer than these are
+        # collected by the "find -newer" calls below.
+        touch $TMP/modified_first
+        [ $? -eq 0 ] || { error "Unable to create temporary file"; return 5; }
+        do_node $ost1node "mkdir -p $ost1tmp && touch $ost1tmp/modified_first"
+        [ $? -eq 0 ] || { error "Unable to create temporary file"; return 6; }
+        # Ensure the test files get strictly newer mtimes than the markers.
+        sleep 1
+
+        $LFS setstripe $DIR/$tdir -c -1 -s 1M
+        [ $? -eq 0 ] || { error "lfs setstripe failed"; return 7; }
+
+        # Write nrfiles files of ~2.5MB each so every file owns OST objects.
+        for (( i=0; i < nrfiles; i++ )); do
+                multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c
+                [ $? -eq 0 ] || { error "multiop failed"; return 8; }
+                echo -n .
+        done
+        echo
+
+        # backup files
+        echo backup files to $TMP/files
+        local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
+        copy_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
+        [ $? -eq 0 ] || { error "Unable to copy files"; return 9; }
+
+        umount_client $MOUNT
+        [ $? -eq 0 ] || { error "Unable to umount client"; return 10; }
+        stop_ost
+        [ $? -eq 0 ] || { error "Unable to stop ost1"; return 11; }
+
+        # Mount the OST backing store directly so we can manipulate objects.
+        echo mount ost1 as ldiskfs
+        do_node $ost1node mount -t $FSTYPE $ost1_dev $ost1mnt $OST_MOUNT_OPTS
+        [ $? -eq 0 ] || { error "Unable to mount ost1 as ldiskfs"; return 12; }
+
+        # backup objects
+        echo backup objects to $ost1tmp/objects
+        # Select only real object files (numeric names) created by this test.
+        local objects=$(do_node $ost1node 'find '$ost1mnt'/O/0 -type f -size +0'\
+                        '-newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"')
+        copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
+        [ $? -eq 0 ] || { error "Unable to copy objects"; return 13; }
+
+        # move objects to lost+found
+        do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
+        [ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
+
+        # recover objects
+        do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found"
+        [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
+
+        # compare restored objects against saved ones
+        diff_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
+        [ $? -eq 0 ] || { error "Unable to diff objects"; return 16; }
+
+        do_node $ost1node "umount $ost1_dev"
+        [ $? -eq 0 ] || { error "Unable to umount ost1 as ldiskfs"; return 17; }
+
+        start_ost
+        [ $? -eq 0 ] || { error "Unable to start ost1"; return 18; }
+        mount_client $MOUNT
+        [ $? -eq 0 ] || { error "Unable to mount client"; return 19; }
+
+        # compare files
+        diff_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
+        [ $? -eq 0 ] || { error "Unable to diff files"; return 20; }
+
+        rm -rf $TMP/files $TMP/file_xattrs
+        [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 21; }
+        do_node $ost1node "rm -rf $ost1tmp"
+        [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 22; }
+        cleanup
+}
+run_test 52 "check recovering objects from lost+found"
+
+cleanup_gss
equals_msg `basename $0`: test complete
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true
+[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true