#!/bin/bash
-# requirement:
-# add uml1 uml2 uml3 in your /etc/hosts
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:autoindent:shiftwidth=4:tabstop=4:
# FIXME - there is no reason to use all of these different
# return codes, especially when most of them are mapped to something
ONLY=${ONLY:-"$*"}
-# bug number for skipped test: 13739
-HEAD_EXCEPT=" 32a"
-
-# bug number for skipped test:
-ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT $HEAD_EXCEPT"
+# bug number for skipped test:
+# 15977
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=`dirname $0`
. $LUSTRE/tests/test-framework.sh
init_test_env $@
+
+# use small MDS + OST size to speed formatting time
+# do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
+MDSSIZE=200000
+OSTSIZE=200000
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+
# STORED_MDSSIZE is used in test_18
if [ -n "$MDSSIZE" ]; then
STORED_MDSSIZE=$MDSSIZE
fi
-# use small MDS + OST size to speed formatting time
-MDSSIZE=40000
-OSTSIZE=40000
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
+init_logging
+
+require_dsh_mds || exit 0
+require_dsh_ost || exit 0
-#
[ "$SLOW" = "no" ] && EXCEPT_SLOW="0 1 2 3 6 7 15 18 24b 25 30 31 32 33 34a 45"
assert_DIR
stop ${facet} -f
rm -f ${facet}active
# who knows if/where $TUNEFS is installed? Better reformat if it fails...
- do_facet ${facet} "$TUNEFS --writeconf ${!dev}" || echo "tunefs failed, reformatting instead" && reformat
+	do_facet ${facet} "$TUNEFS --writeconf ${!dev}" ||
+		{ echo "tunefs failed, reformatting instead"; reformat_and_config; }
- gen_config
}
gen_config() {
# The MGS must be started before the OSTs for a new fs, so start
- # and stop to generate the startup logs.
+ # and stop to generate the startup logs.
start_mds
start_ost
- sleep 5
+ wait_osc_import_state mds ost FULL
stop_ost
stop_mds
}
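+
+# wait_osc_import_state (from test-framework.sh) replaces the fixed sleeps
+# this patch removes. A minimal local sketch of the polling it performs,
+# assuming the import state is readable from the MDS's ost_server_uuid
+# parameter as in the loops removed below (the real helper also handles
+# remote facets and derives its own timeout); shown for illustration only:
+wait_import_state_sketch() {
+	local expected=$1	# e.g. FULL or DISCONN
+	local maxwait=${2:-90}
+	local param="osc.$FSNAME-OST0000-osc-[M]*.ost_server_uuid"
+	local elapsed=0
+	while [ $elapsed -lt $maxwait ]; do
+		[ "$(do_facet mds "lctl get_param -n $param" | cut -f2)" = \
+			"$expected" ] && return 0
+		sleep 1
+		elapsed=$((elapsed + 1))
+	done
+	return 1
+}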
reformat_and_config() {
reformat
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
gen_config
}
+start_mgs () {
+ echo "start mgs"
+ start mgs $MGSDEV $mgs_MOUNT_OPTS
+}
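+
+# (combined_mgs_mds, from test-framework.sh, reports whether the MGS shares
+# a device with the MDT; when it does not, callers here start the MGS
+# explicitly via start_mgs before starting the MDS.)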
+
start_mds() {
local facet=$SINGLEMDS
	# we cannot use MDSDEV1 here because SINGLEMDS may be set to an MDS other than mds1
}
setup() {
- start_ost
- start_mds
- mount_client $MOUNT
+ start_mds || error "MDT start failed"
+ start_ost || error "OST start failed"
+ mount_client $MOUNT || error "client start failed"
}
setup_noconfig() {
+ if ! combined_mgs_mds ; then
+ start_mgs
+ fi
+
start_mds
start_ost
mount_client $MOUNT
}
cleanup_nocli() {
- stop_mds || return 201
stop_ost || return 202
+ stop_mds || return 201
unload_modules || return 203
}
check_mount() {
do_facet client "cp /etc/passwd $DIR/a" || return 71
do_facet client "rm $DIR/a" || return 72
- # make sure lustre is actually mounted (touch will block,
- # but grep won't, so do it after)
+ # make sure lustre is actually mounted (touch will block,
+ # but grep won't, so do it after)
do_facet client "grep $MOUNT' ' /proc/mounts > /dev/null" || return 73
echo "setup single mount lustre success"
}
check_mount2() {
- do_facet client "touch $DIR/a" || return 71
- do_facet client "rm $DIR/a" || return 72
- do_facet client "touch $DIR2/a" || return 73
- do_facet client "rm $DIR2/a" || return 74
+ do_facet client "touch $DIR/a" || return 71
+ do_facet client "rm $DIR/a" || return 72
+ do_facet client "touch $DIR2/a" || return 73
+ do_facet client "rm $DIR2/a" || return 74
echo "setup double mount lustre success"
}
run_test 0 "single mount setup"
test_1() {
+ start_mds || error "MDT start failed"
start_ost
echo "start ost second time..."
- setup
+ start_ost && error "2nd OST start should fail"
+ mount_client $MOUNT || error "client start failed"
check_mount || return 42
cleanup || return $?
}
run_test 1 "start up ost twice (should return errors)"
test_2() {
- start_ost
start_mds
echo "start mds second time.."
- start_mds
+ start_mds && error "2nd MDT start should fail"
+ start_ost
mount_client $MOUNT
check_mount || return 43
cleanup || return $?
test_3() {
setup
#mount.lustre returns an error if already in mtab
- mount_client $MOUNT && return $?
+ mount_client $MOUNT && error "2nd client mount should fail"
check_mount || return 44
cleanup || return $?
}
run_test 5b "mds down, cleanup after failed mount (bug 2712) (should return errs)"
test_5c() {
- start_ost
start_mds
+ start_ost
[ -d $MOUNT ] || mkdir -p $MOUNT
grep " $MOUNT " /etc/mtab && echo "test 5c: mtab before mount" && return 10
local oldfs="${FSNAME}"
run_test 5d "mount with ost down"
test_5e() {
- start_ost
start_mds
+ start_ost
#define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
do_facet client "lctl set_param fail_loc=0x80000506"
CHECK_PTLDEBUG="`do_facet ost1 lctl get_param -n debug`"
if [ "$CHECK_PTLDEBUG" ] && { \
[ "$CHECK_PTLDEBUG" = "trace inode warning error emerg console" ] ||
- [ "$CHECK_PTLDEBUG" = "trace inode" ]; }; then
+ [ "$CHECK_PTLDEBUG" = "trace inode" ]; }; then
echo "lnet.debug success"
else
echo "lnet.debug: want 'trace inode', have '$CHECK_PTLDEBUG'"
# LOGS/PENDING do not exist anymore since CMD3
test_16() {
local TMPMTPT="${TMP}/conf16"
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
if [ ! -e "$MDSDEV" ]; then
log "no $MDSDEV existing, so mount Lustre to create one"
setup
run_test 16 "verify that lustre will correct the mode of OBJECTS"
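+
+# Note on the pattern used above and below: ${SINGLEMDS//mds/} strips the
+# "mds" prefix from the facet name to get its index, which mdsdevname()
+# resolves to the per-facet device variable, e.g. (hypothetical values):
+#     SINGLEMDS=mds1  ->  ${SINGLEMDS//mds/}=1  ->  $(mdsdevname 1)=$MDSDEV1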
test_17() {
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
if [ ! -e "$MDSDEV" ]; then
echo "no $MDSDEV existing, so mount Lustre to create one"
test_18() {
[ "$FSTYPE" != "ldiskfs" ] && skip "not needed for FSTYPE=$FSTYPE" && return
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
local MIN=2000000
[ $SPACE -gt $((MIN / 20)) ] && OK=1 && myMDSSIZE=$MIN && \
log "use file $MDSDEV with MIN=$MIN"
- [ -z "$OK" ] && skip "$MDSDEV too small for ${MIN}kB MDS" && return
+ [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" && return
echo "mount mds with large journal..."
test_20() {
# first format the ost/mdt
- start_ost
start_mds
+ start_ost
mount_client $MOUNT
check_mount || return 43
rm -f $DIR/$tfile
test_21a() {
start_mds
start_ost
+ wait_osc_import_state mds ost FULL
stop_ost
stop_mds
}
test_21b() {
start_ost
start_mds
+ wait_osc_import_state mds ost FULL
stop_mds
stop_ost
}
start_ost
start_mds
start_ost2
+ wait_osc_import_state mds ost2 FULL
stop_ost
stop_ost2
stop_mds
echo Client mount with ost in logs, but none running
start_ost
	# wait until the MDS is connected to the OST before opening the client connection
- # ping_interval + 1
- sleep $((TIMEOUT / 4 + 1))
+ wait_osc_import_state mds ost FULL
stop_ost
mount_client $MOUNT
# check_mount will block trying to contact ost
+ mcreate $DIR/$tfile || return 40
+ rm -f $DIR/$tfile || return 42
umount_client $MOUNT
pass
run_test 22 "start a client before osts (should return errs)"
test_23a() { # was test_23
- setup
- # fail mds
- stop $SINGLEMDS
+ setup
+ # fail mds
+ stop $SINGLEMDS
# force down client so that recovering mds waits for reconnect
local running=$(grep -c $MOUNT /proc/mounts) || true
if [ $running -ne 0 ]; then
local PID1
local PID2
local WAIT=0
- local MAX_WAIT=20
+ local MAX_WAIT=30
local sleep=1
while [ "$WAIT" -lt "$MAX_WAIT" ]; do
sleep $sleep
echo "waiting for mount to finish ... "
WAIT=$(( WAIT + sleep))
done
- [ "$WAIT" -eq "$MAX_WAIT" ] && error "MOUNT_PID $MOUNT_PID and \
- MOUNT__LUSTRE_PID $MOUNT__LUSTRE_PID still not killed in $WAIT secs"
- ps -ef | grep mount
+ if [ "$WAIT" -eq "$MAX_WAIT" ]; then
+ error "MOUNT_PID $MOUNT_PID and "\
+ "MOUNT_LUSTRE_PID $MOUNT_LUSTRE_PID still not killed in $WAIT secs"
+ ps -ef | grep mount
+ fi
stop_mds || error
stop_ost || error
}
cleanup_nocli
test_23b() { # was test_23
- start_ost
start_mds
+ start_ost
# Simulate -EINTR during mount OBD_FAIL_LDLM_CLOSE_THREAD
lctl set_param fail_loc=0x80000313
mount_client $MOUNT
}
test_24a() {
- #set up fs1
+ #set up fs1
gen_config
+
#set up fs2
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
rm $MOUNT2/b || return 4
# 2 is actually mounted
grep $MOUNT2' ' /proc/mounts > /dev/null || return 5
- # failover
+ # failover
facet_failover fs2mds
facet_failover fs2ost
df
- umount_client $MOUNT
+ umount_client $MOUNT
	# the MDS must remain up until the last MDT
stop_mds
MDS=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
run_test 24a "Multiple MDTs on a single node"
test_24b() {
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
if [ -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --mgs --reformat $fs2mdsdev || exit 10
+ add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --mgs --reformat $fs2mdsdev || exit 10
setup
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
cleanup || return 6
local myfacet=$1
local TEST=$2
local PARAM=$3
- local ORIG=$(do_facet $myfacet "$TEST")
+ local ORIG=$(do_facet $myfacet "$TEST")
if [ $# -gt 3 ]; then
local FINAL=$4
else
test_27a() {
start_ost || return 1
start_mds || return 2
- echo "Requeue thread should have started: "
- ps -e | grep ll_cfg_requeue
+ echo "Requeue thread should have started: "
+ ps -e | grep ll_cfg_requeue
set_and_check ost1 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
cleanup_nocli
}
run_test 28 "permanent parameter setting"
test_29() {
- [ "$OSTCOUNT" -lt "2" ] && skip "$OSTCOUNT < 2, skipping" && return
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
setup > /dev/null 2>&1
start_ost2
sleep 10
echo "Live client success: got $RESULT"
fi
- # check MDT too
+ # check MDT too
local MPROC="osc.$FSNAME-OST0001-osc-[M]*.active"
local MAX=30
local WAIT=0
}
run_test 29 "permanently remove an OST"
-test_30() {
+test_30a() {
setup
+ echo Big config llog
TEST="lctl get_param -n llite.$FSNAME-*.max_read_ahead_whole_mb"
ORIG=$($TEST)
LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
for i in ${LIST[@]}; do
set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
done
- # make sure client restart still works
+ # make sure client restart still works
umount_client $MOUNT
mount_client $MOUNT || return 4
- [ "$($TEST)" -ne "$i" ] && return 5
- set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $ORIG || return 6
+ [ "$($TEST)" -ne "$i" ] && error "Param didn't stick across restart $($TEST) != $i"
+ pass
+
+ echo Erase parameter setting
+ do_facet mgs "$LCTL conf_param -d $FSNAME.llite.max_read_ahead_whole_mb" || return 6
+ umount_client $MOUNT
+ mount_client $MOUNT || return 6
+ FINAL=$($TEST)
+ echo "deleted (default) value=$FINAL, orig=$ORIG"
+ # assumes this parameter started at the default value
+	[ "$FINAL" -eq "$ORIG" ] || error "Deleted value=$FINAL, orig=$ORIG"
+
cleanup
}
-run_test 30 "Big config llog"
+run_test 30a "Big config llog and conf_param deletion"
+
+test_30b() {
+ setup
+
+ # Make a fake nid. Use the OST nid, and add 20 to the least significant
+ # numerical part of it. Hopefully that's not already a failover address for
+ # the server.
+ OSTNID=$(do_facet ost1 "$LCTL get_param nis" | tail -1 | awk '{print $1}')
+ ORIGVAL=$(echo $OSTNID | egrep -oi "[0-9]*@")
+ NEWVAL=$((($(echo $ORIGVAL | egrep -oi "[0-9]*") + 20) % 256))
+ NEW=$(echo $OSTNID | sed "s/$ORIGVAL/$NEWVAL@/")
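+	# Worked example (hypothetical values): OSTNID=192.168.1.10@tcp gives
+	# ORIGVAL="10@", NEWVAL=$(((10 + 20) % 256))=30, NEW=192.168.1.30@tcp.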
+ echo "Using fake nid $NEW"
+
+ TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
+ set_and_check client "$TEST" "$FSNAME-OST0000.failover.node" $NEW || error "didn't add failover nid $NEW"
+ NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+ echo $NIDS
+ NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
+ echo "should have 2 failover nids: $NIDCOUNT"
+ [ $NIDCOUNT -eq 2 ] || error "Failover nid not added"
+ do_facet mgs "$LCTL conf_param -d $FSNAME-OST0000.failover.node" || error "conf_param delete failed"
+ umount_client $MOUNT
+ mount_client $MOUNT || return 3
+
+ NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
+ echo $NIDS
+ NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
+ echo "only 1 final nid should remain: $NIDCOUNT"
+ [ $NIDCOUNT -eq 1 ] || error "Failover nids not removed"
+
+ cleanup
+}
+run_test 30b "Remove failover nids"
test_31() { # bug 10734
# ipaddr must not exist
run_test 31 "Connect to non-existent node (shouldn't crash)"
# Use these start32/stop32 fn instead of t-f start/stop fn,
-# for local devices, to skip global facet vars init
+# for local devices, to skip global facet vars init
stop32 () {
local facet=$1
shift
echo "Starting local ${facet}: $@ $device ${MOUNT%/*}/${facet}"
mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
- RC=$?
+ local RC=$?
if [ $RC -ne 0 ]; then
echo "mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}"
echo "Start of ${device} of local ${facet} failed ${RC}"
- fi
+ fi
losetup -a
return $RC
}
}
test_32a() {
- # this test is totally useless on a client-only system
- [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
+ client_only && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ -z "$TUNEFS" ] && skip "No tunefs" && return 0
- local DISK1_8=$LUSTRE/tests/disk1_8.tgz
- [ ! -r $DISK1_8 ] && skip "Cannot find $DISK1_8" && return 0
+ [ -z "$TUNEFS" ] && skip_env "No tunefs" && return 0
- mkdir -p $TMP/$tdir
- tar xjvf $DISK1_8 -C $TMP/$tdir || \
- { skip "Cannot untar $DISK1_8" && return 0; }
+ local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
+ [ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0
+ local tmpdir=$TMP/conf32a
+ mkdir -p $tmpdir
+ tar xjvf $DISK1_8 -C $tmpdir || \
+ { skip_env "Cannot untar $DISK1_8" && return 0; }
load_modules
- lctl set_param debug=$PTLDEBUG
+ $LCTL set_param debug=$PTLDEBUG
$TUNEFS $tmpdir/mds || error "tunefs failed"
	# nids are wrong, so client won't work, but server should start
- start32 mds $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
+ start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
trap cleanup_32 EXIT INT || return 3
-
- local UUID=$(lctl get_param -n mds.lustre-MDT0000.uuid)
+
+ local UUID=$($LCTL get_param -n mdt.lustre-MDT0000.uuid)
echo MDS uuid $UUID
- [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "lustre-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
- $TUNEFS --mgsnode=`hostname` $tmpdir/ost1 || error "tunefs failed"
+ $TUNEFS --mgsnode=$HOSTNAME $tmpdir/ost1 || error "tunefs failed"
start32 ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$(lctl get_param -n obdfilter.lustre-OST0000.uuid)
+ UUID=$($LCTL get_param -n obdfilter.lustre-OST0000.uuid)
echo OST uuid $UUID
- [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "lustre-OST0000_UUID" ] || error "UUID is wrong: $UUID"
local NID=$($LCTL list_nids | head -1)
- echo "OSC changes should return err:"
- $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 && return 7
- $LCTL conf_param lustre-OST0000.failover.node=$NID && return 8
+ echo "OSC changes should succeed:"
+ $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7
+ $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8
echo "ok."
- echo "MDC changes should succeed:"
+
+ echo "MDC changes should succeed:"
$LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
$LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
echo "ok."
- # With a new good MDT failover nid, we should be able to mount a client
- # (but it cant talk to OST)
- local mountopt="-o exclude=lustre-OST0000"
-
- local device=`h2$NETTYPE $HOSTNAME`:/lustre
- echo "Starting local client: $HOSTNAME: $mountopt $device $MOUNT"
- mount -t lustre $mountopt $device $MOUNT || return 1
-
- local old=$(lctl get_param -n mdc.*.max_rpcs_in_flight)
- local new=$((old + 5))
- lctl conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=$new
- wait_update $HOSTNAME "lctl get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
+ echo "LOV changes should succeed:"
+ $LCTL pool_new lustre.interop || return 11
+ $LCTL conf_param lustre-MDT0000.lov.stripesize=4M || return 12
+ echo "ok."
cleanup_32
	# mount a second time to make sure we didn't leave the upgrade flag on
load_modules
$TUNEFS --dryrun $tmpdir/mds || error "tunefs failed"
- start32 mds $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
+ start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
trap cleanup_32 EXIT INT || return 12
cleanup_32
run_test 32a "Upgrade from 1.8 (not live)"
test_32b() {
- # this test is totally useless on a client-only system
- [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
+ client_only && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ -z "$TUNEFS" ] && skip "No tunefs" && return
+ [ -z "$TUNEFS" ] && skip_env "No tunefs" && return
local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
- [ ! -r $DISK1_8 ] && skip "Cannot find $DISK1_8" && return 0
- local tmpdir=$TMP/$tdir
+ [ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0
+ local tmpdir=$TMP/conf32b
mkdir -p $tmpdir
tar xjvf $DISK1_8 -C $tmpdir || \
- { skip "Cannot untar $DISK1_8" && return ; }
+ { skip_env "Cannot untar $DISK1_8" && return ; }
load_modules
- lctl set_param debug=$PTLDEBUG
+ $LCTL set_param debug="config"
local NEWNAME=lustre
# writeconf will cause servers to register with their current nids
start32 mds1 $tmpdir/mds "-o loop" && \
trap cleanup_32 EXIT INT || return 3
- local UUID=$(lctl get_param -n mdt.${NEWNAME}-MDT0000.uuid)
+ local UUID=$($LCTL get_param -n mdt.${NEWNAME}-MDT0000.uuid)
echo MDS uuid $UUID
- [ "$UUID" == "${NEWNAME}-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "${NEWNAME}-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
- $TUNEFS --mgsnode=`hostname` --writeconf --fsname=$NEWNAME $tmpdir/ost1 || error "tunefs failed"
+ $TUNEFS --mgsnode=$HOSTNAME --writeconf --fsname=$NEWNAME $tmpdir/ost1 ||\
+ error "tunefs failed"
start32 ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$(lctl get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
+ UUID=$($LCTL get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
echo OST uuid $UUID
[ "$UUID" == "${NEWNAME}-OST0000_UUID" ] || error "UUID is wrong: $UUID"
- echo "OSC changes should succeed:"
+ local NID=$($LCTL list_nids | head -1)
+
+ echo "OSC changes should succeed:"
$LCTL conf_param ${NEWNAME}-OST0000.osc.max_dirty_mb=15 || return 7
$LCTL conf_param ${NEWNAME}-OST0000.failover.node=$NID || return 8
echo "ok."
- echo "MDC changes should succeed:"
+
+ echo "MDC changes should succeed:"
$LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
+ $LCTL conf_param ${NEWNAME}-MDT0000.failover.node=$NID || return 10
+ echo "ok."
+
+ echo "LOV changes should succeed:"
+ $LCTL pool_new ${NEWNAME}.interop || return 11
+ $LCTL conf_param ${NEWNAME}-MDT0000.lov.stripesize=4M || return 12
echo "ok."
# MDT and OST should have registered with new nids, so we should have
echo "Starting local client: $HOSTNAME: $device $MOUNT"
mount -t lustre $device $MOUNT || return 1
- local old=$(lctl get_param -n mdc.*.max_rpcs_in_flight)
+ local old=$($LCTL get_param -n mdc.*.max_rpcs_in_flight)
local new=$((old + 5))
- lctl conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new
- wait_update $HOSTNAME "lctl get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
+ $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new
+ wait_update $HOSTNAME "$LCTL get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
[ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "94306271 1478" ] || return 12
echo "ok."
test_33a() { # bug 12333, was test_33
local rc=0
local FSNAME2=test-123
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
mount -t lustre $MGSNID:/${FSNAME2} $MOUNT2 || rc=2
echo "ok."
- cp /etc/hosts $MOUNT2/ || rc=3
+ cp /etc/hosts $MOUNT2/ || rc=3
$LFS getstripe $MOUNT2/hosts
umount -d $MOUNT2
}
run_test 34c "force umount with failed ost should be normal"
-test_35() { # bug 12459
+test_35a() { # bug 12459
setup
DBG_SAVE="`lctl get_param -n debug`"
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
- MSG="conf-sanity.sh test_35 `date +%F%kh%Mm%Ss`"
+ MSG="conf-sanity.sh test_35a `date +%F%kh%Mm%Ss`"
$LCTL clear
log "$MSG"
log "Stopping the MDT:"
[ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
cleanup
}
-run_test 35 "Reconnect to the last active server first"
+run_test 35a "Reconnect to the last active server first"
+
+test_35b() { # bug 18674
+ remote_mds || { skip "local MDS" && return 0; }
+ setup
+
+ debugsave
+ $LCTL set_param debug="ha"
+ $LCTL clear
+ MSG="conf-sanity.sh test_35b `date +%F%kh%Mm%Ss`"
+ log "$MSG"
+
+ log "Set up a fake failnode for the MDS"
+ FAKENID="127.0.0.2"
+ local device=$(do_facet mds "$LCTL get_param -n devices" | \
+ awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
+ do_facet mds "$LCTL conf_param ${device}.failover.node=$FAKENID" || \
+ return 1
+
+ local at_max_saved=0
+ # adaptive timeouts may prevent seeing the issue
+ if at_is_enabled; then
+ at_max_saved=$(at_max_get mds)
+ at_max_set 0 mds client
+ fi
+
+ mkdir -p $MOUNT/testdir
+ touch $MOUNT/testdir/test
+
+ log "Injecting EBUSY on MDS"
+ # Setting OBD_FAIL_MDS_RESEND=0x136
+ do_facet mds "$LCTL set_param fail_loc=0x80000136" || return 2
+
+ log "Stat on a test file"
+ stat $MOUNT/testdir/test
+
+ log "Stop injecting EBUSY on MDS"
+ do_facet mds "$LCTL set_param fail_loc=0" || return 3
+ rm -f $MOUNT/testdir/test
+
+ log "done"
+ # restore adaptive timeout
+ [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds client
+
+ $LCTL dk $TMP/lustre-log-$TESTNAME.log
+
+ # retrieve from the log if the client has ever tried to
+ # contact the fake server after the loss of connection
+ FAILCONN=`awk "BEGIN {ret = 0;}
+ /import_select_connection.*${FSNAME}-MDT0000-mdc.* using connection/ {
+ ret = 1;
+ if (\\\$NF ~ /$FAKENID/) {
+ ret = 2;
+ exit;
+ }
+ }
+ END {print ret}" $TMP/lustre-log-$TESTNAME.log`
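+	# FAILCONN values: 0 = no reconnection was logged, 1 = the client only
+	# reconnected to the primary, 2 = the fake failover nid was tried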
+
+ [ "$FAILCONN" == "0" ] && \
+ log "ERROR: The client reconnection has not been triggered" && \
+ return 4
+ [ "$FAILCONN" == "2" ] && \
+ log "ERROR: The client tried to reconnect to the failover server while the primary was busy" && \
+ return 5
+
+ cleanup
+}
+run_test 35b "Continue reconnection retries, if the active server is busy"
test_36() { # 12743
local rc
local FSNAME2=test1234
local fs3ost_HOST=$ost_HOST
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST && fs3ost_HOST=$ost1_HOST
rc=0
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
- [ $OSTCOUNT -lt 2 ] && skip "skipping test for single OST" && return
+ [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return
[ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
{ skip "remote OST" && return 0; }
ALLOWANCE=$((64 * $OSTCOUNT))
- if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
+ if [ $DFTOTAL -lt $(($BKTOTAL - $ALLOWANCE)) ] ||
[ $DFTOTAL -gt $(($BKTOTAL + $ALLOWANCE)) ] ; then
echo "**** FAIL: df total($DFTOTAL) mismatch OST total($BKTOTAL)"
rc=1
fi
- if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
+ if [ $DFFREE -lt $(($BKFREE - $ALLOWANCE)) ] ||
[ $DFFREE -gt $(($BKFREE + $ALLOWANCE)) ] ; then
echo "**** FAIL: df free($DFFREE) mismatch OST free($BKFREE)"
rc=2
fi
- if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
+ if [ $DFAVAIL -lt $(($BKAVAIL - $ALLOWANCE)) ] ||
[ $DFAVAIL -gt $(($BKAVAIL + $ALLOWANCE)) ] ; then
echo "**** FAIL: df avail($DFAVAIL) mismatch OST avail($BKAVAIL)"
rc=3
run_test 36 "df report consistency on OSTs with different block size"
test_37() {
- [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
+ client_only && skip "client only testing" && return 0
LOCAL_MDSDEV="$TMP/mdt.img"
SYM_MDSDEV="$TMP/sym_mdt.img"
log "rename lov_objid file on MDS"
rm -f $TMP/lov_objid.orig
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
do_facet $SINGLEMDS "$DEBUGFS -c -R \\\"dump lov_objid $TMP/lov_objid.orig\\\" $MDSDEV"
do_facet $SINGLEMDS "$DEBUGFS -w -R \\\"rm lov_objid\\\" $MDSDEV"
PTLDEBUG=+malloc
setup
cleanup
- perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '*** Leak:' &&
+	perl $SRCDIR/leak_finder.pl $TMP/debug 2>&1 | egrep '\*\*\* Leak:' &&
error "memory leak detected" || true
}
run_test 39 "leak_finder recognizes both LUSTRE and LNET malloc messages"
test_41() { #bug 14134
local rc
- local dev=${SINGLEMDS}_dev
- local MDSDEV=${!dev}
+ local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nosvc -n
start ost1 `ostdevname 1` $OST_MOUNT_OPTS
- start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs
+ start $SINGLEMDS $MDSDEV $MDS_MOUNT_OPTS -o nomgs,force
mkdir -p $MOUNT
mount_client $MOUNT || return 1
sleep 5
test_42() { #bug 14693
setup
check_mount || return 2
- do_facet client lctl conf_param lustre.llite.some_wrong_param=10
+ do_facet mgs $LCTL conf_param lustre.llite.some_wrong_param=10
umount_client $MOUNT
mount_client $MOUNT || return 1
cleanup
run_test 42 "invalid config param should not prevent client from mounting"
test_43() {
- [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip "run as root"
+	[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root" && return
setup
chmod ugo+x $DIR || error "chmod 0 failed"
set_and_check mds \
umount_client $MOUNT
cleanup_nocli
+test_44() { # 16317
+ setup
+ check_mount || return 2
+ UUID=$($LCTL get_param llite.${FSNAME}*.uuid | cut -d= -f2)
+ STATS_FOUND=no
+ UUIDS=$(do_facet mds "$LCTL get_param mdt.${FSNAME}*.exports.*.uuid")
+ for VAL in $UUIDS; do
+ NID=$(echo $VAL | cut -d= -f1)
+ CLUUID=$(echo $VAL | cut -d= -f2)
+ [ "$UUID" = "$CLUUID" ] && STATS_FOUND=yes && break
+ done
+ [ "$STATS_FOUND" = "no" ] && error "stats not found for client"
+ cleanup
+ return 0
+}
+run_test 44 "mounted client proc entry exists"
+
test_45() { #17310
setup
check_mount || return 2
}
run_test 45 "long unlink handling in ptlrpcd"
+cleanup_46a() {
+ trap 0
+ local rc=0
+ local count=$1
+
+ umount_client $MOUNT2 || rc=$?
+ umount_client $MOUNT || rc=$?
+ while [ $count -gt 0 ]; do
+ stop ost${count} -f || rc=$?
+ let count=count-1
+ done
+ stop_mds || rc=$?
+ # writeconf is needed after the test, otherwise,
+ # we might end up with extra OSTs
+ writeconf || rc=$?
+ cleanup_nocli || rc=$?
+ return $rc
+}
+
test_46a() {
- OSTCOUNT=6
- reformat
+ echo "Testing with $OSTCOUNT OSTs"
+ reformat_and_config
start_mds || return 1
#first client should see only one ost
start_ost || return 2
+ wait_osc_import_state mds ost FULL
#start_client
mount_client $MOUNT || return 3
-
- start_ost2 || return 4
- start ost3 `ostdevname 3` $OST_MOUNT_OPTS || return 5
- start ost4 `ostdevname 4` $OST_MOUNT_OPTS || return 6
- start ost5 `ostdevname 5` $OST_MOUNT_OPTS || return 7
- # wait until ost2-5 is sync
- # ping_interval + 1
- sleep $((TIMEOUT / 4 + 1))
- #second client see both ost's
+ trap "cleanup_46a $OSTCOUNT" EXIT ERR
+
+ local i
+ for (( i=2; i<=$OSTCOUNT; i++ )); do
+ start ost$i `ostdevname $i` $OST_MOUNT_OPTS || return $((i+2))
+ done
+
+	# wait until the OSTs are in sync
+ for (( i=2; i<=$OSTCOUNT; i++ )); do
+ wait_osc_import_state mds ost$i FULL
+ done
+
+	# second client sees all OSTs
mount_client $MOUNT2 || return 8
$LFS setstripe $MOUNT2 -c -1 || return 9
echo "ok" > $MOUNT2/widestripe
$LFS getstripe $MOUNT2/widestripe || return 11
	# fill the acl buffer to avoid expanding the lsm into it
- awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do
+ awk -F : '{if (FNR < 25) { print "u:"$1":rwx" }}' /etc/passwd | while read acl; do
setfacl -m $acl $MOUNT2/widestripe
done
	# without the fix, this stat will deadlock
stat $MOUNT/widestripe || return 12
- umount_client $MOUNT2 || return 13
- umount_client $MOUNT || return 14
- stop ost5 -f || return 20
- stop ost4 -f || return 21
- stop ost3 -f || return 22
- stop_ost2 || return 23
- stop_ost || return 24
- stop_mds || return 25
+ cleanup_46a $OSTCOUNT || { echo "cleanup_46a failed!" && return 13; }
+ return 0
}
run_test 46a "handle ost additional - wide striped file"
lru_size[count]=$lrs
let count=count+1
done
-
+
facet_failover ost1
facet_failover $SINGLEMDS
- df -h $MOUNT || return 3
+ client_up || return 3
count=0
for ns in $($LCTL get_param ldlm.namespaces.$FSNAME-*-*-*.lru_size); do
OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$LOCAL_TIMEOUT $MKFSOPT $OSTOPT"
reformat
- start_mds
- start_ost
- mount_client $MOUNT
+ setup_noconfig
check_mount || return 1
echo "check ldlm_timout..."
stop_mds || return 3
OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$((LOCAL_TIMEOUT - 1)) $MKFSOPT $OSTOPT"
-
+
reformat
- start_mds || return 4
- start_ost || return 5
- mount_client $MOUNT || return 6
+ setup_noconfig
check_mount || return 7
LDLM_MDS="`do_facet mds lctl get_param -n ldlm_timeout`"
if [ $LDLM_MDS -ne $LDLM_OST1 ] || [ $LDLM_MDS -ne $LDLM_CLIENT ]; then
error "Different LDLM_TIMEOUT:$LDLM_MDS $LDLM_OST1 $LDLM_CLIENT"
fi
-
+
if [ $LDLM_MDS -ne $((LOCAL_TIMEOUT - 1)) ]; then
error "LDLM_TIMEOUT($LDLM_MDS) is not correct"
fi
-
+
cleanup || return $?
MDS_MKFS_OPTS=$OLD_MDS_MKFS_OPTS
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
- CONN_PROC="osc.$FSNAME-OST0000-osc-[M]*.ost_server_uuid"
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- while [ "${CONN_STATE}" = "FULL" ]; do
- sleep 1
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- done
+ wait_osc_import_state mds ost DISCONN
+
	lazystatfs $MOUNT || error "lazystatfs should not have returned EIO"
umount_client $MOUNT || error "Unable to unmount client"
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
- CONN_PROC="osc.$FSNAME-OST0000-osc-[M]*.ost_server_uuid"
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- while [ ${CONN_STATE} = "FULL" ]; do
- sleep 1
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- done
+ wait_osc_import_state mds ost DISCONN
lazystatfs $MOUNT || error "lazystatfs failed with one down server"
umount_client $MOUNT || error "Unable to unmount client"
test_50e() {
local RC1
local pid
- CONN_PROC="osc.$FSNAME-OST0000-osc-[M]*.ost_server_uuid"
reformat_and_config
start_mds || return 1
#first client should see only one ost
start_ost || return 2
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- while [ "${CONN_STATE}" != "FULL" ]; do
- sleep 1
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- done
-
- lctl set_param llite.$FSNAME-*.lazystatfs=0
+ wait_osc_import_state mds ost FULL
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
+ wait_osc_import_state mds ost DISCONN
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- while [ "${CONN_STATE}" = "FULL" ]; do
- sleep 1
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- done
-
mount_client $MOUNT || error "Unable to mount client"
-
+ lctl set_param llite.$FSNAME-*.lazystatfs=0
+
multiop_bg_pause $MOUNT _f
RC1=$?
pid=$!
sleep $(( $TIMEOUT+1 ))
kill -0 $pid
	[ $? -ne 0 ] && error "process isn't sleeping"
- start_ost || error "Unable to start OST1"
+ start_ost || error "Unable to start OST1"
wait $pid || error "statfs failed"
fi
umount_client $MOUNT || error "Unable to unmount client"
- stop_ost || error "Unable to stop OST1"
+ stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
}
run_test 50e "normal statfs all servers down =========================="
start_mds || error "Unable to start mds"
#first client should see only one ost
start_ost || error "Unable to start OST1"
- start_ost2 || error "Unable to start OST2"
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- while [ "${CONN_STATE}" != "FULL" ]; do
- sleep 1
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- done
+ wait_osc_import_state mds ost FULL
- lctl set_param llite.$FSNAME-*.lazystatfs=0
+ start_ost2 || error "Unable to start OST2"
+ wait_osc_import_state mds ost2 FULL
# Wait for client to detect down OST
stop_ost2 || error "Unable to stop OST2"
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- while [ "${CONN_STATE}" = "FULL" ]; do
- sleep 1
- CONN_STATE=`lctl get_param -n $CONN_PROC | cut -f2`
- done
-
+ wait_osc_import_state mds ost2 DISCONN
mount_client $MOUNT || error "Unable to mount client"
-
+ lctl set_param llite.$FSNAME-*.lazystatfs=0
+
multiop_bg_pause $MOUNT _f
RC1=$?
pid=$!
sleep $(( $TIMEOUT+1 ))
kill -0 $pid
	[ $? -ne 0 ] && error "process isn't sleeping"
- start_ost2 || error "Unable to start OST1"
+ start_ost2 || error "Unable to start OST2"
wait $pid || error "statfs failed"
+ stop_ost2 || error "Unable to stop OST2"
fi
umount_client $MOUNT || error "Unable to unmount client"
- stop_ost || error "Unable to stop OST1"
+ stop_ost || error "Unable to stop OST1"
stop_mds || error "Unable to stop MDS"
writeconf
}
run_test 50f "normal statfs one server in down =========================="
+test_50g() {
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
+ setup
+ start_ost2 || error "Unable to start OST2"
+
+ local PARAM="${FSNAME}-OST0001.osc.active"
+
+ $LFS setstripe -c -1 $DIR/$tfile || error "Unable to lfs setstripe"
+ do_facet mgs $LCTL conf_param $PARAM=0 || error "Unable to deactivate OST"
+
+ umount_client $MOUNT || error "Unable to unmount client"
+ mount_client $MOUNT || error "Unable to mount client"
+ # This df should not cause a panic
+ df -k $MOUNT
+
+ do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
+ rm -f $DIR/$tfile
+ umount_client $MOUNT || error "Unable to unmount client"
+ stop_ost2 || error "Unable to stop OST2"
+ stop_ost || error "Unable to stop OST1"
+ stop_mds || error "Unable to stop MDS"
+ writeconf
+}
+run_test 50g "deactivated OST should not cause panic====================="
+
+test_51() {
+ local LOCAL_TIMEOUT=20
+
+ reformat
+ setup_noconfig
+ check_mount || return 1
+
+ mkdir $MOUNT/d1
+ $LFS setstripe -c -1 $MOUNT/d1
+ #define OBD_FAIL_MDS_REINT_DELAY 0x142
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x142"
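+	# (assumption: OBD_FAIL_MDS_REINT_DELAY makes the MDS delay processing
+	# of the reint request, so the touch below is still in flight when
+	# OST2 registers)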
+ touch $MOUNT/d1/f1 &
+ local pid=$!
+ sleep 2
+ start_ost2 || return 2
+ wait $pid
+ stop_ost2 || return 3
+ cleanup
+}
+run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added"
+
+copy_files_xattrs()
+{
+ local node=$1
+ local dest=$2
+ local xattrs=$3
+ shift 3
+
+ do_node $node mkdir -p $dest
+ [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
+
+ do_node $node 'tar cf - '$@' | tar xf - -C '$dest';
+ [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
+ [ $? -eq 0 ] || { error "Unable to tar files"; return 2; }
+
+ do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs
+ [ $? -eq 0 ] || { error "Unable to read xattrs"; return 3; }
+}
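+# Note: the PIPESTATUS check in the helper above is needed because a
+# pipeline's exit status is only that of its last command; comparing
+# "${PIPESTATUS[*]}" to "0 0" verifies that both the tar writer and the
+# tar reader succeeded.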
+
+diff_files_xattrs()
+{
+ local node=$1
+ local backup=$2
+ local xattrs=$3
+ shift 3
+
+ local backup2=${TMP}/backup2
+
+ do_node $node mkdir -p $backup2
+ [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
+
+ do_node $node 'tar cf - '$@' | tar xf - -C '$backup2';
+ [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
+ [ $? -eq 0 ] || { error "Unable to tar files to diff"; return 2; }
+
+ do_node $node "diff -rq $backup $backup2"
+ [ $? -eq 0 ] || { error "contents differ"; return 3; }
+
+ local xattrs2=${TMP}/xattrs2
+ do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs2
+ [ $? -eq 0 ] || { error "Unable to read xattrs to diff"; return 4; }
+
+ do_node $node "diff $xattrs $xattrs2"
+ [ $? -eq 0 ] || { error "xattrs differ"; return 5; }
+
+ do_node $node "rm -rf $backup2 $xattrs2"
+ [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 6; }
+}
+
+test_52() {
+ start_mds
+ [ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
+ start_ost
+ [ $? -eq 0 ] || { error "Unable to start OST1"; return 2; }
+ mount_client $MOUNT
+ [ $? -eq 0 ] || { error "Unable to mount client"; return 3; }
+
+ local nrfiles=8
+ local ost1mnt=${MOUNT%/*}/ost1
+ local ost1node=$(facet_active_host ost1)
+ local ost1tmp=$TMP/conf52
+
+ mkdir -p $DIR/$tdir
+ [ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
+ touch $TMP/modified_first
+ [ $? -eq 0 ] || { error "Unable to create temporary file"; return 5; }
+ do_node $ost1node "mkdir -p $ost1tmp && touch $ost1tmp/modified_first"
+ [ $? -eq 0 ] || { error "Unable to create temporary file"; return 6; }
+ sleep 1
+
+ $LFS setstripe $DIR/$tdir -c -1 -s 1M
+ [ $? -eq 0 ] || { error "lfs setstripe failed"; return 7; }
+
+ for (( i=0; i < nrfiles; i++ )); do
+ multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c
+ [ $? -eq 0 ] || { error "multiop failed"; return 8; }
+ echo -n .
+ done
+ echo
+
+ # backup files
+ echo backup files to $TMP/files
+ local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
+ copy_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
+ [ $? -eq 0 ] || { error "Unable to copy files"; return 9; }
+
+ umount_client $MOUNT
+ [ $? -eq 0 ] || { error "Unable to umount client"; return 10; }
+ stop_ost
+ [ $? -eq 0 ] || { error "Unable to stop ost1"; return 11; }
+
+ echo mount ost1 as ldiskfs
+ do_node $ost1node mount -t $FSTYPE $ost1_dev $ost1mnt $OST_MOUNT_OPTS
+ [ $? -eq 0 ] || { error "Unable to mount ost1 as ldiskfs"; return 12; }
+
+ # backup objects
+ echo backup objects to $ost1tmp/objects
+ local objects=$(do_node $ost1node 'find '$ost1mnt'/O/0 -type f -size +0'\
+ '-newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"')
+ copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
+ [ $? -eq 0 ] || { error "Unable to copy objects"; return 13; }
+
+ # move objects to lost+found
+ do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
+ [ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
+
+ # recover objects
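+	# (ll_recover_lost_found_objs is assumed to move each object back into
+	# its O/<seq> home based on the object id recorded in its xattrs; the
+	# xattr backups taken above let the restore be verified below)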
+ do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found"
+ [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
+
+ # compare restored objects against saved ones
+ diff_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
+ [ $? -eq 0 ] || { error "Unable to diff objects"; return 16; }
+
+ do_node $ost1node "umount $ost1_dev"
+ [ $? -eq 0 ] || { error "Unable to umount ost1 as ldiskfs"; return 17; }
+
+ start_ost
+ [ $? -eq 0 ] || { error "Unable to start ost1"; return 18; }
+ mount_client $MOUNT
+ [ $? -eq 0 ] || { error "Unable to mount client"; return 19; }
+
+ # compare files
+ diff_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
+ [ $? -eq 0 ] || { error "Unable to diff files"; return 20; }
+
+ rm -rf $TMP/files $TMP/file_xattrs
+ [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 21; }
+ do_node $ost1node "rm -rf $ost1tmp"
+ [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 22; }
+ cleanup
+}
+run_test 52 "check recovering objects from lost+found"
+
+# Checks threads_min/max/started for some service
+#
+# Arguments: service name (OST or MDT), facet (e.g., ost1, $SINGLEMDS), a
+# parameter pattern prefix like 'ost.*.ost', and the module options to set
+# when reloading (e.g. 'oss_num_threads=64').
+thread_sanity() {
+ local modname=$1
+ local facet=$2
+ local parampat=$3
+ local opts=$4
+ local tmin
+ local tmin2
+ local tmax
+ local tmax2
+ local tstarted
+ local paramp
+ local msg="Insane $modname thread counts"
+ shift 4
+
+ setup
+ check_mount || return 41
+
+ # We need to expand $parampat, but it may match multiple parameters, so
+ # we'll pick the first one
+ if ! paramp=$(do_facet $facet "lctl get_param -N ${parampat}.threads_min"|head -1); then
+ error "Couldn't expand ${parampat}.threads_min parameter name"
+ return 22
+ fi
+
+ # Remove the .threads_min part
+ paramp=${paramp%.threads_min}
+
+ # Check for sanity in defaults
+ tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
+ lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
+	lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
+
+ # Check that we can lower min/max
+ do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin - 1))"
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - 10))"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ lassert 25 "$msg" '(($tmin2 == ($tmin - 1) && $tmax2 == ($tmax -10)))' || return $?
+
+ # Check that we can set min/max to the same value
+ do_facet $facet "lctl set_param ${paramp}.threads_min=$tmin"
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$tmin"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ lassert 26 "$msg" '(($tmin2 == $tmin && $tmax2 == $tmin))' || return $?
+
+ # Check that we can't set max < min
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmin - 1))"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ lassert 27 "$msg" '(($tmin <= $tmax2))' || return $?
+
+ # We need to ensure that we get the module options desired; to do this
+ # we set LOAD_MODULES_REMOTE=true and we call setmodopts below.
+ LOAD_MODULES_REMOTE=true
+ cleanup
+ local oldvalue
+ setmodopts -a $modname "$opts" oldvalue
+
+ load_modules
+ setup
+ check_mount || return 41
+
+ # Restore previous setting of MODOPTS_*
+ setmodopts $modname "$oldvalue"
+
+ # Check that $opts took
+ tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min")
+ tmax=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max")
+ tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started")
+ lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $?
+ cleanup
+
+ # Workaround a YALA bug where YALA expects that modules will remain
+ # loaded on the servers
+ LOAD_MODULES_REMOTE=false
+ load_modules
+ setup
+ cleanup
+}
+
+test_53a() {
+ thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads=64'
+}
+run_test 53a "check OSS thread count params"
+
+test_53b() {
+ thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads=64'
+}
+run_test 53b "check MDT thread count params"
+
+if ! combined_mgs_mds ; then
+ stop mgs
+fi
+
+run_llverfs()
+{
+ local dir=$1
+ local partial_arg=""
+	local size=$(df -B G $dir | tail -1 | awk '{print $2}' | sed 's/G//') # size in GB
+
+ # Run in partial (fast) mode if the size
+ # of a partition > 10 GB
+ [ $size -gt 10 ] && partial_arg="-p"
+
+ llverfs $partial_arg $dir
+}
+
+test_54a() {
+ do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1)
+ [ $? -eq 0 ] || error "llverdev failed!"
+ reformat_and_config
+}
+run_test 54a "llverdev"
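+
+# (llverdev write/read-verifies the raw device and destroys its contents,
+# which is why test_54a must reformat_and_config afterwards; llverfs does a
+# similar pattern-write and verify through the mounted filesystem.)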
+
+test_54b() {
+ setup
+ run_llverfs $MOUNT
+ [ $? -eq 0 ] || error "llverfs failed!"
+ cleanup
+}
+run_test 54b "llverfs"
+
+lov_objid_size()
+{
+ local max_ost_index=$1
+ echo -n $(((max_ost_index + 1) * 8))
+}
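+# Worked example: lov_objid stores one 8-byte entry per OST index, so
+# lov_objid_size 1023 yields (1023 + 1) * 8 = 8192 bytes.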
+
+test_55() {
+ local mdsdev=$(mdsdevname 1)
+ local ostdev=$(ostdevname 1)
+ local saved_opts=$OST_MKFS_OPTS
+
+ for i in 0 1023 2048
+ do
+ OST_MKFS_OPTS="$saved_opts --index $i"
+ reformat
+
+ setup_noconfig
+ stopall
+
+ setup
+ sync
+ echo checking size of lov_objid for ost index $i
+ LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
+ if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
+ error "lov_objid size has to be $(lov_objid_size $i), not $LOV_OBJID_SIZE"
+ else
+ echo ok, lov_objid size is correct: $LOV_OBJID_SIZE
+ fi
+ stopall
+ done
+
+ OST_MKFS_OPTS=$saved_opts
+ reformat
+}
+run_test 55 "check lov_objid size"
+
+test_56() {
+ add mds1 $MDS_MKFS_OPTS --mkfsoptions='\"-J size=16\"' --reformat $(mdsdevname 1)
+ add ost1 $OST_MKFS_OPTS --index=1000 --reformat $(ostdevname 1)
+ add ost2 $OST_MKFS_OPTS --index=10000 --reformat $(ostdevname 2)
+
+ start_mds
+ start_ost
+ start_ost2 || error "Unable to start second ost"
+ mount_client $MOUNT || error "Unable to mount client"
+ echo ok
+ $LFS osts
+ stopall
+ reformat
+}
+run_test 56 "check big indexes"
cleanup_gss
equals_msg `basename $0`: test complete