ONLY=${ONLY:-"$*"}
-# bug number for skipped test: 13739
-HEAD_EXCEPT=" 32a"
-
# bug number for skipped test:
-ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT $HEAD_EXCEPT"
+ALWAYS_EXCEPT=" $CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
SRCDIR=`dirname $0`
[ $SPACE -gt $((MIN / 20)) ] && OK=1 && myMDSSIZE=$MIN && \
log "use file $MDSDEV with MIN=$MIN"
- [ -z "$OK" ] && skip "$MDSDEV too small for ${MIN}kB MDS" && return
+ [ -z "$OK" ] && skip_env "$MDSDEV too small for ${MIN}kB MDS" && return
echo "mount mds with large journal..."
stop_ost
mount_client $MOUNT
# check_mount will block trying to contact ost
+ mcreate $DIR/$tfile || return 40
+ rm -f $DIR/$tfile || return 42
umount_client $MOUNT
pass
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
test_24b() {
if [ -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
run_test 28 "permanent parameter setting"
test_29() {
- [ "$OSTCOUNT" -lt "2" ] && skip "$OSTCOUNT < 2, skipping" && return
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
setup > /dev/null 2>&1
start_ost2
sleep 10
echo "Starting local ${facet}: $@ $device ${MOUNT%/*}/${facet}"
mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
- RC=$?
+ local RC=$?
if [ $RC -ne 0 ]; then
echo "mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}"
echo "Start of ${device} of local ${facet} failed ${RC}"
}
test_32a() {
- # this test is totally useless on a client-only system
- [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
+ client_only && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ -z "$TUNEFS" ] && skip "No tunefs" && return 0
- local DISK1_8=$LUSTRE/tests/disk1_8.tgz
- [ ! -r $DISK1_8 ] && skip "Cannot find $DISK1_8" && return 0
+ [ -z "$TUNEFS" ] && skip_env "No tunefs" && return 0
- mkdir -p $TMP/$tdir
- tar xjvf $DISK1_8 -C $TMP/$tdir || \
- { skip "Cannot untar $DISK1_8" && return 0; }
+ local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
+ [ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0
+ local tmpdir=$TMP/conf32a
+ mkdir -p $tmpdir
+ tar xjvf $DISK1_8 -C $tmpdir || \
+ { skip_env "Cannot untar $DISK1_8" && return 0; }
load_modules
- lctl set_param debug=$PTLDEBUG
+ $LCTL set_param debug=$PTLDEBUG
$TUNEFS $tmpdir/mds || error "tunefs failed"
# nids are wrong, so client wont work, but server should start
- start32 mds $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
+ start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
trap cleanup_32 EXIT INT || return 3
- local UUID=$(lctl get_param -n mds.lustre-MDT0000.uuid)
+ local UUID=$($LCTL get_param -n mdt.lustre-MDT0000.uuid)
echo MDS uuid $UUID
- [ "$UUID" == "mdsA_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "lustre-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
- $TUNEFS --mgsnode=`hostname` $tmpdir/ost1 || error "tunefs failed"
+ $TUNEFS --mgsnode=$HOSTNAME $tmpdir/ost1 || error "tunefs failed"
start32 ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$(lctl get_param -n obdfilter.lustre-OST0000.uuid)
+ UUID=$($LCTL get_param -n obdfilter.lustre-OST0000.uuid)
echo OST uuid $UUID
- [ "$UUID" == "ost1_UUID" ] || error "UUID is wrong: $UUID"
+ [ "$UUID" == "lustre-OST0000_UUID" ] || error "UUID is wrong: $UUID"
local NID=$($LCTL list_nids | head -1)
- echo "OSC changes should return err:"
- $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 && return 7
- $LCTL conf_param lustre-OST0000.failover.node=$NID && return 8
+ echo "OSC changes should succeed:"
+ $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7
+ $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8
echo "ok."
+
echo "MDC changes should succeed:"
$LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
$LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
echo "ok."
- # With a new good MDT failover nid, we should be able to mount a client
- # (but it cant talk to OST)
- local mountopt="-o exclude=lustre-OST0000"
-
- local device=`h2$NETTYPE $HOSTNAME`:/lustre
- echo "Starting local client: $HOSTNAME: $mountopt $device $MOUNT"
- mount -t lustre $mountopt $device $MOUNT || return 1
-
- local old=$(lctl get_param -n mdc.*.max_rpcs_in_flight)
- local new=$((old + 5))
- lctl conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=$new
- wait_update $HOSTNAME "lctl get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
+ echo "LOV changes should succeed:"
+ $LCTL pool_new lustre.interop || return 11
+ $LCTL conf_param lustre-MDT0000.lov.stripesize=4M || return 12
+ echo "ok."
cleanup_32
# mount a second time to make sure we didnt leave upgrade flag on
load_modules
$TUNEFS --dryrun $tmpdir/mds || error "tunefs failed"
- start32 mds $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
+ start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
trap cleanup_32 EXIT INT || return 12
cleanup_32
run_test 32a "Upgrade from 1.8 (not live)"
test_32b() {
- # this test is totally useless on a client-only system
- [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
+ client_only && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ -z "$TUNEFS" ] && skip "No tunefs" && return
+ [ -z "$TUNEFS" ] && skip_env "No tunefs" && return
local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
- [ ! -r $DISK1_8 ] && skip "Cannot find $DISK1_8" && return 0
- local tmpdir=$TMP/$tdir
+ [ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0
+ local tmpdir=$TMP/conf32b
mkdir -p $tmpdir
tar xjvf $DISK1_8 -C $tmpdir || \
- { skip "Cannot untar $DISK1_8" && return ; }
+ { skip_env "Cannot untar $DISK1_8" && return ; }
load_modules
- lctl set_param debug=$PTLDEBUG
+ $LCTL set_param debug=$PTLDEBUG
local NEWNAME=lustre
# writeconf will cause servers to register with their current nids
start32 mds1 $tmpdir/mds "-o loop" && \
trap cleanup_32 EXIT INT || return 3
- local UUID=$(lctl get_param -n mdt.${NEWNAME}-MDT0000.uuid)
+ local UUID=$($LCTL get_param -n mdt.${NEWNAME}-MDT0000.uuid)
echo MDS uuid $UUID
[ "$UUID" == "${NEWNAME}-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
- $TUNEFS --mgsnode=`hostname` --writeconf --fsname=$NEWNAME $tmpdir/ost1 || error "tunefs failed"
+ $TUNEFS --mgsnode=$HOSTNAME --writeconf --fsname=$NEWNAME $tmpdir/ost1 || error "tunefs failed"
start32 ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$(lctl get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
+ UUID=$($LCTL get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
echo OST uuid $UUID
[ "$UUID" == "${NEWNAME}-OST0000_UUID" ] || error "UUID is wrong: $UUID"
+ local NID=$($LCTL list_nids | head -1)
+
echo "OSC changes should succeed:"
$LCTL conf_param ${NEWNAME}-OST0000.osc.max_dirty_mb=15 || return 7
$LCTL conf_param ${NEWNAME}-OST0000.failover.node=$NID || return 8
echo "ok."
+
echo "MDC changes should succeed:"
$LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
+ $LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
+ echo "ok."
+
+ echo "LOV changes should succeed:"
+ $LCTL pool_new lustre.interop || return 11
+ $LCTL conf_param lustre-MDT0000.lov.stripesize=4M || return 12
echo "ok."
# MDT and OST should have registered with new nids, so we should have
echo "Starting local client: $HOSTNAME: $device $MOUNT"
mount -t lustre $device $MOUNT || return 1
- local old=$(lctl get_param -n mdc.*.max_rpcs_in_flight)
+ local old=$($LCTL get_param -n mdc.*.max_rpcs_in_flight)
local new=$((old + 5))
- lctl conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new
- wait_update $HOSTNAME "lctl get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
+ $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new
+ wait_update $HOSTNAME "$LCTL get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
[ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "94306271 1478" ] || return 12
echo "ok."
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
}
run_test 34c "force umount with failed ost should be normal"
-test_35() { # bug 12459
+test_35a() { # bug 12459
setup
DBG_SAVE="`lctl get_param -n debug`"
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
- MSG="conf-sanity.sh test_35 `date +%F%kh%Mm%Ss`"
+ MSG="conf-sanity.sh test_35a `date +%F%kh%Mm%Ss`"
$LCTL clear
log "$MSG"
log "Stopping the MDT:"
[ "$NEXTCONN" != "0" ] && log "The client didn't try to reconnect to the last active server (tried ${NEXTCONN} instead)" && return 7
cleanup
}
-run_test 35 "Reconnect to the last active server first"
+run_test 35a "Reconnect to the last active server first"
+
+# Regression test for bug 18674: when the primary MDS answers with EBUSY,
+# the client must keep retrying that primary connection instead of failing
+# over.  A fake (unreachable) failover nid is configured on the MDS, EBUSY
+# is injected via fail_loc, and the debug log is scanned afterwards to see
+# which connection import_select_connection actually picked.
+test_35b() { # bug 18674
+ remote_mds || { skip "local MDS" && return 0; }
+ setup
+
+ debugsave
+ $LCTL set_param debug="ha"
+ $LCTL clear
+ MSG="conf-sanity.sh test_35b `date +%F%kh%Mm%Ss`"
+ log "$MSG"
+
+ log "Set up a fake failnode for the MDS"
+ FAKENID="127.0.0.2"
+ local device=$(do_facet mds "$LCTL get_param -n devices" | \
+ awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
+ do_facet mds "$LCTL conf_param ${device}.failover.node=$FAKENID" || \
+ return 1
+
+ local at_max_saved=0
+ # adaptive timeouts may prevent seeing the issue
+ if at_is_enabled; then
+ at_max_saved=$(at_max_get mds)
+ at_max_set 0 mds client
+ fi
+
+ mkdir -p $MOUNT/testdir
+ touch $MOUNT/testdir/test
+
+ log "Injecting EBUSY on MDS"
+ # Setting OBD_FAIL_MDS_RESEND=0x136
+ do_facet mds "$LCTL set_param fail_loc=0x80000136" || return 2
+
+ # This stat triggers an MDS request that gets EBUSY and must be resent.
+ log "Stat on a test file"
+ stat $MOUNT/testdir/test
+
+ log "Stop injecting EBUSY on MDS"
+ do_facet mds "$LCTL set_param fail_loc=0" || return 3
+ rm -f $MOUNT/testdir/test
+
+ log "done"
+ # restore adaptive timeout
+ [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds client
+
+ $LCTL dk $TMP/lustre-log-$TESTNAME.log
+
+ # retrieve from the log if the client has ever tried to
+ # contact the fake server after the loss of connection
+ # ret=0: no reconnection seen, ret=1: retried the primary (expected),
+ # ret=2: tried the fake failover nid (the bug)
+ FAILCONN=`awk "BEGIN {ret = 0;}
+ /import_select_connection.*${FSNAME}-MDT0000-mdc.* using connection/ {
+ ret = 1;
+ if (\\\$NF ~ /$FAKENID/) {
+ ret = 2;
+ exit;
+ }
+ }
+ END {print ret}" $TMP/lustre-log-$TESTNAME.log`
+
+ [ "$FAILCONN" == "0" ] && \
+ log "ERROR: The client reconnection has not been triggered" && \
+ return 4
+ [ "$FAILCONN" == "2" ] && \
+ log "ERROR: The client tried to reconnect to the failover server while the primary was busy" && \
+ return 5
+
+ cleanup
+}
+run_test 35b "Continue reconnection retries, if the active server is busy"
test_36() { # 12743
local rc
if [ -z "$fs2ost_DEV" -o -z "$fs2mds_DEV" -o -z "$fs3ost_DEV" ]; then
do_facet $SINGLEMDS [ -b "$MDSDEV" ] && \
- skip "mixed loopback and real device not working" && return
+ skip_env "mixed loopback and real device not working" && return
fi
- [ $OSTCOUNT -lt 2 ] && skip "skipping test for single OST" && return
+ [ $OSTCOUNT -lt 2 ] && skip_env "skipping test for single OST" && return
[ "$ost_HOST" = "`hostname`" -o "$ost1_HOST" = "`hostname`" ] || \
{ skip "remote OST" && return 0; }
run_test 36 "df report consistency on OSTs with different block size"
test_37() {
- [ -n "$CLIENTONLY" -o -n "$CLIENTMODSONLY" ] && skip "client only testing" && return 0
+ client_only && skip "client only testing" && return 0
LOCAL_MDSDEV="$TMP/mdt.img"
SYM_MDSDEV="$TMP/sym_mdt.img"
run_test 42 "invalid config param should not prevent client from mounting"
test_43() {
- [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip "run as root"
+ [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
setup
chmod ugo+x $DIR || error "chmod 0 failed"
set_and_check mds \
}
run_test 45 "long unlink handling in ptlrpcd"
+# Tear down the test_46a setup: unmount both clients, stop OSTs ost$1
+# down to ost1, stop the MDS, then writeconf and cleanup_nocli.
+# Keeps going past individual failures, returning the last error seen.
+# $1 - number of OSTs to stop (ost1..ost$1)
+cleanup_46a() {
+ trap 0
+ local rc=0
+ local count=$1
+
+ umount_client $MOUNT2 || rc=$?
+ umount_client $MOUNT || rc=$?
+ while [ $count -gt 0 ]; do
+ stop ost${count} -f || rc=$?
+ let count=count-1
+ done
+ stop_mds || rc=$?
+ # writeconf is needed after the test, otherwise,
+ # we might end up with extra OSTs
+ writeconf || rc=$?
+ cleanup_nocli || rc=$?
+ return $rc
+}
+
test_46a() {
- [ $OSTCOUNT -lt 6 ] && skip "skipping test for too few OSTs" && return
+ echo "Testing with $OSTCOUNT OSTs"
reformat
start_mds || return 1
#first client should see only one ost
wait_osc_import_state mds ost FULL
#start_client
mount_client $MOUNT || return 3
+ trap "cleanup_46a $OSTCOUNT" EXIT ERR
+
+ local i
+ for (( i=2; i<=$OSTCOUNT; i++ )); do
+ start ost$i `ostdevname $i` $OST_MOUNT_OPTS || return $((i+2))
+ done
+
+ # wait until osts in sync
+ for (( i=2; i<=$OSTCOUNT; i++ )); do
+ wait_osc_import_state mds ost$i FULL
+ done
+
- start_ost2 || return 4
- start ost3 `ostdevname 3` $OST_MOUNT_OPTS || return 5
- start ost4 `ostdevname 4` $OST_MOUNT_OPTS || return 6
- start ost5 `ostdevname 5` $OST_MOUNT_OPTS || return 7
- # wait until ost2-5 is sync
- # ping_interval + 1
- wait_osc_import_state mds ost2 FULL
- wait_osc_import_state mds ost3 FULL
- wait_osc_import_state mds ost4 FULL
- wait_osc_import_state mds ost5 FULL
#second client see all ost's
mount_client $MOUNT2 || return 8
# will be deadlock
stat $MOUNT/widestripe || return 12
- umount_client $MOUNT2 || return 13
- umount_client $MOUNT || return 14
- stop ost5 -f || return 20
- stop ost4 -f || return 21
- stop ost3 -f || return 22
- stop_ost2 || return 23
- stop_ost || return 24
- stop_mds || return 25
+ cleanup_46a $OSTCOUNT || { echo "cleanup_46a failed!" && return 13; }
+ return 0
}
run_test 46a "handle ost additional - wide striped file"
}
run_test 50f "normal statfs one server in down =========================="
+# Deactivate an OST via conf_param while a fully-striped file exists,
+# then remount the client and run df — the statfs over the deactivated
+# OST must not panic.  Requires at least two OSTs.
+test_50g() {
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2, skipping" && return
+ setup
+ start_ost2 || error "Unable to start OST2"
+
+ local PARAM="${FSNAME}-OST0001.osc.active"
+
+ # stripe across all OSTs so the file has objects on the OST we disable
+ $LFS setstripe -c -1 $DIR/$tfile || error "Unable to lfs setstripe"
+ do_facet mgs $LCTL conf_param $PARAM=0 || error "Unable to deactivate OST"
+
+ umount_client $MOUNT || error "Unable to unmount client"
+ mount_client $MOUNT || error "Unable to mount client"
+ # This df should not cause a panic
+ df -k $MOUNT
+
+ do_facet mgs $LCTL conf_param $PARAM=1 || error "Unable to activate OST"
+ rm -f $DIR/$tfile
+ umount_client $MOUNT || error "Unable to unmount client"
+ stop_ost2 || error "Unable to stop OST2"
+ stop_ost || error "Unable to stop OST1"
+ stop_mds || error "Unable to stop MDS"
+ # drop the conf_param changes made above from the config log
+ writeconf
+}
+run_test 50g "deactivated OST should not cause panic====================="
+
test_51() {
local LOCAL_TIMEOUT=20
}
run_test 51 "Verify that mdt_reint handles RMF_MDT_MD correctly when an OST is added"
+# Back up files and their extended attributes on a remote node.
+# $1 - node to run on, $2 - destination directory for the copies,
+# $3 - file to dump the xattrs into, remaining args - files to back up.
+# Copies via a tar pipe (checking both pipe stages through PIPESTATUS)
+# and records xattrs with getfattr for a later diff_files_xattrs.
+copy_files_xattrs()
+{
+ local node=$1
+ local dest=$2
+ local xattrs=$3
+ shift 3
+
+ do_node $node mkdir -p $dest
+ [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
+
+ do_node $node 'tar cf - '$@' | tar xf - -C '$dest';
+ [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
+ [ $? -eq 0 ] || { error "Unable to tar files"; return 2; }
+
+ do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs
+ [ $? -eq 0 ] || { error "Unable to read xattrs"; return 3; }
+}
+
+# Compare files (and their xattrs) against a backup taken earlier with
+# copy_files_xattrs.
+# $1 - node to run on, $2 - backup directory from copy_files_xattrs,
+# $3 - xattrs file from copy_files_xattrs, remaining args - current files.
+# Takes a fresh tar-pipe copy plus getfattr dump of the current files and
+# diffs both against the backup; temporaries are removed on success.
+diff_files_xattrs()
+{
+ local node=$1
+ local backup=$2
+ local xattrs=$3
+ shift 3
+
+ local backup2=${TMP}/backup2
+
+ do_node $node mkdir -p $backup2
+ [ $? -eq 0 ] || { error "Unable to create directory"; return 1; }
+
+ do_node $node 'tar cf - '$@' | tar xf - -C '$backup2';
+ [ \"\${PIPESTATUS[*]}\" = \"0 0\" ] || exit 1'
+ [ $? -eq 0 ] || { error "Unable to tar files to diff"; return 2; }
+
+ do_node $node "diff -rq $backup $backup2"
+ [ $? -eq 0 ] || { error "contents differ"; return 3; }
+
+ local xattrs2=${TMP}/xattrs2
+ do_node $node 'getfattr -d -m "[a-z]*\\." '$@' > '$xattrs2
+ [ $? -eq 0 ] || { error "Unable to read xattrs to diff"; return 4; }
+
+ do_node $node "diff $xattrs $xattrs2"
+ [ $? -eq 0 ] || { error "xattrs differ"; return 5; }
+
+ do_node $node "rm -rf $backup2 $xattrs2"
+ [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 6; }
+}
+
+# Exercise ll_recover_lost_found_objs: create striped files, back up their
+# data and xattrs, then mount ost1 as ldiskfs, move the corresponding
+# objects into lost+found, run the recovery tool, and verify that both the
+# recovered objects and the files seen through the client match the backups.
+test_52() {
+ start_mds
+ [ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
+ start_ost
+ [ $? -eq 0 ] || { error "Unable to start OST1"; return 2; }
+ mount_client $MOUNT
+ [ $? -eq 0 ] || { error "Unable to mount client"; return 3; }
+
+ local nrfiles=8
+ local ost1mnt=${MOUNT%/*}/ost1
+ local ost1node=$(facet_active_host ost1)
+ local ost1tmp=$TMP/conf52
+
+ mkdir -p $DIR/$tdir
+ [ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
+ # timestamp markers so 'find -newer' below only picks up our files/objects
+ touch $TMP/modified_first
+ [ $? -eq 0 ] || { error "Unable to create temporary file"; return 5; }
+ do_node $ost1node "mkdir -p $ost1tmp && touch $ost1tmp/modified_first"
+ [ $? -eq 0 ] || { error "Unable to create temporary file"; return 6; }
+ sleep 1
+
+ $LFS setstripe $DIR/$tdir -c -1 -s 1M
+ [ $? -eq 0 ] || { error "lfs setstripe failed"; return 7; }
+
+ for (( i=0; i < nrfiles; i++ )); do
+ multiop $DIR/$tdir/$tfile-$i Ow1048576w1048576w524288c
+ [ $? -eq 0 ] || { error "multiop failed"; return 8; }
+ echo -n .
+ done
+ echo
+
+ # backup files
+ echo backup files to $TMP/files
+ local files=$(find $DIR/$tdir -type f -newer $TMP/modified_first)
+ copy_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
+ [ $? -eq 0 ] || { error "Unable to copy files"; return 9; }
+
+ umount_client $MOUNT
+ [ $? -eq 0 ] || { error "Unable to umount client"; return 10; }
+ stop_ost
+ [ $? -eq 0 ] || { error "Unable to stop ost1"; return 11; }
+
+ echo mount ost1 as ldiskfs
+ do_node $ost1node mount -t $FSTYPE $ost1_dev $ost1mnt $OST_MOUNT_OPTS
+ [ $? -eq 0 ] || { error "Unable to mount ost1 as ldiskfs"; return 12; }
+
+ # backup objects
+ echo backup objects to $ost1tmp/objects
+ local objects=$(do_node $ost1node 'find '$ost1mnt'/O/0 -type f -size +0'\
+ '-newer '$ost1tmp'/modified_first -regex ".*\/[0-9]+"')
+ copy_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
+ [ $? -eq 0 ] || { error "Unable to copy objects"; return 13; }
+
+ # move objects to lost+found
+ do_node $ost1node 'mv '$objects' '${ost1mnt}'/lost+found'
+ [ $? -eq 0 ] || { error "Unable to move objects"; return 14; }
+
+ # recover objects
+ do_node $ost1node "ll_recover_lost_found_objs -d $ost1mnt/lost+found"
+ [ $? -eq 0 ] || { error "ll_recover_lost_found_objs failed"; return 15; }
+
+ # compare restored objects against saved ones
+ diff_files_xattrs $ost1node $ost1tmp/objects $ost1tmp/object_xattrs $objects
+ [ $? -eq 0 ] || { error "Unable to diff objects"; return 16; }
+
+ do_node $ost1node "umount $ost1_dev"
+ [ $? -eq 0 ] || { error "Unable to umount ost1 as ldiskfs"; return 17; }
+
+ start_ost
+ [ $? -eq 0 ] || { error "Unable to start ost1"; return 18; }
+ mount_client $MOUNT
+ [ $? -eq 0 ] || { error "Unable to mount client"; return 19; }
+
+ # compare files
+ diff_files_xattrs `hostname` $TMP/files $TMP/file_xattrs $files
+ [ $? -eq 0 ] || { error "Unable to diff files"; return 20; }
+
+ rm -rf $TMP/files $TMP/file_xattrs
+ [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 21; }
+ do_node $ost1node "rm -rf $ost1tmp"
+ [ $? -eq 0 ] || { error "Unable to delete temporary files"; return 22; }
+ cleanup
+}
+run_test 52 "check recovering objects from lost+found"
+
cleanup_gss
equals_msg `basename $0`: test complete
[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true