set -e
-# bug 5493 LU2034
-ALWAYS_EXCEPT="52 60 $RECOVERY_SMALL_EXCEPT"
-
-export MULTIOP=${MULTIOP:-multiop}
PTLDEBUG=${PTLDEBUG:--1}
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
-require_dsh_mds || exit 0
-
-# also long tests: 19, 21a, 21e, 21f, 23, 27
-# 1 2.5 2.5 4 4 (min)"
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="17 26a 26b 50 51 57"
-
-[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
-# bug number for skipped test: LU-2547
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 24a 24b"
+ALWAYS_EXCEPT="$RECOVERY_SMALL_EXCEPT "
build_test_filter
+require_dsh_mds || exit 0
+
# Allow us to override the setup if we already have a mounted system by
# setting SETUP=" " and CLEANUP=" "
SETUP=${SETUP:-""}
local before=$(date +%s)
local evict
- [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.6.53) ]] &&
- skip "Need MDS version at least 2.6.53" && return
+ [[ "$MDS1_VERSION" -lt $(version_code 2.6.53) ]] &&
+ skip "Need MDS version at least 2.6.53"
do_facet client "stat $DIR > /dev/null" ||
error "failed to stat $DIR: $?"
drop_bl_callback_once "chmod 0777 $DIR" ||
awk '{sub("_UUID", "", $2); print $2;}')
#assume one client
mdccli=$($LCTL dl | grep "${mdtname}-mdc" | awk '{print $4;}')
- conn_uuid=$($LCTL get_param -n mdc.${mdccli}.mds_conn_uuid)
+ conn_uuid=$($LCTL get_param -n mdc.${mdccli}.conn_uuid)
mdcpath="mdc.${mdccli}.import=connection=${conn_uuid}"
drop_bl_callback_once "chmod 0777 ${workdir}" &
local before=$(date +%s)
local evict
- [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.6.90) ]] &&
- skip "Need MDS version at least 2.6.90" && return
+ [[ "$MDS1_VERSION" -lt $(version_code 2.6.90) ]] &&
+ skip "Need MDS version at least 2.6.90"
# sleep 1 is to make sure that BEFORE is not equal to EVICTED below
sleep 1
rm -f $TMP/$tfile
echo -n ", world" | dd of=$TMP/$tfile bs=1c seek=5
+ remount_client $MOUNT
mount_client $MOUNT2
cancel_lru_locks osc
$LFS setstripe -i 0 -c 1 $DIR1/$tfile
- echo -n hello > $DIR1/$tfile
+ echo -n hello | dd of=$DIR1/$tfile bs=5
stat $DIR2/$tfile >& /dev/null
$LCTL set_param fail_err=71
client_reconnect
- cmp $DIR1/$tfile $DIR2/$tfile || error "file contents differ"
- cmp $DIR1/$tfile $TMP/$tfile || error "wrong content found"
+ cancel_lru_locks osc
+ cmp -l $DIR1/$tfile $DIR2/$tfile || error "file contents differ"
+ cmp -l $DIR1/$tfile $TMP/$tfile || error "wrong content found"
evict=$(do_facet client $LCTL get_param osc.$FSNAME-OST0000*.state | \
tr -d '\-\[\] ' | \
}
run_test 10d "test failed blocking ast"
+test_10e()
+{
+	# Race a re-sent blocking AST against a client reconnect: ost1 is made
+	# to fail the enqueue (forcing client1 to reconnect) while client2's
+	# BL AST reply is dropped.  Needs two client nodes, distinct from ost1.
+	[[ "$OST1_VERSION" -le $(version_code 2.8.58) ]] &&
+		skip "Need OST version at least 2.8.59"
+	[ $CLIENTCOUNT -lt 2 ] && skip "need two clients"
+	[ $(facet_host client) == $(facet_host ost1) ] &&
+		skip "need ost1 and client on different nodes"
+	local -a clients=(${CLIENTS//,/ })
+	local client1=${clients[0]}
+	local client2=${clients[1]}
+
+	# two single-stripe files on OST0000; prime a write lock on tfile-1
+	$LFS setstripe -c 1 -i 0 $DIR/$tfile-1 $DIR/$tfile-2
+	$MULTIOP $DIR/$tfile-1 Ow1048576c
+
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
+	$LCTL set_param fail_loc=0x80000305
+
+#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT 0x30e
+	do_facet ost1 "$LCTL set_param fail_loc=0x1000030e"
+	# hit OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT twice:
+	# 1. to return ENOTCONN from ldlm_handle_enqueue0
+	# 2. to pause reconnect handling between resend and setting
+	# import to LUSTRE_IMP_FULL state
+	do_facet ost1 "$LCTL set_param fail_val=3"
+
+	# client1 fails to respond to bl ast
+	do_node $client2 "$MULTIOP $DIR/$tfile-1 Ow1048576c" &
+	MULTIPID=$!
+
+	# ost1 returns error on enqueue, which causes client1 to reconnect
+	do_node $client1 "$MULTIOP $DIR/$tfile-2 Ow1048576c" ||
+		error "multiop failed"
+	wait $MULTIPID
+
+	# clear the fault-injection points so later tests are unaffected
+	do_facet ost1 "$LCTL set_param fail_loc=0"
+	do_facet ost1 "$LCTL set_param fail_val=0"
+}
+run_test 10e "re-send BL AST vs reconnect race 2"
+
#bug 2460
# wake up a thread waiting for completion after eviction
test_11(){
}
run_test 16 "timeout bulk put, don't evict client (2732)"
-test_17() {
+test_17a() {
local at_max_saved=0
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
[ $at_max_saved -ne 0 ] && at_max_set $at_max_saved ost1
return 0
}
-run_test 17 "timeout bulk get, don't evict client (2732)"
+run_test 17a "timeout bulk get, don't evict client (2732)"
+
+test_17b() {
+	# Verify a client is NOT evicted when a bulk GET transfer times out
+	# while the adaptive-timeout service estimate first grows, then is
+	# deliberately obliterated (bug 3582).  Needs a remote client so the
+	# conflicting read comes from another node.
+	[ -z "$RCLIENTS" ] && skip "Needs multiple clients" && return 0
+
+	# get one of the clients from client list
+	local rcli=$(echo $RCLIENTS | cut -d ' ' -f 1)
+	local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+	local ldlm_enqueue_min=$(do_facet ost1 find /sys -name ldlm_enqueue_min)
+	[ -z "$ldlm_enqueue_min" ] &&
+		skip "missing /sys/.../ldlm_enqueue_min" && return 0
+
+	$LFS setstripe -i 0 -c 1 -S 1048576 $DIR/$tfile ||
+		error "setstripe failed"
+	$LFS setstripe -i 0 -c 1 -S 1048576 $DIR/${tfile}2 ||
+		error "setstripe 2 failed"
+
+	# save every tunable we change so restore_lustre_params can put it
+	# back; brw_size is saved once here (saving it again in the branch
+	# below would duplicate the entry and replay it twice on restore)
+	save_lustre_params ost1 "at_history" > $p
+	save_lustre_params ost1 "bulk_timeout" >> $p
+	local dev="${FSNAME}-OST0000"
+	save_lustre_params ost1 "obdfilter.$dev.brw_size" >> $p
+	local ldlm_enqueue_min_save=$(do_facet ost1 cat $ldlm_enqueue_min)
+
+	local new_at_history=15
+
+	do_facet ost1 "$LCTL set_param at_history=$new_at_history"
+	do_facet ost1 "$LCTL set_param bulk_timeout=30"
+	do_facet ost1 "echo 30 > /sys/module/ptlrpc/parameters/ldlm_enqueue_min"
+
+	# brw_size is required to be 4m so that bulk transfer timeout
+	# could be simulated with OBD_FAIL_PTLRPC_CLIENT_BULK_CB3
+	local brw_size=$($LCTL get_param -n \
+	    osc.$FSNAME-OST0000-osc-[^M]*.import |
+	    awk '/max_brw_size/{print $2}')
+	if [ $brw_size -ne 4194304 ]
+	then
+		do_facet ost1 "$LCTL set_param obdfilter.$dev.brw_size=4"
+		remount_client $MOUNT
+	fi
+
+	# get service estimate expanded
+	#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
+	do_facet ost1 "$LCTL set_param fail_loc=0x80000224 fail_val=35"
+	echo "delay rpc servicing by 35 seconds"
+	dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=fdatasync
+
+	local estimate
+	estimate=$($LCTL get_param -n osc.$dev-osc-*.timeouts |
+	    awk '/portal 6/ {print $5}')
+	echo "service estimates increased to $estimate"
+
+	# let current worst service estimate to get closer to obliteration
+	sleep $((new_at_history / 3))
+
+	# start i/o and simulate bulk transfer loss
+	#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB3 0x520
+	do_facet ost1 "$LCTL set_param fail_loc=0xa0000520 fail_val=1"
+	dd if=/dev/zero of=$DIR/$tfile bs=2M count=1 conv=fdatasync,notrunc &
+	local writedd=$!
+
+	# start lock conflict handling
+	sleep $((new_at_history / 3))
+	do_node $rcli "dd if=$DIR/$tfile of=/dev/null bs=1M count=1" &
+	local readdd=$!
+
+	# obliterate the worst service estimate
+	sleep $((new_at_history / 3 + 1))
+	dd if=/dev/zero of=$DIR/${tfile}2 bs=1M count=1
+
+	estimate=$($LCTL get_param -n osc.$dev-osc-*.timeouts |
+	    awk '/portal 6/ {print $5}')
+	echo "service estimate dropped to $estimate"
+
+	# both transfers must survive the simulated bulk loss (no eviction)
+	wait $writedd
+	[[ $? == 0 ]] || error "write failed"
+	wait $readdd
+	[[ $? == 0 ]] || error "read failed"
+
+	restore_lustre_params <$p
+	if [ $brw_size -ne 4194304 ]
+	then
+		remount_client $MOUNT || error "remount_client failed"
+	fi
+	do_facet ost1 "echo $ldlm_enqueue_min_save > $ldlm_enqueue_min"
+}
+run_test 17b "timeout bulk get, dont evict client (3582)"
test_18a() {
[ -z ${ost2_svc} ] && skip_env "needs 2 osts" && return 0
do_facet ost1 lctl set_param fail_loc=0x80000225
# force reconnect
sleep 1
- df $MOUNT > /dev/null 2>&1
+ $LFS df $MOUNT > /dev/null 2>&1
sleep 2
# my understanding is that there should be nothing in the page
- # cache after the client reconnects?
+ # cache after the client reconnects?
rc=0
pgcache_empty || rc=2
rm -f $f $TMP/$tfile
dmesg -c > /dev/null
mkdir -p $DIR/$tdir
- lfs setstripe $DIR/$tdir -s 0 -i 0 -c 1
+ lfs setstripe $DIR/$tdir -S 0 -i 0 -c 1 ||
+ error "$LFS setstripe failed"
cancel_lru_locks osc
multiop_bg_pause $DIR/$tdir/$tfile-1 Ow8192_yc ||
error "mulitop Ow8192_yc failed"
check_timeout || return 1
- local OST_NEXP=$(do_facet ost1 lctl get_param -n obdfilter.${ost1_svc}.num_exports | cut -d' ' -f2)
-
- echo starting with $OST_NEXP OST exports
+ # make sure all imports are connected and not IDLE
+ do_facet client lfs df > /dev/null
# OBD_FAIL_PTLRPC_DROP_RPC 0x505
do_facet client lctl set_param fail_loc=0x505
- # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict.
- # But if there's a race to start the evictor from various obds,
- # the loser might have to wait for the next ping.
-
+ local before=$(date +%s)
local rc=0
- wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
- rc=$?
+
+ # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict.
+ # But if there's a race to start the evictor from various obds,
+ # the loser might have to wait for the next ping.
+ sleep $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
do_facet client lctl set_param fail_loc=0x0
- [ $rc -eq 0 ] || error "client not evicted from OST"
+ do_facet client lfs df > /dev/null
+
+ local oscs=$(lctl dl | awk '/-osc-/ {print $4}')
+ check_clients_evicted $before ${oscs[@]}
+ check_clients_full 10 ${oscs[@]}
}
run_test 26a "evict dead exports"
# = 9 * PING_INTERVAL + PING_INTERVAL
# = 10 PING_INTERVAL = 10 obd_timeout / 4 = 2.5 obd_timeout
# let's wait $((TIMEOUT * 3)) # bug 19887
- local rc=0
- wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) || \
- error "Client was not evicted by ost" rc=1
- wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) || \
+ wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) ||
+ error "Client was not evicted by ost"
+ wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) ||
error "Client was not evicted by mds"
}
run_test 26b "evict dead exports"
for i in $SEQ
do
#echo failover in $i sec
- log "test_$testnum: failover in $i sec"
+ log "$TESTNAME: failover in $i sec"
sleep $i
facet_failover $SINGLEMDS
done
# test of open reconstruct
test_53() {
touch $DIR/$tfile
- drop_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\
+ drop_mdt_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\
return 2
}
run_test 53 "touch: drop rep"
test_54() {
- zconf_mount `hostname` $MOUNT2
- touch $DIR/$tfile
- touch $DIR2/$tfile.1
- sleep 10
- cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
- fail $SINGLEMDS
- umount $MOUNT2
- ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
- [ x"$ERROR" == x ] || error "back in time occured"
+ zconf_mount $(hostname) $MOUNT2
+ touch $DIR/$tfile
+ touch $DIR2/$tfile.1
+ sleep 10
+ cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
+ fail $SINGLEMDS
+ umount $MOUNT2
+ ERROR=$(dmesg | egrep "(test 54|went back in time)" | tail -n1 |
+ grep "went back in time")
+ [ x"$ERROR" == x ] || error "back in time occured"
}
run_test 54 "back in time"
mkdir -p $DIR/$tdir
+ # This test assumes relatively small max_dirty_mb setting
+ # which we want to walk away from, so just for it we will
+ # temporarily lower the value
+ local max_dirty_mb=$(lctl get_param -n osc.*.max_dirty_mb | head -n 1)
+ lctl set_param -n osc.*.max_dirty_mb=32
+ stack_trap "lctl set_param osc.*.max_dirty_mb=$max_dirty_mb" EXIT
+
# Minimum pass speed is 2MBps
local ddtimeout=64
# LU-2887/LU-3089 - set min pass speed to 500KBps
- [ "$(facet_fstype ost1)" = "zfs" ] && ddtimeout=256
+ [ "$ost1_FSTYPE" = zfs ] && ddtimeout=256
# first dd should be finished quickly
$LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-1
}
run_test 59 "Read cancel race on client eviction"
-err17935 () {
- # we assume that all md changes are in the MDT0 changelog
- if [ $MDSCOUNT -gt 1 ]; then
- error_ignore bz17935 $*
- else
- error $*
- fi
-}
-
test_60() {
- MDT0=$($LCTL get_param -n mdc.*.mds_server_uuid |
- awk '{ gsub(/_UUID/,""); print $1 }' | head -n1)
-
- NUM_FILES=15000
- mkdir -p $DIR/$tdir
+ local num_files=${COUNT:-5000}
+ test_mkdir $DIR/$tdir
# Register (and start) changelog
- USER=$(do_facet $SINGLEMDS lctl --device $MDT0 changelog_register -n)
- echo "Registered as $MDT0 changelog user $USER"
+ changelog_register || error "changelog_register failed"
# Generate a large number of changelog entries
- createmany -o $DIR/$tdir/$tfile $NUM_FILES
+ createmany -o $DIR/$tdir/$tfile $num_files
sync
sleep 5
# Unlink files in the background
- unlinkmany $DIR/$tdir/$tfile $NUM_FILES &
- CLIENT_PID=$!
+ unlinkmany $DIR/$tdir/$tfile $num_files &
+ local client_pid=$!
sleep 1
# Failover the MDS while unlinks are happening
facet_failover $SINGLEMDS
# Wait for unlinkmany to finish
- wait $CLIENT_PID
+ wait $client_pid
# Check if all the create/unlink events were recorded
# in the changelog
- $LFS changelog $MDT0 >> $DIR/$tdir/changelog
- local cl_count=$(grep UNLNK $DIR/$tdir/changelog | wc -l)
- echo "$cl_count unlinks in $MDT0 changelog"
-
- do_facet $SINGLEMDS lctl --device $MDT0 changelog_deregister $USER
- USERS=$(( $(do_facet $SINGLEMDS lctl get_param -n \
- mdd.$MDT0.changelog_users | wc -l) - 2 ))
- if [ $USERS -eq 0 ]; then
- [ $cl_count -eq $NUM_FILES ] || \
- err17935 "Recorded ${cl_count} unlinks out of $NUM_FILES"
- # Also make sure we can clear large changelogs
- cl_count=$($LFS changelog $FSNAME | wc -l)
- [ $cl_count -le 2 ] || \
- error "Changelog not empty: $cl_count entries"
- else
- # If there are other users, there may be other unlinks in the log
- [ $cl_count -ge $NUM_FILES ] || \
- err17935 "Recorded ${cl_count} unlinks out of $NUM_FILES"
- echo "$USERS other changelog users; can't verify clear"
+ local cl_count=$(changelog_dump | grep -c UNLNK)
+ echo "$cl_count unlinks in changelog"
+
+ changelog_deregister || error "changelog_deregister failed"
+ if ! changelog_users $SINGLEMDS | grep -q "^cl"; then
+ [ $cl_count -eq $num_files ] ||
+ error "Recorded $cl_count of $num_files unlinks"
+ # Also make sure we can clear large changelogs
+ cl_count=$(changelog_dump | wc -l)
+ [ $cl_count -le 2 ] ||
+ error "Changelog not empty: $cl_count entries"
+ else # If other users, there may be other unlinks in the log
+ [ $cl_count -ge $num_files ] ||
+ error "Recorded $cl_count of $num_files unlinks"
+ changelog_users $SINGLEMDS
+ echo "other changelog users; can't verify clear"
fi
}
run_test 60 "Add Changelog entries during MDS failover"
#}
#run_test 62 "Verify connection flags race - bug LU-1716"
+test_65() {
+	# Enqueue a lock whose conflicting holder's export is destroyed by
+	# eviction while the request waits (OBD_FAIL_LDLM_BL_EVICT); the
+	# server must handle the enqueue against the dead export cleanly.
+	mount_client $DIR2
+
+	#grant lock1, export2
+	# NOTE(review): "-i -0" looks like a typo for "-i 0"; it is carried
+	# over verbatim from the pre-move code -- confirm lfs accepts it
+	$LFS setstripe -i -0 $DIR2/$tfile || error "setstripe failed"
+	$MULTIOP $DIR2/$tfile Ow || error "multiop failed"
+
+#define OBD_FAIL_LDLM_BL_EVICT 0x31e
+	do_facet ost $LCTL set_param fail_loc=0x31e
+	#get waiting lock2, export1
+	$MULTIOP $DIR/$tfile Ow &
+	PID1=$!
+	# let enqueue to get asleep
+	sleep 2
+
+	#get lock2 blocked
+	$MULTIOP $DIR2/$tfile Ow &
+	PID2=$!
+	sleep 2
+
+	#evict export1
+	ost_evict_client
+
+	sleep 2
+	do_facet ost $LCTL set_param fail_loc=0
+
+	# reap both writers before unmounting the second client
+	wait $PID1
+	wait $PID2
+
+	umount_client $DIR2
+}
+run_test 65 "lock enqueue for destroyed export"
+
test_66()
{
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.51) ]] ||
- { skip "Need MDS version at least 2.7.51"; return 0; }
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.51) ]] ||
+ skip "Need MDS version at least 2.7.51"
local list=$(comma_list $(osts_nodes))
# drop 1 reply with UPDATE lock
mcreate $DIR/$tfile || error "mcreate failed: $?"
- drop_ldlm_reply_once "stat $DIR/$tfile" &
+ drop_mdt_ldlm_reply_once "stat $DIR/$tfile" &
sleep 2
# make the re-sent lock to sleep
do_nodes $list $LCTL set_param fail_loc=0x80000136
#initiate the re-connect & re-send
- local mdccli=$($LCTL dl | awk '/-mdc-/ {print $4;}')
- local conn_uuid=$($LCTL get_param -n mdc.${mdccli}.mds_conn_uuid)
+ local mdtname="MDT0000"
+ local mdccli=$($LCTL dl | grep "${mdtname}-mdc" | awk '{print $4;}')
+ local conn_uuid=$($LCTL get_param -n mdc.${mdccli}.conn_uuid)
$LCTL set_param "mdc.${mdccli}.import=connection=${conn_uuid}"
sleep 2
}
run_test 66 "lock enqueue re-send vs client eviction"
-test_65() {
- mount_client $DIR2
-
- #grant lock1, export2
- $SETSTRIPE -i -0 $DIR2/$tfile || return 1
- $MULTIOP $DIR2/$tfile Ow || return 2
-
-#define OBD_FAIL_LDLM_BL_EVICT 0x31e
- do_facet ost $LCTL set_param fail_loc=0x31e
- #get waiting lock2, export1
- $MULTIOP $DIR/$tfile Ow &
- PID1=$!
- # let enqueue to get asleep
- sleep 2
-
- #get lock2 blocked
- $MULTIOP $DIR2/$tfile Ow &
- PID2=$!
- sleep 2
-
- #evict export1
- ost_evict_client
+test_67()
+{
+#define OBD_FAIL_PTLRPC_CONNECT_RACE 0x531
+ $LCTL set_param fail_loc=0x80000531
+ local mdtname="MDT0000"
+ local mdccli=$($LCTL dl | grep "${mdtname}-mdc" | awk '{print $4;}')
+ local conn_uuid=$($LCTL get_param -n mdc.${mdccli}.mds_conn_uuid)
+ $LCTL set_param "mdc.${mdccli}.import=connection=${conn_uuid}" &
sleep 2
- do_facet ost $LCTL set_param fail_loc=0
- wait $PID1
- wait $PID2
+ mds_evict_client
+ sleep 1
- umount_client $DIR2
+ client_reconnect
+ wait
}
-run_test 65 "lock enqueue for destroyed export"
+run_test 67 "connect vs import invalidate race"
check_cli_ir_state()
{
local recovery_proc=obdfilter.${!name}.recovery_status
local st
+ while : ; do
+ st=$(do_facet $target "$LCTL get_param -n $recovery_proc |
+ awk '/status:/{ print \\\$2}'")
+ [ x$st = xRECOVERING ] || break
+ done
st=$(do_facet $target "lctl get_param -n $recovery_proc |
awk '/IR:/{ print \\\$2}'")
[ $st != ON -o $st != OFF -o $st != ENABLED -o $st != DISABLED ] ||
}
run_test 100 "IR: Make sure normal recovery still works w/o IR"
-test_101()
+test_101a()
{
- do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
- { skip "MGS without IR support"; return 0; }
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ skip "MGS without IR support"
- set_ir_status full
+ set_ir_status full
- local OST1_IMP=$(get_osc_import_name client ost1)
+ local ost1_imp=$(get_osc_import_name client ost1)
- # disable pinger recovery
- lctl set_param -n osc.$OST1_IMP.pinger_recov=0
+ # disable pinger recovery
+ lctl set_param -n osc.$ost1_imp.pinger_recov=0
+ stack_trap "$LCTL set_param -n osc.$ost1_imp.pinger_recov=1" EXIT
- fail ost1
+ fail ost1
- target_instance_match ost1 || error "instance mismatch"
- nidtbl_versions_match || error "version must match"
+ target_instance_match ost1 || error "instance mismatch"
+ nidtbl_versions_match || error "version must match"
+}
+run_test 101a "IR: Make sure IR works w/o normal recovery"
+
+test_101b()
+{
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ skip "MGS without IR support"
+
+ set_ir_status full
- lctl set_param -n osc.$OST1_IMP.pinger_recov=1
+ local ost1_imp=$(get_osc_import_name client ost1)
+
+#define OBD_FAIL_OST_PREPARE_DELAY 0x247
+ do_facet ost1 "$LCTL set_param fail_loc=0x247"
+ # disable pinger recovery
+ $LCTL set_param -n osc.$ost1_imp.pinger_recov=0
+ stack_trap "$LCTL set_param -n osc.$ost1_imp.pinger_recov=1" EXIT
+
+#OST may return EAGAIN if it is not configured yet
+ fail ost1
}
-run_test 101 "IR: Make sure IR works w/o normal recovery"
+run_test 101b "IR: Make sure IR works w/o normal recovery and proceed EAGAIN"
test_102()
{
do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
{ skip "MGS without IR support"; return 0; }
- combined_mgs_mds && skip "mgs and mds on the same target" && return 0
+ combined_mgs_mds && skip "needs separate mgs and mds" && return 0
# workaround solution to generate config log on the mds
remount_facet mds1
# Since the client just mounted, its last_rcvd entry is not on disk.
# Send an RPC so exp_need_sync forces last_rcvd to commit this export
# so the client can reconnect during OST recovery (LU-924, LU-1582)
- $SETSTRIPE -i 0 $DIR/$tfile
+ $LFS setstripe -i 0 $DIR/$tfile
dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=sync
# make sure MGS's state is Partial
}
test_106() { # LU-1789
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.50) ]] ||
- { skip "Need MDS version at least 2.3.50"; return 0; }
+ [[ "$MDS1_VERSION" -ge $(version_code 2.3.50) ]] ||
+ skip "Need MDS version at least 2.3.50"
#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805
$LCTL set_param fail_loc=0x805
touch $DIR2/$tfile || error "failed to create empty file"
replay_barrier $SINGLEMDS
- $LCTL set_param debug=console
+ $LCTL set_param debug=ha
$LCTL clear
facet_failover $SINGLEMDS
- # lightweight connection must be evicted
+ # lightweight goes through LUSTRE_IMP_RECOVER during failover
touch -c $DIR2/$tfile || true
$LCTL dk $TMP/lustre-log-$TESTNAME.log
- evicted=`awk '/This client was evicted by .*MDT0000/ {
- print;
- }' $TMP/lustre-log-$TESTNAME.log`
- [ -z "$evicted" ] && error "lightweight client not evicted by mds"
+ recovered=$(awk '/MDT0000-mdc-[0-9a-f]*. lwp recover/ { print }' \
+ $TMP/lustre-log-$TESTNAME.log)
+ [ -z "$recovered" ] && error "lightweight client was not recovered"
# and all operations performed by lightweight client should be
# synchronous, so the file created before mds restart should be there
test_108() {
mkdir -p $DIR/$tdir
- $SETSTRIPE -c 1 -i 0 $DIR/$tdir
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir
dd if=/dev/zero of=$DIR/$tdir/$tfile bs=1M count=256 &
local dd_pid=$!
test_110a () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local remote_dir=$DIR/$tdir/remote_dir
- local MDTIDX=1
+ local mdtidx=1
local num
#prepare for 110 test, which need set striped dir on remote MDT.
for num in $(seq $MDSCOUNT); do
do_facet mds$num \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
+ lctl set_param -n mdt.$FSNAME*.enable_remote_dir=1 \
2>/dev/null
done
mkdir -p $DIR/$tdir
- drop_request "$LFS mkdir -i $MDTIDX -c2 $remote_dir" ||
- error "lfs mkdir failed"
- local diridx=$($GETSTRIPE -M $remote_dir)
- [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+ drop_request "$LFS mkdir -i $mdtidx -c2 $remote_dir" ||
+ error "lfs mkdir failed"
+ local diridx=$($LFS getstripe -m $remote_dir)
+ [ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
rm -rf $DIR/$tdir || error "rmdir failed"
}
test_110b () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local remote_dir=$DIR/$tdir/remote_dir
- local MDTIDX=1
+ local mdtidx=1
mkdir -p $DIR/$tdir
- drop_reint_reply "$LFS mkdir -i $MDTIDX -c2 $remote_dir" ||
- error "lfs mkdir failed"
+ drop_reint_reply "$LFS mkdir -i $mdtidx -c2 $remote_dir" ||
+ error "lfs mkdir failed"
- diridx=$($GETSTRIPE -M $remote_dir)
- [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+ diridx=$($LFS getstripe -m $remote_dir)
+ [ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
rm -rf $DIR/$tdir || error "rmdir failed"
}
test_110c () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local remote_dir=$DIR/$tdir/remote_dir
- local MDTIDX=1
+ local mdtidx=1
mkdir -p $DIR/$tdir
- drop_update_reply $MDTIDX "$LFS mkdir -i $MDTIDX -c2 $remote_dir" ||
- error "lfs mkdir failed"
+ drop_update_reply $mdtidx "$LFS mkdir -i $mdtidx -c2 $remote_dir" ||
+ error "lfs mkdir failed"
- diridx=$($GETSTRIPE -M $remote_dir)
- [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+ diridx=$($LFS getstripe -m $remote_dir)
+ [ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 110f "remove remote directory: drop slave rep"
test_110g () {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
- local remote_dir=$DIR/$tdir/remote_dir
- local MDTIDX=1
+ [[ "$MDS1_VERSION" -ge $(version_code 2.11.0) ]] ||
+ skip "Need MDS version at least 2.11.0"
- mkdir -p $remote_dir
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
- createmany -o $remote_dir/f 100
+ mkdir -p $DIR/$tdir
+ touch $DIR/$tdir/$tfile
- #define OBD_FAIL_MIGRATE_NET_REP 0x1800
- do_facet mds$MDTIDX lctl set_param fail_loc=0x1800
- $LFS migrate -m $MDTIDX $remote_dir || error "migrate failed"
- do_facet mds$MDTIDX lctl set_param fail_loc=0x0
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds1 $LCTL set_param fail_loc=0x119
+ $LFS migrate -m 1 $DIR/$tdir &
+ migrate_pid=$!
+ sleep 5
+ do_facet mds1 $LCTL set_param fail_loc=0
+ wait $migrate_pid
- for file in $(find $remote_dir); do
- mdt_index=$($LFS getstripe -M $file)
- [ $mdt_index == $MDTIDX ] ||
- error "$file is not on MDT${MDTIDX}"
- done
+ local mdt_index
+ mdt_index=$($LFS getstripe -m $DIR/$tdir)
+ [ $mdt_index == 1 ] || error "$tdir is not on MDT1"
+ mdt_index=$($LFS getstripe -m $DIR/$tdir/$tfile)
+ [ $mdt_index == 1 ] || error "$tfile is not on MDT1"
rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 110g "drop reply during migration"
test_110h () {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
local src_dir=$DIR/$tdir/source_dir
local tgt_dir=$DIR/$tdir/target_dir
local MDTIDX=1
run_test 110h "drop update reply during cross-MDT file rename"
test_110i () {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
local src_dir=$DIR/$tdir/source_dir
local tgt_dir=$DIR/$tdir/target_dir
local MDTIDX=1
run_test 110i "drop update reply during cross-MDT dir rename"
test_110j () {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
local remote_dir=$DIR/$tdir/remote_dir
local local_dir=$DIR/$tdir/local_dir
local MDTIDX=1
}
run_test 110j "drop update reply during cross-MDT ln"
+test_110k() {
+	# Start mds2 with abort_recovery while a FLD query is forced to fail
+	# (OBD_FAIL_FLD_QUERY_REQ); the mount must still succeed.
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTS"
+	[[ "$MDS1_VERSION" -ge $(version_code 2.12.55) ]] ||
+		skip "Need MDS version at least 2.12.55"
+
+	# take mds2 down and unmount the client so recovery is needed on restart
+	stop mds2 || error "stop mds2 failed"
+	umount $MOUNT
+
+#define OBD_FAIL_FLD_QUERY_REQ 0x1103
+	do_facet mds2 lctl set_param fail_loc=0x1103
+	local OPTS="$MDS_MOUNT_OPTS -o abort_recovery"
+	start mds2 $(mdsdevname 2) $OPTS ||
+		error "start MDS with abort_recovery should succeed"
+	do_facet mds2 lctl set_param fail_loc=0
+
+	# cleanup
+	stop mds2 || error "cleanup: stop mds2 failed"
+	start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS ||
+		error "cleanup: start mds2 failed"
+	zconf_mount $(hostname) $MOUNT || error "cleanup: mount failed"
+	client_up || error "post-failover df failed"
+}
+run_test 110k "FID_QUERY failed during recovery"
+
# LU-2844 mdt prepare fail should not cause umount oops
test_111 ()
{
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.62) ]] ||
- { skip "Need MDS version at least 2.3.62"; return 0; }
+ [[ "$MDS1_VERSION" -ge $(version_code 2.3.62) ]] ||
+ skip "Need MDS version at least 2.3.62"
- local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
do_facet $SINGLEMDS lctl set_param fail_loc=0x151
stop $SINGLEMDS || error "stop MDS failed"
- start $SINGLEMDS $mdsdev && error "start MDS should fail"
+ start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS &&
+ error "start MDS should fail"
do_facet $SINGLEMDS lctl set_param fail_loc=0
- start $SINGLEMDS $mdsdev || error "start MDS failed"
+ start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
+ error "start MDS failed"
}
run_test 111 "mdd setup fail should not cause umount oops"
local fail1=$1
local fail2=$2
local error=$3
+ local fail_val2=${4:-0}
df $DIR
touch $DIR/$tfile
sleep 1
df $MOUNT
- set_nodes_failloc "$(osts_nodes)" $fail2
+ set_nodes_failloc "$(osts_nodes)" $fail2 $fail_val2
wait $pid
rc=$?
}
test_115a() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b
#define OBD_FAIL_PTLRPC_DROP_BULK 0x51a
run_test 115a "read: late REQ MDunlink and no bulk"
test_115b() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b
#define OBD_FAIL_OST_ENOSPC 0x215
- test_115_write 0x8000051b 0x80000215 1
+
+ # pass $OSTCOUNT for the fail_loc to be caught
+ # appropriately by the IO thread
+ test_115_write 0x8000051b 0x80000215 1 $OSTCOUNT
}
run_test 115b "write: late REQ MDunlink and no bulk"
test_115c() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
#define OBD_FAIL_PTLRPC_DROP_BULK 0x51a
run_test 115c "read: late Reply MDunlink and no bulk"
test_115d() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
#define OBD_FAIL_OST_ENOSPC 0x215
run_test 115d "write: late Reply MDunlink and no bulk"
test_115e() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_BULK_UNLINK 0x510
#define OBD_FAIL_OST_ALL_REPLY_NET 0x211
run_test 115e "read: late Bulk MDunlink and no reply"
test_115f() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b
#define OBD_FAIL_OST_ALL_REPLY_NET 0x211
run_test 115f "read: late REQ MDunlink and no reply"
test_115g() {
- [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] &&
- skip "need at least 2.8.50 on OST" && return 0
+ [ "$OST1_VERSION" -lt $(version_code 2.8.50) ] &&
+ skip "need at least 2.8.50 on OST"
#define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c
test_115_read 0x8000051c 0
# drop 1 reply with UPDATE lock,
# resend should not create 2nd lock on server
mcreate $DIR/$tfile || error "mcreate failed: $?"
- drop_ldlm_reply_once "stat $DIR/$tfile" || error "stat failed: $?"
+ drop_mdt_ldlm_reply_once "stat $DIR/$tfile" || error "stat failed: $?"
# 2 BL AST will be sent to client, both must find the same lock,
# race them to not get EINVAL for 2nd BL AST
}
test_130a() {
- remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh"
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.2) ]] ||
+ skip "Need server version newer than 2.7.1"
+
test_130_base
wait $T130_PID || [ $? -eq 0 ] && error "stat should fail"
run_test 130a "enqueue resend on not existing file"
test_130b() {
- remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ remote_mds_nodsh && skip "remote MDS with nodsh"
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.2) ]] ||
+ skip "Need server version newer than 2.7.1"
+
test_130_base
# let the reply to be dropped
sleep 10
}
run_test 130c "layout intent resend on a stale inode"
+# Test 132: a punch (truncate) deliberately delayed on the OST for 120s
+# must not get the client evicted while a conflicting write proceeds.
+test_132() {
+	local before=$(date +%s)
+	local evict
+
+	mount_client $MOUNT2 || error "mount failed"
+
+	rm -f $DIR/$tfile
+	# get a lock on client so that export would reach the stale list
+	$LFS setstripe -i 0 $DIR/$tfile || error "setstripe failed"
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=fsync ||
+		error "dd failed"
+
+	#define OBD_FAIL_OST_PAUSE_PUNCH 0x236
+	do_facet ost1 $LCTL set_param fail_val=120 fail_loc=0x80000236
+
+	# punch stalls on the OST while the second mount writes under it
+	$TRUNCATE $DIR/$tfile 100 &
+
+	sleep 1
+	dd if=/dev/zero of=$DIR2/$tfile bs=4096 count=1 conv=notrunc ||
+		error "dd failed"
+
+	wait
+	umount_client $MOUNT2
+
+	# latest EVICTED timestamp from the import state history, if any
+	evict=$(do_facet client $LCTL get_param \
+		osc.$FSNAME-OST0000-osc-*/state |
+	    awk -F"[ [,]" '/EVICTED ]$/ { if (t<$5) {t=$5;} } END { print t }')
+
+	# pass if no eviction happened, or only before the test started
+	[ -z "$evict" ] || [[ $evict -le $before ]] ||
+		(do_facet client $LCTL get_param \
+			osc.$FSNAME-OST0000-osc-*/state;
+		    error "eviction happened: $evict before:$before")
+}
+run_test 132 "long punch"
+
+# Test 131: IO racing with an eviction must fail once the covering lock
+# is gone, instead of proceeding under a stale lock.
+test_131() {
+	remote_ost_nodsh && skip "remote OST with nodsh"
+
+	rm -f $DIR/$tfile
+	# get a lock on client so that export would reach the stale list
+	$LFS setstripe -i 0 $DIR/$tfile || error "setstripe failed"
+	dd if=/dev/zero of=$DIR/$tfile count=1 || error "dd failed"
+
+	# another IO under the same lock, delayed on the client for 4s
+	#define OBD_FAIL_OSC_DELAY_IO 0x414
+	$LCTL set_param fail_val=4 fail_loc=0x80000414
+	dd if=/dev/zero of=$DIR/$tfile count=1 conv=notrunc oflag=dsync &
+	local pid=$!
+	sleep 1
+
+	#define OBD_FAIL_LDLM_BL_EVICT 0x31e
+	set_nodes_failloc "$(osts_nodes)" 0x8000031e
+	ost_evict_client
+	client_reconnect
+
+	# delayed dd must observe the lost lock and fail
+	wait $pid && error "dd succeeded"
+	return 0
+}
+run_test 131 "IO vs evict results to IO under staled lock"
+
+test_133() {
+	local list=$(comma_list $(mdts_nodes))
+
+	# hold the blocking flock long enough to cover the resend window
+	local t=$((TIMEOUT * 2))
+	touch $DIR/$tfile
+
+	# a background flock holder forces the multiop enqueue to block
+	flock $DIR/$tfile -c "echo bl lock;sleep $t;echo bl flock unlocked" &
+	sleep 1
+	multiop_bg_pause $DIR/$tfile O_jc || return 1
+	PID=$!
+
+	# drop the LDLM reply once so the flock enqueue is resent;
+	# the resent request must still succeed
+	#define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
+	do_nodes $list $LCTL set_param fail_loc=0x80000157
+	kill -USR1 $PID
+	echo "waiting for multiop $PID"
+	wait $PID || return 2
+
+	rm -f $DIR/$tfile
+
+	return 0
+}
+run_test 133 "don't fail on flock resend"
+
+# Test 134: race MDT failover against the search for a free reply-data
+# slot while two clients perform unlink-type operations concurrently.
+test_134() {
+	[ -z "$CLIENTS" ] && skip "Need two or more clients"
+	[ $CLIENTCOUNT -lt 2 ] && skip "Need 2+ clients, have $CLIENTCOUNT"
+
+	mkdir -p $MOUNT/$tdir/1 $MOUNT/$tdir/2 || error "mkdir failed"
+	touch $MOUNT/$tdir/1/$tfile $MOUNT/$tdir/2/$tfile ||
+		error "touch failed"
+	zconf_umount_clients $CLIENTS $MOUNT
+	zconf_mount_clients $CLIENTS $MOUNT
+
+#define OBD_FAIL_TGT_REPLY_DATA_RACE 0x722
+	# assume commit interval is 5
+	do_facet mds1 "$LCTL set_param fail_loc=0x722 fail_val=5"
+
+	local -a clients=(${CLIENTS//,/ })
+	local client1=${clients[0]}
+	local client2=${clients[1]}
+
+	# both operations must survive the failover in the middle
+	do_node $client1 rm $MOUNT/$tdir/1/$tfile &
+	local rmpid=$!
+	do_node $client2 mv $MOUNT/$tdir/2/$tfile $MOUNT/$tdir/2/${tfile}_2 &
+	local mvpid=$!
+	fail mds1
+	wait $rmpid || error "rm failed"
+	wait $mvpid || error "mv failed"
+	return 0
+}
+run_test 134 "race between failover and search for reply data free slot"
+
+test_135() {
+	[ "$MDS1_VERSION" -lt $(version_code 2.12.51) ] &&
+		skip "Need MDS version at least 2.12.51"
+
+	# Data-on-MDT layout: first 1M component lives on the MDT, so the
+	# open/create reply is expected to carry the file size (see title)
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -E 1M -L mdt $DIR/$tdir
+	# to have parent dir write lock before open/resend
+	touch $DIR/$tdir/$tfile
+	# drop the LDLM reply once to force the open to be resent
+	#define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
+	do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param fail_loc=0x80000157
+	openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tdir/$tfile ||
+		error "Failed to open DOM file"
+}
+run_test 135 "DOM: open/create resend to return size"
+
+test_136() {
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[[ "$MDS1_VERSION" -ge $(version_code 2.12.52) ]] ||
+		skip "Need MDS version at least 2.12.52"
+
+	local mdts=$(comma_list $(mdts_nodes))
+	local MDT0=$(facet_svc $SINGLEMDS)
+
+	local clog=$(do_facet mds1 $LCTL --device $MDT0 changelog_register -n)
+	[ -n "$clog" ] || error "changelog_register failed"
+	# NOTE(review): cl_mask is intentionally not local — it is restored
+	# at the end of the test; confirm no other test relies on it
+	cl_mask=$(do_facet mds1 $LCTL get_param \
+		mdd.$MDT0.changelog_mask -n)
+	changelog_chmask "ALL"
+
+	# generate some changelog records to accumulate
+	test_mkdir -i 0 -c 0 $DIR/$tdir || error "mkdir $tdir failed"
+	createmany -m $DIR/$tdir/$tfile 10000 ||
+		error "create $DIR/$tdir/$tfile failed"
+
+	local size1=$(do_facet $SINGLEMDS \
+		$LCTL get_param -n mdd.$MDT0.changelog_size)
+	echo "Changelog size $size1"
+
+	#define OBD_FAIL_LLOG_PURGE_DELAY 0x1318
+	do_nodes $mdts $LCTL set_param fail_loc=0x1318 fail_val=30
+
+	# launch changelog_deregister in background on MDS
+	do_facet mds1 "nohup $LCTL --device $MDT0 changelog_deregister $clog \
+		> foo.out 2> foo.err < /dev/null &"
+	# give time to reach fail_loc
+	sleep 15
+
+	# fail_loc will make MDS sleep in the middle of changelog_deregister
+	# take this opportunity to abruptly kill MDS
+	FAILURE_MODE_save=$FAILURE_MODE
+	FAILURE_MODE=HARD
+	fail mds1
+	FAILURE_MODE=$FAILURE_MODE_save
+
+	do_nodes $mdts $LCTL set_param fail_loc=0x0 fail_val=0
+
+	local size2=$(do_facet $SINGLEMDS \
+		$LCTL get_param -n mdd.$MDT0.changelog_size)
+	echo "Changelog size $size2"
+	local clog2=$(do_facet $SINGLEMDS "$LCTL get_param -n \
+		mdd.$MDT0.changelog_users | grep $clog")
+	echo "After crash, changelog user $clog2"
+
+	# after the crash either the user still exists, or its records
+	# have been purged (size decreased) — anything else is a failure
+	[ -n "$clog2" -o $size2 -lt $size1 ] ||
+		error "changelog record count unchanged"
+
+	do_facet mds1 $LCTL set_param mdd.$MDT0.changelog_mask=\'$cl_mask\' -n
+}
+run_test 136 "changelog_deregister leaving pending records"
+
+# Test 137: a late resend of an already-applied setattr must be skipped,
+# otherwise it would undo a newer setattr on the same file.
+test_137() {
+	df $DIR
+	mkdir -p $DIR/d1
+	mkdir -p $DIR/d2
+	dd if=/dev/zero of=$DIR/d1/$tfile bs=4096 count=1
+	dd if=/dev/zero of=$DIR/d2/$tfile bs=4096 count=1
+	cancel_lru_locks osc
+
+	#define OBD_FAIL_PTLRPC_RESEND_RACE 0x525
+	do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x80000525"
+
+	# RPC1: any reply is to be delayed to disable last_xid logic
+	ln $DIR/d1/$tfile $DIR/d1/f2 &
+	sleep 1
+
+	# RPC2: setattr1 reply is delayed & resent
+	# original reply comes to client; the resend get asleep
+	chmod 666 $DIR/d2/$tfile
+
+	# RPC3: setattr2 on the same file; run ahead of RPC2 resend
+	chmod 777 $DIR/d2/$tfile
+
+	# RPC2 resend wakes up
+	sleep 5
+	[ $(stat -c "%a" $DIR/d2/$tfile) == 777 ] || error "resend got applied"
+}
+run_test 137 "late resend must be skipped if already applied"
+
+# Test 138: umount of an MDT must be possible while it is still waiting
+# in recovery for clients and osp logs.
+test_138() {
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	[[ "$MDS1_VERSION" -ge $(version_code 2.12.59) ]] ||
+		skip "Need server version newer than 2.12.59"
+
+	zconf_umount_clients $CLIENTS $MOUNT
+
+#define OBD_FAIL_TGT_RECOVERY_CONNECT 0x724
+	# delay the first step of recovery while the MDS is waiting for
+	# clients and failing to get osp logs
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x724 fail_val=5
+
+	facet_failover $SINGLEMDS
+
+	# wait out failover and the recovery timer; the value is based on
+	# the target_recovery_overseer() wait_event timeout
+	sleep 55
+	stop $SINGLEMDS || error "stop MDS failed"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
+		error "start MDS failed"
+	zconf_mount_clients $CLIENTS $MOUNT
+}
+run_test 138 "Umount MDT during recovery"
+
+# Test 139: an MDT started with a corrupted catalog id must not crash.
+test_139() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	[ $MDS1_VERSION -lt $(version_code 2.13.50) ] &&
+		skip "Need MDS version at least 2.13.50"
+
+	local mdt_dev=$(mdsdevname 1)
+
+	stop $SINGLEMDS || error "stop $SINGLEMDS failed"
+
+	# fail_val=0x68 injects an invalid log id during setup (one-shot)
+#define OBD_FAIL_OSP_INVALID_LOGID 0x2106
+	do_facet $SINGLEMDS $LCTL set_param fail_val=0x68 fail_loc=0x80002106
+	start $SINGLEMDS $mdt_dev $MDS_MOUNT_OPTS || error "Fail to start MDT"
+}
+run_test 139 "corrupted catid won't cause crash"
+
+# Test 140a: with local_recovery disabled, a server-local client mount
+# must carry the no_recovery export flag; with it enabled, it must not.
+test_140a() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	local slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	# so local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+	mount_mds_client
+
+	local cnt
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] && error "no clients with recovery disabled"
+
+	# enable recovery for local clients
+	# so no local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=1
+	mount_mds_client
+
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] || error "$cnt clients with recovery disabled"
+}
+run_test 140a "local mount is flagged properly"
+
+# Test 140b: a local client mounted with local_recovery=0 must be
+# excluded from recovery, so recovery completes quickly after failover.
+test_140b() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	local slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+
+	mount_mds_client
+	replay_barrier mds1
+	umount_mds_client
+	fail mds1
+	# Lustre: tfs-MDT0000: Recovery over after 0:03, of 2 clients 2 rec...
+	local recovery=$(do_facet mds1 dmesg |
+		awk '/Recovery over after/ { print $6 }' | tail -1 |
+		awk -F: '{ print $1 * 60 + $2 }')
+	(( recovery < TIMEOUT * 2 + 5 )) ||
+		error "recovery took too long $recovery > $((TIMEOUT * 2 + 5))"
+}
+run_test 140b "local mount is excluded from recovery"
+
+test_141() {
+	local oldc
+	local newc
+
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	combined_mgs_mds || skip "needs combined MGS/MDT"
+	( local_mode || from_build_tree ) &&
+		skip "cannot run in local mode or from build tree"
+
+	# some get_param have a bug to handle dot in param name
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	oldc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+	fail $SINGLEMDS
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	newc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+
+	# lock count must be unchanged across the MGS restart
+	[ $oldc -eq $newc ] || error "mgc lost locks ($oldc != $newc)"
+	return 0
+}
+run_test 141 "do not lose locks on MGS restart"
+
+test_142() {
+	[ $MDS1_VERSION -lt $(version_code 2.11.56) ] &&
+		skip "Need MDS version at least 2.11.56"
+
+	# leave an orphan name stub behind, then verify the MDD orphan
+	# cleanup thread removes it during the next startup
+	#define OBD_FAIL_MDS_ORPHAN_DELETE 0x165
+	do_facet mds1 $LCTL set_param fail_loc=0x165
+	$MULTIOP $DIR/$tfile Ouc || error "multiop failed"
+
+	stop mds1
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+	# cleanup thread (orph_*-MDD) must finish and exit
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 142 "orphan name stub can be cleaned up in startup"
+
+# Test 143: the orphan cleanup thread must terminate even when deleting
+# an orphan entry fails (a plain file planted under PENDING).
+test_143() {
+	[ "$MDS1_VERSION" -lt $(version_code 2.13.00) ] &&
+		skip "Need MDS version at least 2.13.00"
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+
+	local mntpt=$(facet_mntpt $SINGLEMDS)
+	stop mds1
+	# plant a stub entry directly on the backend filesystem
+	mount_fstype $SINGLEMDS || error "mount as fstype $SINGLEMDS failed"
+	do_facet $SINGLEMDS touch $mntpt/PENDING/$tfile
+	unmount_fstype $SINGLEMDS
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+
+	wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 143 "orphan cleanup thread shouldn't be blocked even delete failed"
+
+# Test 145: after recovery expires, mds1 must still connect the mdtlov
+# devices and process update logs from the other MDTs.
+test_145() {
+	[ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs"
+	[ $(facet_active_host mds2) = $(facet_active_host mds3) ] &&
+		skip "needs mds2 and mds3 on separate nodes"
+
+	replay_barrier mds1
+
+	touch $DIR/$tfile
+
+#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
+	echo block mds_connect from mds2
+	do_facet mds2 "$LCTL set_param fail_loc=0x507"
+
+#define OBD_FAIL_OUT_UPDATE_DROP 0x1707
+	echo block recovery updates from mds3
+	do_facet mds3 "$LCTL set_param fail_loc=0x1707"
+
+	local hard_timeout=\
+$(do_facet mds1 $LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_time_hard)
+
+	fail mds1 &
+
+	local get_soft_timeout_cmd=\
+"$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_time_soft 2>/dev/null"
+
+	# wait until the soft timeout has been extended to the hard limit,
+	# i.e. recovery has effectively expired
+	echo wait until mds1 recovery_time_soft is $hard_timeout
+	wait_update $(facet_host mds1) "$get_soft_timeout_cmd" \
+"$hard_timeout" $hard_timeout
+
+	echo unblock mds_connect from mds2
+	do_facet mds2 "$LCTL set_param fail_loc=0"
+
+	echo unblock recovery updates from mds3
+	do_facet mds3 "$LCTL set_param fail_loc=0"
+
+	wait
+	[ -f $DIR/$tfile ] || error "$DIR/$tfile does not exist"
+}
+run_test 145 "connect mdtlovs and process update logs after recovery expire"
+
+# Test 147: with a long obd timeout and a lost connect reply, the client
+# must keep retrying the reconnect quickly (>= 6 CONNECTING attempts).
+test_147() {
+	local obd_timeout=200
+	local old=$($LCTL get_param -n timeout)
+	local f=$DIR/$tfile
+	local connection_count
+
+	$LFS setstripe -i 0 -c 1 $f
+	stripe_index=$($LFS getstripe -i $f)
+	if [ $stripe_index -ne 0 ]; then
+		$LFS getstripe $f
+		error "$f: stripe_index $stripe_index != 0" && return
+	fi
+
+	$LCTL set_param timeout=$obd_timeout
+	stack_trap "$LCTL set_param timeout=$old && client_reconnect" EXIT
+
+	# OBD_FAIL_OST_CONNECT_NET2
+	# lost reply to connect request
+	do_facet ost1 $LCTL set_param fail_loc=0x00000225 timeout=$obd_timeout
+	stack_trap "do_facet ost1 $LCTL set_param fail_loc=0 timeout=$old" EXIT
+
+
+	ost_evict_client
+	# force reconnect
+	$LFS df $MOUNT > /dev/null 2>&1 &
+	sleep $((obd_timeout * 3 / 4))
+
+	$LCTL get_param osc.$FSNAME-OST0000-osc-*.state
+	# count CONNECTING states since the last FULL in the import history
+	connection_count=$($LCTL get_param osc.$FSNAME-OST0000-osc-*.state |
+			   tac |  sed "/FULL/,$ d" | grep CONNECTING | wc -l)
+
+	echo $connection_count
+	(($connection_count >= 6)) || error "Client reconnected too slow"
+}
+run_test 147 "Check client reconnect"
+
+test_148() {
+	local wce_param="obdfilter.$FSNAME-OST0000.writethrough_cache_enable"
+	local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+	local amc=$(at_max_get client)
+	local amo=$(at_max_get ost1)
+	local timeout
+
+	# disable adaptive timeouts so the injected bulk pause reliably
+	# triggers a resend
+	at_max_set 0 client
+	at_max_set 0 ost1
+	timeout=$(request_timeout client)
+
+	[ "$(facet_fstype ost1)" = "ldiskfs" ] && {
+		# save old r/o cache settings
+		save_lustre_params ost1 $wce_param > $p
+
+		# disable r/o cache
+		do_facet ost1 "$LCTL set_param -n $wce_param=0"
+	}
+
+	$LFS setstripe -i 0 -c 1 $DIR/$tfile
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 oflag=direct
+	cp $DIR/$tfile $TMP/$tfile
+	# pause the first bulk long enough (timeout+2) to force a resend,
+	# then overwrite with zeros; the resend must not corrupt the data
+	#define OBD_FAIL_OST_BRW_PAUSE_BULK2     0x227
+	do_facet ost1 $LCTL set_param fail_loc=0x80000227
+	do_facet ost1 $LCTL set_param fail_val=$((timeout+2))
+	dd if=/dev/urandom of=$DIR/$tfile bs=4096 count=1 conv=notrunc,fdatasync
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=notrunc,fdatasync
+	sleep 2
+	cancel_lru_locks osc
+	cmp -b $DIR/$tfile $TMP/$tfile || error "wrong data"
+
+	rm -f $DIR/$tfile $TMP/$tfile
+
+	at_max_set $amc client
+	at_max_set $amo ost1
+
+	[ "$(facet_fstype ost1)" = "ldiskfs" ] && {
+		# restore initial r/o cache settings
+		restore_lustre_params < $p
+	}
+
+	return 0
+}
+run_test 148 "data corruption through resend"
+
complete $SECONDS
check_and_cleanup_lustre
exit_status