}
run_test 10d "test failed blocking ast"
+test_10e()
+{
+	[[ $(lustre_version_code ost1) -le $(version_code 2.8.58) ]] &&
+		skip "Need OST version at least 2.8.59" && return 0
+	[ $CLIENTCOUNT -lt 2 ] && skip "need two clients" && return 0
+	[ $(facet_host client) == $(facet_host ost1) ] &&
+		skip "need ost1 and client on different nodes" && return 0
+	local -a clients=(${CLIENTS//,/ })
+	local client1=${clients[0]}
+	local client2=${clients[1]}
+
+	$LFS setstripe -c 1 -i 0 $DIR/$tfile-1 $DIR/$tfile-2
+	$MULTIOP $DIR/$tfile-1 Ow1048576c
+
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
+	$LCTL set_param fail_loc=0x80000305
+
+#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT 0x30e
+	do_facet ost1 "$LCTL set_param fail_loc=0x1000030e"
+	# hit OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT twice:
+	# 1. to return ENOTCONN from ldlm_handle_enqueue0
+	# 2. to pause reconnect handling between resend and setting
+	# import to LUSTRE_IMP_FULL state
+	do_facet ost1 "$LCTL set_param fail_val=3"
+
+	# client1 fails to respond to the bl ast
+	do_node $client2 "$MULTIOP $DIR/$tfile-1 Ow1048576c" &
+	MULTIPID=$!
+
+	# ost1 returns error on enqueue, which causes client1 to reconnect
+	do_node $client1 "$MULTIOP $DIR/$tfile-2 Ow1048576c" ||
+		error "multiop failed"
+	wait $MULTIPID
+
+	do_facet ost1 "$LCTL set_param fail_loc=0"
+	do_facet ost1 "$LCTL set_param fail_val=0"
+}
+run_test 10e "re-send BL AST vs reconnect race 2"
+
#bug 2460
# wake up a thread waiting for completion after eviction
test_11(){
}
run_test 16 "timeout bulk put, don't evict client (2732)"
-test_17() {
+test_17a() {
local at_max_saved=0
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
[ $at_max_saved -ne 0 ] && at_max_set $at_max_saved ost1
return 0
}
-run_test 17 "timeout bulk get, don't evict client (2732)"
+run_test 17a "timeout bulk get, don't evict client (2732)"
+
+test_17b() {
+	[ -z "$RCLIENTS" ] && skip "Needs multiple clients" && return 0
+
+	# get one of the clients from client list
+	local rcli=$(echo $RCLIENTS | cut -d ' ' -f 1)
+	local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+	local ldlm_enqueue_min=$(do_facet ost1 find /sys -name ldlm_enqueue_min)
+	[ -z "$ldlm_enqueue_min" ] &&
+		skip "missing /sys/.../ldlm_enqueue_min" && return 0
+
+	$LFS setstripe -i 0 -c 1 -S 1048576 $DIR/$tfile ||
+		error "setstripe failed"
+	$LFS setstripe -i 0 -c 1 -S 1048576 $DIR/${tfile}2 ||
+		error "setstripe 2 failed"
+
+	save_lustre_params ost1 "at_history" > $p
+	save_lustre_params ost1 "bulk_timeout" >> $p
+	local dev="${FSNAME}-OST0000"
+	save_lustre_params ost1 "obdfilter.$dev.brw_size" >> $p
+	local ldlm_enqueue_min_save=$(do_facet ost1 cat $ldlm_enqueue_min)
+
+	local new_at_history=15
+
+	do_facet ost1 "$LCTL set_param at_history=$new_at_history"
+	do_facet ost1 "$LCTL set_param bulk_timeout=30"
+	do_facet ost1 "echo 30 > /sys/module/ptlrpc/parameters/ldlm_enqueue_min"
+
+	# brw_size is required to be 4m so that bulk transfer timeout
+	# could be simulated with OBD_FAIL_PTLRPC_CLIENT_BULK_CB3
+	local brw_size=$($LCTL get_param -n \
+	    osc.$FSNAME-OST0000-osc-[^M]*.import |
+	    awk '/max_brw_size/{print $2}')
+	if [ $brw_size -ne 4194304 ]
+	then
+		save_lustre_params ost1 "obdfilter.$dev.brw_size" >> $p
+		do_facet ost1 "$LCTL set_param obdfilter.$dev.brw_size=4"
+		remount_client $MOUNT
+	fi
+
+	# get service estimate expanded
+	#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
+	do_facet ost1 "$LCTL set_param fail_loc=0x80000224 fail_val=35"
+	echo "delay rpc servicing by 35 seconds"
+	dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=fdatasync
+
+	local estimate
+	estimate=$($LCTL get_param -n osc.$dev-osc-*.timeouts |
+	    awk '/portal 6/ {print $5}')
+	echo "service estimates increased to $estimate"
+
+	# let the current worst service estimate age closer to expiration
+	sleep $((new_at_history / 3))
+
+	# start i/o and simulate bulk transfer loss
+	#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB3 0x520
+	do_facet ost1 "$LCTL set_param fail_loc=0xa0000520 fail_val=1"
+	dd if=/dev/zero of=$DIR/$tfile bs=2M count=1 conv=fdatasync,notrunc &
+	local writedd=$!
+
+	# start lock conflict handling
+	sleep $((new_at_history / 3))
+	do_node $rcli "dd if=$DIR/$tfile of=/dev/null bs=1M count=1" &
+	local readdd=$!
+
+	# obliterate the worst service estimate
+	sleep $((new_at_history / 3 + 1))
+	dd if=/dev/zero of=$DIR/${tfile}2 bs=1M count=1
+
+	estimate=$($LCTL get_param -n osc.$dev-osc-*.timeouts |
+	    awk '/portal 6/ {print $5}')
+	echo "service estimate dropped to $estimate"
+
+	wait $writedd
+	[[ $? == 0 ]] || error "write failed"
+	wait $readdd
+	[[ $? == 0 ]] || error "read failed"
+
+	restore_lustre_params <$p
+	if [ $brw_size -ne 4194304 ]
+	then
+		remount_client $MOUNT || error "remount_client failed"
+	fi
+	do_facet ost1 "echo $ldlm_enqueue_min_save > $ldlm_enqueue_min"
+}
+run_test 17b "timeout bulk get, dont evict client (3582)"
test_18a() {
[ -z ${ost2_svc} ] && skip_env "needs 2 osts" && return 0
dmesg -c > /dev/null
mkdir -p $DIR/$tdir
- lfs setstripe $DIR/$tdir -s 0 -i 0 -c 1
+ lfs setstripe $DIR/$tdir -S 0 -i 0 -c 1 ||
+ error "$LFS setstripe failed"
cancel_lru_locks osc
multiop_bg_pause $DIR/$tdir/$tfile-1 Ow8192_yc ||
error "mulitop Ow8192_yc failed"
check_timeout || return 1
- local OST_NEXP=$(do_facet ost1 lctl get_param -n obdfilter.${ost1_svc}.num_exports | cut -d' ' -f2)
-
- echo starting with $OST_NEXP OST exports
# OBD_FAIL_PTLRPC_DROP_RPC 0x505
do_facet client lctl set_param fail_loc=0x505
- # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict.
- # But if there's a race to start the evictor from various obds,
- # the loser might have to wait for the next ping.
-
+ local before=$(date +%s)
local rc=0
- wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
- rc=$?
+
+ # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict.
+ # But if there's a race to start the evictor from various obds,
+ # the loser might have to wait for the next ping.
+ sleep $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
do_facet client lctl set_param fail_loc=0x0
- [ $rc -eq 0 ] || error "client not evicted from OST"
+ do_facet client df > /dev/null
+
+ local oscs=$(lctl dl | awk '/-osc-/ {print $4}')
+ check_clients_evicted $before ${oscs[@]}
+ check_clients_full 10 ${oscs[@]}
}
run_test 26a "evict dead exports"
do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
{ skip "MGS without IR support"; return 0; }
- combined_mgs_mds && skip "mgs and mds on the same target" && return 0
+ combined_mgs_mds && skip "needs separate mgs and mds" && return 0
# workaround solution to generate config log on the mds
remount_facet mds1
run_test 110f "remove remote directory: drop slave rep"
test_110g () {
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.57) ]] ||
+ { skip "Need MDS version at least 2.6.57"; return 0; }
+
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local remote_dir=$DIR/$tdir/remote_dir
local MDTIDX=1
test_130a() {
remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ local server_version=$(lustre_version_code $SINGLEMDS)
+ [[ $server_version -ge $(version_code 2.7.2) ]] ||
+ { skip "Need server version newer than 2.7.1"; return 0; }
+
test_130_base
wait $T130_PID || [ $? -eq 0 ] && error "stat should fail"
test_130b() {
remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ local server_version=$(lustre_version_code $SINGLEMDS)
+ [[ $server_version -ge $(version_code 2.7.2) ]] ||
+ { skip "Need server version newer than 2.7.1"; return 0; }
+
test_130_base
# let the reply to be dropped
sleep 10