+run_test 20b "ldlm_handle_enqueue error (should return error)"
+
+test_21a() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ close_pid=$!
+
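+	#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
+	# (the 0x80000000 bit is OBD_FAIL_ONCE, so the failure fires only once)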
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000129"
+ multiop $DIR/$tdir-2/f Oc &
+ open_pid=$!
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
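+	#define OBD_FAIL_MDS_CLOSE_NET 0x115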
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc
+ wait $close_pid || return 1
+ wait $open_pid || return 2
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
+
+ rm -rf $DIR/$tdir-*
+}
+run_test 21a "drop close request while close and open are both in flight"
+
+test_21b() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ close_pid=$!
+
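+	#define OBD_FAIL_MDS_REINT_NET 0x107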
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
+ mcreate $DIR/$tdir-2/f &
+ open_pid=$!
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc
+ wait $close_pid || return 1
+ wait $open_pid || return 3
+
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 4
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 5
+ rm -rf $DIR/$tdir-*
+}
+run_test 21b "drop open request while close and open are both in flight"
+
+test_21c() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ close_pid=$!
+
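+	#define OBD_FAIL_MDS_REINT_NET 0x107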
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
+ mcreate $DIR/$tdir-2/f &
+ open_pid=$!
+ sleep 3
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
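+	#define OBD_FAIL_MDS_CLOSE_NET 0x115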
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc
+ wait $close_pid || return 1
+ wait $open_pid || return 2
+
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+	$CHECKSTAT -t file $DIR/$tdir-1/f || return 3
+	$CHECKSTAT -t file $DIR/$tdir-2/f || return 4
+ rm -rf $DIR/$tdir-*
+}
+run_test 21c "drop both requests while close and open are both in flight"
+
+test_21d() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ pid=$!
+
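+	#define OBD_FAIL_MDS_PAUSE_OPEN 0x129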
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000129"
+ multiop $DIR/$tdir-2/f Oc &
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
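+	#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122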
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000122"
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ wait $pid || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
+
+ rm -rf $DIR/$tdir-*
+}
+run_test 21d "drop close reply while close and open are both in flight"
+
+test_21e() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ pid=$!
+
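+	#define OBD_FAIL_MDS_REINT_NET_REP 0x119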
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
+ touch $DIR/$tdir-2/f &
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ wait $pid || return 1
+
+ sleep $TIMEOUT
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
+ rm -rf $DIR/$tdir-*
+}
+run_test 21e "drop open reply while close and open are both in flight"
+
+test_21f() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ pid=$!
+
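+	#define OBD_FAIL_MDS_REINT_NET_REP 0x119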
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
+ touch $DIR/$tdir-2/f &
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
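+	#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122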
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000122"
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ wait $pid || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
+ rm -rf $DIR/$tdir-*
+}
+run_test 21f "drop both replies while close and open are both in flight"
+
+test_21g() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ pid=$!
+
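+	#define OBD_FAIL_MDS_REINT_NET_REP 0x119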
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
+ touch $DIR/$tdir-2/f &
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
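+	#define OBD_FAIL_MDS_CLOSE_NET 0x115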
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ kill -USR1 $pid
+ cancel_lru_locks mdc
+ wait $pid || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
+ rm -rf $DIR/$tdir-*
+}
+run_test 21g "drop open reply and close request while close and open are both in flight"
+
+test_21h() {
+ mkdir -p $DIR/$tdir-1
+ mkdir -p $DIR/$tdir-2
+ multiop_bg_pause $DIR/$tdir-1/f O_c || return 1
+ pid=$!
+
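+	#define OBD_FAIL_MDS_REINT_NET 0x107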
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
+ touch $DIR/$tdir-2/f &
+ touch_pid=$!
+ sleep 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
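+	#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122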
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000122"
+ cancel_lru_locks mdc
+ kill -USR1 $pid
+ wait $pid || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ wait $touch_pid || return 2
+
+ $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
+ $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
+ rm -rf $DIR/$tdir-*
+}
+run_test 21h "drop open request and close reply while close and open are both in flight"
+
+# bug 3462 - multiple MDC requests
+test_22() {
+ f1=$DIR/${tfile}-1
+ f2=$DIR/${tfile}-2
+
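+	#define OBD_FAIL_MDS_CLOSE_NET 0x115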
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ multiop $f2 Oc &
+ close_pid=$!
+
+ sleep 1
+ multiop $f1 msu || return 1
+
+ cancel_lru_locks mdc
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ wait $close_pid || return 2
+ rm -rf $f2 || return 4
+}
+run_test 22 "drop close request and do mknod"
+
+test_23() { #b=4561
+ multiop_bg_pause $DIR/$tfile O_c || return 1
+ pid=$!
+ # give a chance for open
+ sleep 5
+
+ # try the close
+ drop_request "kill -USR1 $pid"
+
+ fail $SINGLEMDS
+ wait $pid || return 1
+ return 0
+}
+run_test 23 "client hang when closing a file after MDS crash"
+
+test_24() { # bug 11710 details correct fsync() behavior
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+ lfs setstripe $DIR/$tdir -s 0 -i 0 -c 1
+ cancel_lru_locks osc
+ multiop_bg_pause $DIR/$tdir/$tfile Owy_wyc || return 1
+ MULTI_PID=$!
+ ost_evict_client
+ kill -USR1 $MULTI_PID
+ wait $MULTI_PID
+ rc=$?
+ lctl set_param fail_loc=0x0
+ client_reconnect
+ [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true
+}
+run_test 24 "fsync error (should return error)"
+
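+# Wait until the given facet has one fewer export than $exports,
+# i.e. until one client has been evicted; $3 is the wait_update timeout.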
+wait_client_evicted () {
+ local facet=$1
+ local exports=$2
+ local varsvc=${facet}_svc
+
+	wait_update $(facet_host $facet) \
+		"lctl get_param -n *.${!varsvc}.num_exports | cut -d' ' -f2" \
+		$((exports - 1)) $3
+}
+
+test_26a() { # was test_26 bug 5921 - evict dead exports by pinger
+# this test can only run from a client on a separate node.
+ remote_ost || { skip "local OST" && return 0; }
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+ remote_mds || { skip "local MDS" && return 0; }
+
+ if [ $(facet_host mgs) = $(facet_host ost1) ]; then
+ skip "msg and ost1 are at the same node"
+ return 0
+ fi
+
+ check_timeout || return 1
+
+ local OST_NEXP=$(do_facet ost1 lctl get_param -n obdfilter.${ost1_svc}.num_exports | cut -d' ' -f2)
+
+ echo starting with $OST_NEXP OST exports
+# OBD_FAIL_PTLRPC_DROP_RPC 0x505
+ do_facet client lctl set_param fail_loc=0x505
+ # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict.
+ # But if there's a race to start the evictor from various obds,
+ # the loser might have to wait for the next ping.
+
+ local rc=0
+ wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
+ rc=$?
+ do_facet client lctl set_param fail_loc=0x0
+ [ $rc -eq 0 ] || error "client not evicted from OST"
+}
+run_test 26a "evict dead exports"
+
+test_26b() { # bug 10140 - evict dead exports by pinger
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ if [ $(facet_host mgs) = $(facet_host ost1) ]; then
+ skip "msg and ost1 are at the same node"
+ return 0
+ fi
+
+ check_timeout || return 1
+ clients_up
+ zconf_mount `hostname` $MOUNT2 ||
+ { error "Failed to mount $MOUNT2"; return 2; }
+	sleep 1 # wait for connections to be established
+
+ local MDS_NEXP=$(do_facet $SINGLEMDS lctl get_param -n mdt.${mds1_svc}.num_exports | cut -d' ' -f2)
+ local OST_NEXP=$(do_facet ost1 lctl get_param -n obdfilter.${ost1_svc}.num_exports | cut -d' ' -f2)
+
+ echo starting with $OST_NEXP OST and $MDS_NEXP MDS exports
+
+ zconf_umount `hostname` $MOUNT2 -f
+
+	# PING_INTERVAL = max(obd_timeout / 4, 1)
+	# PING_EVICT_TIMEOUT = PING_INTERVAL * 6
+
+	# evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict.
+	# But if there's a race to start the evictor from various obds,
+	# the loser might have to wait for the next ping, so in the worst case
+	# it takes 9 * PING_INTERVAL + PING_INTERVAL
+	# = 10 * PING_INTERVAL = 10 * obd_timeout / 4 = 2.5 * obd_timeout
+	# so let's wait $((TIMEOUT * 3)) # bug 19887
+	wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) || \
+		error "client was not evicted by OST"
+	wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) || \
+		error "client was not evicted by MDS"
+}
+run_test 26b "evict dead exports"
+
+test_27() {
+ mkdir -p $DIR/$tdir
+ writemany -q -a $DIR/$tdir/$tfile 0 5 &
+ CLIENT_PID=$!
+ sleep 1
+ local save_FAILURE_MODE=$FAILURE_MODE
+ FAILURE_MODE="SOFT"
+ facet_failover $SINGLEMDS
+#define OBD_FAIL_OSC_SHUTDOWN 0x407
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x80000407
+ # need to wait for reconnect
+ echo waiting for fail_loc
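+	# fail_loc is printed as a signed 32-bit value: 0x80000407 == -2147482617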
+ wait_update_facet $SINGLEMDS "lctl get_param -n fail_loc" "-2147482617"
+ facet_failover $SINGLEMDS
+ #no crashes allowed!
+ kill -USR1 $CLIENT_PID
+ wait $CLIENT_PID
+ true
+ FAILURE_MODE=$save_FAILURE_MODE
+}
+run_test 27 "fail LOV while using OSCs"
+
+test_28() { # bug 6086 - error adding new clients
+ do_facet client mcreate $DIR/$tfile || return 1
+	drop_bl_callback "chmod 0777 $DIR/$tfile" || echo "evicted as expected"
+ #define OBD_FAIL_MDS_CLIENT_ADD 0x12f
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012f"
+ # fail once (evicted), reconnect fail (fail_loc), ok
+ client_up || (sleep 10; client_up) || (sleep 10; client_up) || error "reconnect failed"
+ rm -f $DIR/$tfile
+ fail $SINGLEMDS # verify MDS last_rcvd can be loaded
+}
+run_test 28 "handle error adding new clients (bug 6086)"
+
+test_29a() { # bug 22273 - error adding new clients
+ #define OBD_FAIL_TGT_CLIENT_ADD 0x711
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000711"
+ # fail abort so client will be new again
+ fail_abort $SINGLEMDS
+ client_up || error "reconnect failed"
+ return 0
+}
+run_test 29a "error adding new clients doesn't cause LBUG (bug 22273)"
+
+test_29b() { # bug 22273 - error adding new clients
+ #define OBD_FAIL_TGT_CLIENT_ADD 0x711
+ do_facet ost1 "lctl set_param fail_loc=0x80000711"
+ # fail abort so client will be new again
+ fail_abort ost1
+ client_up || error "reconnect failed"
+ return 0
+}
+run_test 29b "error adding new clients doesn't cause LBUG (bug 22273)"
+
+test_50() {
+ mkdir -p $DIR/$tdir
+ # put a load of file creates/writes/deletes
+ writemany -q $DIR/$tdir/$tfile 0 5 &
+ CLIENT_PID=$!
+ echo writemany pid $CLIENT_PID
+ sleep 10
+ FAILURE_MODE="SOFT"
+ fail $SINGLEMDS
+ # wait for client to reconnect to MDS
+ sleep 60
+ fail $SINGLEMDS
+ sleep 60
+ fail $SINGLEMDS
+ # client process should see no problems even though MDS went down
+ sleep $TIMEOUT
+ kill -USR1 $CLIENT_PID
+ wait $CLIENT_PID
+ rc=$?
+ echo writemany returned $rc
+	# these may fail because of eviction due to a slow AST response.
+ [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+}
+run_test 50 "failover MDS under load"
+
+test_51() {
+ #define OBD_FAIL_MDS_SYNC_CAPA_SL 0x1310
+ do_facet ost1 lctl set_param fail_loc=0x00001310
+
+ mkdir -p $DIR/$tdir
+ # put a load of file creates/writes/deletes
+ writemany -q $DIR/$tdir/$tfile 0 5 &
+ CLIENT_PID=$!
+ sleep 1
+ FAILURE_MODE="SOFT"
+ facet_failover $SINGLEMDS
+ # failover at various points during recovery
+ SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))"
+ echo will failover at $SEQ
+ for i in $SEQ
+ do
+ echo failover in $i sec
+ sleep $i
+ facet_failover $SINGLEMDS
+ done
+ # client process should see no problems even though MDS went down
+ # and recovery was interrupted
+ sleep $TIMEOUT
+ kill -USR1 $CLIENT_PID
+ wait $CLIENT_PID
+ rc=$?
+ echo writemany returned $rc
+ [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+}
+run_test 51 "failover MDS during recovery"
+
+test_52_guts() {
+ do_facet client "mkdir -p $DIR/$tdir"
+ do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" &
+ CLIENT_PID=$!
+ echo writemany pid $CLIENT_PID
+ sleep 10
+ FAILURE_MODE="SOFT"
+ fail ost1
+ rc=0
+ wait $CLIENT_PID || rc=$?
+ # active client process should see an EIO for down OST
+ [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; }
+ # but timing or failover setup may allow success
+ [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; }
+ echo "writemany returned $rc"
+ return $rc
+}
+
+test_52() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+ test_52_guts
+ rc=$?
+ [ $rc -ne 0 ] && { return $rc; }
+ # wait for client to reconnect to OST
+ sleep 30
+ test_52_guts
+ rc=$?
+ [ $rc -ne 0 ] && { return $rc; }
+ sleep 30
+ test_52_guts
+ rc=$?
+ client_reconnect
+ #return $rc
+}
+run_test 52 "failover OST under load"
+
+# test of open reconstruct
+test_53() {
+ touch $DIR/$tfile
+	drop_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" || return 2
+}
+run_test 53 "touch: drop rep"
+
+test_54() {
+ zconf_mount `hostname` $MOUNT2
+ touch $DIR/$tfile
+ touch $DIR2/$tfile.1
+ sleep 10
+ cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
+ fail $SINGLEMDS
+ umount $MOUNT2
+ ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
+ [ x"$ERROR" == x ] || error "back in time occured"
+}
+run_test 54 "back in time"
+
+# bug 11330 - liblustre application death during I/O locks up OST
+test_55() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+
+	# first dd should finish quickly
+ lfs setstripe $DIR/$tdir/$tfile-1 -c 1 -i 0
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
+ DDPID=$!
+ count=0
+ echo "step1: testing ......"
+	while true; do
+		[ -z "$(ps x | awk '$1 == '$DDPID' { print $5 }')" ] && break
+		count=$((count + 1))
+		if [ $count -gt 64 ]; then
+			error "dd should be finished!"
+		fi
+		sleep 1
+	done
+	echo "(dd_pid=$DDPID, time=$count) successful"
+
+ lfs setstripe $DIR/$tdir/$tfile-2 -c 1 -i 0
+ #define OBD_FAIL_OST_DROP_REQ 0x21d
+ do_facet ost1 lctl set_param fail_loc=0x0000021d
+	# second dd will never finish while the fail_loc is set
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
+ DDPID=$!
+ count=0
+ echo "step2: testing ......"
+	while [ $count -le 64 ]; do
+		dd_name=$(ps x | awk '$1 == '$DDPID' { print $5 }')
+		if [ -z "$dd_name" ]; then
+			ls -l $DIR/$tdir
+			echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
+			error "dd shouldn't be finished!"
+		fi
+		count=$((count + 1))
+		sleep 1
+	done
+	echo "(dd_pid=$DDPID, time=$count) successful"
+
+	# reset fail_loc so the dd will finish soon
+ do_facet ost1 lctl set_param fail_loc=0
+ count=0
+ echo "step3: testing ......"
+	while true; do
+		[ -z "$(ps x | awk '$1 == '$DDPID' { print $5 }')" ] && break
+		count=$((count + 1))
+		if [ $count -gt 500 ]; then
+			error "dd should be finished!"
+		fi
+		sleep 1
+	done
+	echo "(dd_pid=$DDPID, time=$count) successful"
+
+ rm -rf $DIR/$tdir
+}
+run_test 55 "ost_brw_read/write drops timed-out read/write request"
+
+test_56() { # b=11277
+#define OBD_FAIL_MDS_RESEND 0x136
+ touch $DIR/$tfile
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000136"
+ stat $DIR/$tfile
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ rm -f $DIR/$tfile
+}
+run_test 56 "do not allow reconnect to busy exports"