# Lustre "recovery-small" regression suite — preamble (sampled extract).
# NOTE(review): every content line below carries its original file line
# number (an extraction artifact); many intervening lines are missing,
# so this chunk is not runnable as-is. Helpers such as run_test,
# do_facet, drop_request, etc. come from test-framework.sh sourced below.
# Tests that are always skipped, plus any site-specific exclusions.
6 ALWAYS_EXCEPT="24 27 52 $RECOVERY_SMALL_EXCEPT"
# With SLOW=no, additionally skip the long-running tests.
9 [ "$SLOW" = "no" ] && EXCEPT="$EXCEPT 0 1 2 3 6 7 15 18 24b 25 30 31 32 33 34a "
# Default to full Portals/PTLRPC debug tracing (-1 = all debug flags).
11 PTLDEBUG=${PTLDEBUG:--1}
# Locate the Lustre tree relative to this script unless overridden.
12 LUSTRE=${LUSTRE:-`dirname $0`/..}
# Pull in the shared test harness and the per-configuration settings
# (CONFIG defaults to the cfg file matching $NAME).
13 . $LUSTRE/tests/test-framework.sh
15 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
19 # Allow us to override the setup if we already have a mounted system by
20 # setting SETUP=" " and CLEANUP=" "
22 CLEANUP=${CLEANUP:-""}
# Tear down any stale setup and (re)mount a fresh Lustre filesystem,
# then remove leftover per-test files from previous runs.
24 cleanup_and_setup_lustre
25 rm -rf $DIR/${TESTSUITE}/[df][0-9]* # bug 13798 new t-f tdir stuff
26 rm -rf $DIR/[df][0-9]*
# Tests 1-9 (fragments): basic MDS request/reply-drop recovery checks.
# NOTE(review): the test_N() { ... } wrappers and closing braces are
# missing from this extract; only body lines and their run_test
# registrations survive. drop_request / drop_reply / drop_reint_reply
# are test-framework helpers that run a command while dropping the
# corresponding RPC, forcing the client to resend.
29 drop_request "mcreate $MOUNT/1" || return 1
30 drop_reint_reply "mcreate $MOUNT/2" || return 2
32 run_test 1 "mcreate: drop req, drop rep"
35 drop_request "tchmod 111 $MOUNT/2" || return 1
36 drop_reint_reply "tchmod 666 $MOUNT/2" || return 2
38 run_test 2 "chmod: drop req, drop rep"
41 drop_request "statone $MOUNT/2" || return 1
42 drop_reply "statone $MOUNT/2" || return 2
44 run_test 3 "stat: drop req, drop rep"
# Test 4: exercise the open path — copy a file in, then read it with
# the open request dropped, then with the reply dropped.
47 do_facet client "cp /etc/inittab $MOUNT/inittab" || return 1
48 drop_request "cat $MOUNT/inittab > /dev/null" || return 2
49 drop_reply "cat $MOUNT/inittab > /dev/null" || return 3
51 run_test 4 "open: drop req, drop rep"
54 drop_request "mv $MOUNT/inittab $MOUNT/renamed" || return 1
55 drop_reint_reply "mv $MOUNT/renamed $MOUNT/renamed-again" || return 2
56 do_facet client "checkstat -v $MOUNT/renamed-again" || return 3
58 run_test 5 "rename: drop req, drop rep"
# Tests 6/7 reuse the file left behind by test 5; recreate it (and the
# hard links) if a prior test was skipped.
60 [ ! -e $MOUNT/renamed-again ] && cp /etc/inittab $MOUNT/renamed-again
62 drop_request "mlink $MOUNT/renamed-again $MOUNT/link1" || return 1
63 drop_reint_reply "mlink $MOUNT/renamed-again $MOUNT/link2" || return 2
65 run_test 6 "link: drop req, drop rep"
67 [ ! -e $MOUNT/link1 ] && mlink $MOUNT/renamed-again $MOUNT/link1
68 [ ! -e $MOUNT/link2 ] && mlink $MOUNT/renamed-again $MOUNT/link2
70 drop_request "munlink $MOUNT/link1" || return 1
71 drop_reint_reply "munlink $MOUNT/link2" || return 2
73 run_test 7 "unlink: drop req, drop rep"
77 drop_reint_reply "touch $MOUNT/$tfile" || return 1
79 run_test 8 "touch: drop rep (bug 1423)"
# Test 9: pause the bulk transfer to the OST mid-copy and confirm a
# second write plus sync/remove still succeed (bug 1420).
83 pause_bulk "cp /etc/profile $MOUNT/$tfile" || return 1
84 do_facet client "cp /etc/termcap $MOUNT/${tfile}.2" || return 2
85 do_facet client "sync"
86 do_facet client "rm $MOUNT/$tfile $MOUNT/${tfile}.2" || return 3
88 run_test 9 "pause bulk on OST (bug 1420)"
# Tests 10-12 (fragments): client eviction and timed-out resend paths.
# Test 10: drop the lock blocking callback so the MDS evicts this
# client, then verify the server still finished the original request.
92 do_facet client mcreate $MOUNT/$tfile || return 1
93 drop_bl_callback "chmod 0777 $MOUNT/$tfile" || echo "evicted as expected"
94 # wait for the mds to evict the client
95 #echo "sleep $(($TIMEOUT*2))"
96 #sleep $(($TIMEOUT*2))
# After eviction the touch may fail; the chmod from above must have
# been applied on the server regardless.
97 do_facet client touch $MOUNT/$tfile || echo "touch failed, evicted"
98 do_facet client checkstat -v -p 0777 $MOUNT/$tfile || return 3
99 do_facet client "munlink $MOUNT/$tfile"
101 run_test 10 "finish request on server after client eviction (bug 1521)"
104 # wake up a thread waiting for completion after eviction
# multiop flag strings (e.g. Ow = open+write, or = open+read) drive the
# multiop utility from the test framework.
106 do_facet client multiop $MOUNT/$tfile Ow || return 1
107 do_facet client multiop $MOUNT/$tfile or || return 2
111 do_facet client multiop $MOUNT/$tfile or || return 3
112 drop_bl_callback multiop $MOUNT/$tfile Ow || echo "evicted as expected"
114 do_facet client munlink $MOUNT/$tfile || return 4
116 run_test 11 "wake up a thread waiting for completion after eviction (b=2460)"
# Test 12: make the MDS drop a close RPC (fail_loc 0x115) so ptlrpcd
# must resend it after the timeout; clear_failloc runs in background to
# re-arm normal behavior.
120 $LCTL mark multiop $MOUNT/$tfile OS_c
121 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x115"
122 clear_failloc $SINGLEMDS $((TIMEOUT * 2)) &
123 multiop $MOUNT/$tfile OS_c &
125 #define OBD_FAIL_MDS_CLOSE_NET 0x115
# NOTE(review): the line assigning PID=$! is missing from this extract.
128 echo "waiting for multiop $PID"
129 wait $PID || return 2
130 do_facet client munlink $MOUNT/$tfile || return 3
132 run_test 12 "recover from timed out resend in ptlrpcd (b=2494)"
# Tests 13-17 (fragments): readdir restart/resend, failed open, and
# bulk I/O timeout handling. fail_loc values with the 0x80000000 bit
# set fire once (OBD_FAIL_ONCE) and then clear themselves.
134 # Bug 113, check that readdir lost recv timeout works.
136 mkdir $MOUNT/readdir || return 1
137 touch $MOUNT/readdir/newentry || return
138 # OBD_FAIL_MDS_READPAGE_NET|OBD_FAIL_ONCE
139 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000104"
140 ls $MOUNT/readdir || return 3
141 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
142 rm -rf $MOUNT/readdir || return 4
144 run_test 13 "mdc_readpage restart test (bug 1138)"
146 # Bug 113, check that readdir lost send timeout works.
149 touch $MOUNT/readdir/newentry
150 # OBD_FAIL_MDS_SENDPAGE|OBD_FAIL_ONCE
151 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000106"
152 ls $MOUNT/readdir || return 1
153 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
155 run_test 14 "mdc_readpage resend test (bug 1138)"
# Test 15: an injected open failure must propagate an error to touch
# (hence "&& return 1" — success here would be the bug).
158 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000128"
159 touch $DIR/$tfile && return 1
162 run_test 15 "failed open (-ENOMEM)"
# Test 16 (fragment): save and restore max_read_ahead_mb around a bulk
# PUT timeout; the client must resend the bulk, not get evicted.
164 READ_AHEAD=`cat $LPROC/llite/*/max_read_ahead_mb | head -n 1`
166 for f in $LPROC/llite/*/max_read_ahead_mb; do
# NOTE(review): the loop bodies/closers here are missing from this
# extract (read-ahead is presumably disabled, then restored below).
172 for f in $LPROC/llite/*/max_read_ahead_mb; do
173 echo $READ_AHEAD > $f
178 do_facet client cp /etc/termcap $MOUNT
182 #define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504 | OBD_FAIL_ONCE
183 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000504"
185 # OST bulk will time out here, client resends
186 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 1
187 do_facet ost1 sysctl -w lustre.fail_loc=0
188 # give recovery a chance to finish (shouldn't take long)
190 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 2
193 run_test 16 "timeout bulk put, don't evict client (2732)"
# Test 17: same idea for the bulk GET (write) path.
196 # OBD_FAIL_PTLRPC_BULK_GET_NET 0x0503 | OBD_FAIL_ONCE
197 # OST bulk will time out here, client retries
198 do_facet ost1 sysctl -w lustre.fail_loc=0x80000503
199 # need to ensure we send an RPC
200 do_facet client cp /etc/termcap $DIR/$tfile
204 do_facet ost1 sysctl -w lustre.fail_loc=0
205 do_facet client "df $DIR"
206 # expect cmp to succeed, client resent bulk
207 do_facet client "cmp /etc/termcap $DIR/$tfile" || return 3
208 do_facet client "rm $DIR/$tfile" || return 4
211 run_test 17 "timeout bulk get, don't evict client (2732)"
# Tests 18a/18b/19a/19b (fragments): page-cache invalidation on OSC
# deactivate / eviction, and expired-lock handling on MDS and OST.
# Test 18a needs a second OST so one OSC can be deactivated while the
# filesystem stays usable.
214 [ -z ${ost2_svc} ] && skip "needs 2 osts" && return 0
216 do_facet client mkdir -p $MOUNT/$tdir
217 f=$MOUNT/$tdir/$tfile
220 pgcache_empty || return 1
# Stripe the file onto the second OST (stripe size, offset, count).
223 lfs setstripe $f $((128 * 1024)) 1 1
225 do_facet client cp /etc/termcap $f
# Find the OSC device number for ost2 so it can be toggled via lctl.
227 local osc2dev=`grep ${ost2_svc}-osc- $LPROC/devices | egrep -v 'MDT' | awk '{print $1}'`
228 $LCTL --device $osc2dev deactivate || return 3
229 # my understanding is that there should be nothing in the page
230 # cache after the client reconnects?
232 pgcache_empty || rc=2
233 $LCTL --device $osc2dev activate
237 run_test 18a "manual ost invalidate clears page cache immediately"
# Test 18b: after an eviction + reconnect (triggered by lines missing
# from this extract) the page cache must likewise be empty.
240 do_facet client mkdir -p $MOUNT/$tdir
241 f=$MOUNT/$tdir/$tfile
242 f2=$MOUNT/$tdir/${tfile}-2
245 pgcache_empty || return 1
247 # shouldn't have to set stripe size of count==1
248 lfs setstripe $f $((128 * 1024)) 0 1
249 lfs setstripe $f2 $((128 * 1024)) 0 1
251 do_facet client cp /etc/termcap $f
254 # allow recovery to complete
255 sleep $((TIMEOUT + 2))
256 # my understanding is that there should be nothing in the page
257 # cache after the client reconnects?
259 pgcache_empty || rc=2
263 run_test 18b "eviction and reconnect clears page cache (2766)"
# Test 19a: drop the ldlm cancel so expired_lock_main on the MDS
# evicts the client; the file must still be usable afterwards.
267 do_facet client mcreate $f || return 1
268 drop_ldlm_cancel "chmod 0777 $f" || echo "evicted as expected"
270 do_facet client checkstat -v -p 0777 $f || echo evicted
271 # let the client reconnect
273 do_facet client "munlink $f"
275 run_test 19a "test expired_lock_main on mds (2867)"
# Test 19b: same on the OST, using multiop read/write opens.
279 do_facet client multiop $f Ow || return 1
280 do_facet client multiop $f or || return 2
284 do_facet client multiop $f or || return 3
285 drop_ldlm_cancel multiop $f Ow || echo "client evicted, as expected"
287 do_facet client munlink $f || return 4
289 run_test 19b "test expired_lock_main on ost (2867)"
# Tests 20a/20b and 21a-21h (fragments): ldlm_handle_enqueue error
# paths and the matrix of dropped open/close requests/replies while
# both RPCs are in flight. The recurring fail_loc values (per the
# #define comments in the full file): 0x107 drop request, 0x115 drop
# close request, 0x119/0x122 drop replies, 0x129 stall open.
291 test_20a() { # bug 2983 - ldlm_handle_enqueue cleanup
293 multiop $DIR/$tdir/${tfile} O_wc &
297 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
298 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
299 kill -USR1 $MULTI_PID
# The injected enqueue error must make multiop fail (rc != 0).
302 [ $rc -eq 0 ] && error "multiop didn't fail enqueue: rc $rc" || true
304 run_test 20a "ldlm_handle_enqueue error (should return error)"
306 test_20b() { # bug 2986 - ldlm_handle_enqueue error during open
308 touch $DIR/$tdir/${tfile}
310 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
311 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
312 dd if=/etc/hosts of=$DIR/$tdir/$tfile && \
313 error "didn't fail open enqueue" || true
315 run_test 20b "ldlm_handle_enqueue error (should return error)"
# 21a: drop the close request while an open is also in flight; both
# files must exist afterwards. (multiop O_c holds the file open until
# signalled with USR1.)
318 mkdir -p $DIR/$tdir-1
319 mkdir -p $DIR/$tdir-2
320 multiop $DIR/$tdir-1/f O_c &
323 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
324 multiop $DIR/$tdir-2/f Oc &
327 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
329 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
330 kill -USR1 $close_pid
332 wait $close_pid || return 1
333 wait $open_pid || return 2
334 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
336 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
337 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
341 run_test 21a "drop close request while close and open are both in flight"
# 21b: drop the open request instead.
344 mkdir -p $DIR/$tdir-1
345 mkdir -p $DIR/$tdir-2
346 multiop $DIR/$tdir-1/f O_c &
349 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
350 mcreate $DIR/$tdir-2/f &
353 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
355 kill -USR1 $close_pid
357 wait $close_pid || return 1
358 wait $open_pid || return 3
360 $CHECKSTAT -t file $DIR/$tdir-1/f || return 4
361 $CHECKSTAT -t file $DIR/$tdir-2/f || return 5
364 run_test 21b "drop open request while close and open are both in flight"
# 21c: drop both requests.
367 mkdir -p $DIR/$tdir-1
368 mkdir -p $DIR/$tdir-2
369 multiop $DIR/$tdir-1/f O_c &
372 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
373 mcreate $DIR/$tdir-2/f &
376 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
378 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
379 kill -USR1 $close_pid
381 wait $close_pid || return 1
382 wait $open_pid || return 2
384 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
386 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
387 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
390 run_test 21c "drop both request while close and open are both in flight"
# 21d: drop the close reply.
393 mkdir -p $DIR/$tdir-1
394 mkdir -p $DIR/$tdir-2
395 multiop $DIR/$tdir-1/f O_c &
398 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
399 multiop $DIR/$tdir-2/f Oc &
401 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
403 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
406 wait $pid || return 1
407 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
409 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
410 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
414 run_test 21d "drop close reply while close and open are both in flight"
# 21e: drop the open reply.
417 mkdir -p $DIR/$tdir-1
418 mkdir -p $DIR/$tdir-2
419 multiop $DIR/$tdir-1/f O_c &
422 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
423 touch $DIR/$tdir-2/f &
425 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
429 wait $pid || return 1
432 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
433 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
436 run_test 21e "drop open reply while close and open are both in flight"
# 21f: drop both replies.
439 mkdir -p $DIR/$tdir-1
440 mkdir -p $DIR/$tdir-2
441 multiop $DIR/$tdir-1/f O_c &
444 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
445 touch $DIR/$tdir-2/f &
447 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
449 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
452 wait $pid || return 1
453 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
455 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
456 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
459 run_test 21f "drop both reply while close and open are both in flight"
# 21g: drop open reply and close request.
462 mkdir -p $DIR/$tdir-1
463 mkdir -p $DIR/$tdir-2
464 multiop $DIR/$tdir-1/f O_c &
467 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
468 touch $DIR/$tdir-2/f &
470 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
472 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
475 wait $pid || return 1
476 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
478 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
479 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
482 run_test 21g "drop open reply and close request while close and open are both in flight"
# 21h: drop open request and close reply.
485 mkdir -p $DIR/$tdir-1
486 mkdir -p $DIR/$tdir-2
487 multiop $DIR/$tdir-1/f O_c &
490 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
491 touch $DIR/$tdir-2/f &
494 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
496 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
499 wait $pid || return 1
500 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
502 wait $touch_pid || return 2
504 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
505 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
508 run_test 21h "drop open request and close reply while close and open are both in flight"
# Tests 22-28 (fragments): mixed MDC requests, close-after-crash,
# fsync error propagation, pinger-driven eviction, and client-add
# failure handling.
510 # bug 3462 - multiple MDC requests
# Test 22: drop a close request while issuing an mknod (multiop "msu"
# = mknod/stat/unlink) on a second file; both must complete.
515 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
520 multiop $f1 msu || return 1
523 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
525 wait $close_pid || return 2
526 rm -rf $f2 || return 4
528 run_test 22 "drop close request and do mknod"
# Test 23: a close issued while the MDS is unreachable must not hang
# the client (bug reference in run_test description).
531 multiop $DIR/$tfile O_c &
533 # give a chance for open
537 drop_request "kill -USR1 $pid"
540 wait $pid || return 1
543 run_test 23 "client hang when close a file after mds crash"
545 test_24() { # bug 2248 - eviction fails writeback but app doesn't see it
548 multiop $DIR/$tdir/$tfile Owy_wyc &
553 kill -USR1 $MULTI_PID
556 sysctl -w lustre.fail_loc=0x0
# fsync after the injected failure must return an error to the app.
558 [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true
560 run_test 24 "fsync error (should return error)"
562 test_26() { # bug 5921 - evict dead exports by pinger
563 # this test can only run from a client on a separate node.
564 remote_ost || skip "local OST" && return
565 remote_mds || skip "local MDS" && return
# Record the OST export count, silently drop this client's pings
# (fail_loc 0x505), and expect the evictor to reap the dead export.
566 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
567 OST_EXP="`do_facet ost1 cat $OST_FILE`"
568 OST_NEXP1=`echo $OST_EXP | cut -d' ' -f2`
569 echo starting with $OST_NEXP1 OST exports
570 # OBD_FAIL_PTLRPC_DROP_RPC 0x505
571 do_facet client sysctl -w lustre.fail_loc=0x505
572 # evictor takes up to 2.25x to evict. But if there's a
573 # race to start the evictor from various obds, the loser
574 # might have to wait for the next ping.
575 echo Waiting for $(($TIMEOUT * 4)) secs
576 sleep $(($TIMEOUT * 4))
577 OST_EXP="`do_facet ost1 cat $OST_FILE`"
578 OST_NEXP2=`echo $OST_EXP | cut -d' ' -f2`
579 echo ending with $OST_NEXP2 OST exports
580 do_facet client sysctl -w lustre.fail_loc=0x0
# Export count must have dropped, i.e. the dead client was evicted.
581 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted"
584 run_test 26 "evict dead exports"
586 test_26b() { # bug 10140 - evict dead exports by pinger
# Mount a second client, force-unmount it (simulating death), and
# verify both MDS and OST export counts drop after the eviction window.
588 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
589 sleep 1 # wait connections being established
590 MDS_FILE=$LPROC/mdt/${mds1_svc}/num_exports
591 MDS_NEXP1="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
592 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
593 OST_NEXP1="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
594 echo starting with $OST_NEXP1 OST and $MDS_NEXP1 MDS exports
595 zconf_umount `hostname` $MOUNT2 -f
596 # evictor takes up to 2.25x to evict. But if there's a
597 # race to start the evictor from various obds, the loser
598 # might have to wait for the next ping.
599 echo Waiting for $(($TIMEOUT * 4)) secs
600 sleep $(($TIMEOUT * 4))
601 OST_NEXP2="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
602 MDS_NEXP2="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
603 echo ending with $OST_NEXP2 OST and $MDS_NEXP2 MDS exports
604 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted from OST"
605 [ $MDS_NEXP1 -le $MDS_NEXP2 ] && error "client not evicted from MDS"
608 run_test 26b "evict dead exports"
# Test 27: fail the MDS twice while writemany generates load, with an
# OSC-shutdown fail_loc armed between failovers.
611 remote_mds && { skip "remote MDS" && return 0; }
613 writemany -q -a $DIR/$tdir/$tfile 0 5 &
617 facet_failover $SINGLEMDS
618 #define OBD_FAIL_OSC_SHUTDOWN 0x407
619 sysctl -w lustre.fail_loc=0x80000407
620 # need to wait for reconnect
621 echo -n waiting for fail_loc
# -2147482617 is 0x80000407 as a signed 32-bit int; spin until the
# one-shot fail_loc has fired and been cleared.
622 while [ `sysctl -n lustre.fail_loc` -eq -2147482617 ]; do
626 facet_failover $SINGLEMDS
628 kill -USR1 $CLIENT_PID
632 run_test 27 "fail LOV while using OSC's"
634 test_28() { # bug 6086 - error adding new clients
635 do_facet client mcreate $MOUNT/$tfile || return 1
636 drop_bl_callback "chmod 0777 $MOUNT/$tfile" ||echo "evicted as expected"
637 #define OBD_FAIL_MDS_CLIENT_ADD 0x12f
638 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012f"
639 # fail once (evicted), reconnect fail (fail_loc), ok
640 df || (sleep 10; df) || (sleep 10; df) || error "reconnect failed"
642 fail $SINGLEMDS # verify MDS last_rcvd can be loaded
644 run_test 28 "handle error adding new clients (bug 6086)"
# Tests 50-55 (fragments): failover under load, open reconstruct,
# transno "back in time" detection, and dropped bulk read/write
# requests on the OST.
# Test 50: fail over the MDS (twice, per the description) while
# writemany generates create/write/delete load; the client workload
# should survive.
648 # put a load of file creates/writes/deletes
649 writemany -q $DIR/$tdir/$tfile 0 5 &
651 echo writemany pid $CLIENT_PID
655 # wait for client to reconnect to MDS
660 # client process should see no problems even though MDS went down
662 kill -USR1 $CLIENT_PID
665 echo writemany returned $rc
666 #these may fail because of eviction due to slow AST response.
667 [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
669 run_test 50 "failover MDS under load"
# Test 51: fail the MDS repeatedly at staggered points DURING its own
# recovery window (SEQ holds the failover delays, in seconds).
673 # put a load of file creates/writes/deletes
674 writemany -q $DIR/$tdir/$tfile 0 5 &
678 facet_failover $SINGLEMDS
679 # failover at various points during recovery
680 SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))"
681 echo will failover at $SEQ
684 echo failover in $i sec
686 facet_failover $SINGLEMDS
688 # client process should see no problems even though MDS went down
689 # and recovery was interrupted
691 kill -USR1 $CLIENT_PID
694 echo writemany returned $rc
695 [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
697 run_test 51 "failover MDS during recovery"
# Test 52: fail over an OST while writemany runs; EIO (rc 5) is the
# expected outcome for the active writer, but clean success is also
# tolerated depending on timing.
700 do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" &
702 echo writemany pid $CLIENT_PID
707 wait $CLIENT_PID || rc=$?
708 # active client process should see an EIO for down OST
709 [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; }
710 # but timing or failover setup may allow success
711 [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; }
712 echo "writemany returned $rc"
720 [ $rc -ne 0 ] && { return $rc; }
721 # wait for client to reconnect to OST
725 [ $rc -ne 0 ] && { return $rc; }
732 run_test 52 "failover OST under load"
734 # test of open reconstruct
# Test 53: drop the ldlm reply to an open-create so the MDS must
# reconstruct the open on resend.
737 drop_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\
740 run_test 53 "touch: drop rep"
# Test 54: a failed access from a second mount must not push the
# transaction number "back in time" (checked via dmesg).
743 zconf_mount `hostname` $MOUNT2
747 cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
750 ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
751 [ x"$ERROR" == x ] || error "back in time occured"
753 run_test 54 "back in time"
755 # bug 11330 - liblustre application death during I/O locks up OST
# Test 55: three dd phases — (1) must finish promptly, (2) must hang
# while the OST drops its requests (fail_loc 0x21d), (3) must finish
# once the fail_loc is cleared. Each phase polls `ps` for the dd pid.
757 remote_ost && { skip "remote OST" && return 0; }
761 # first dd should be finished quickly
762 dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
765 echo "step1: testing ......"
767 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
769 if [ $count -gt 64 ]; then
770 error "dd should be finished!"
774 echo "(dd_pid=$DDPID, time=$count)successful"
776 #define OBD_FAIL_OST_DROP_REQ 0x21d
777 do_facet ost sysctl -w lustre.fail_loc=0x0000021d
778 # second dd will be never finished
779 dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
782 echo "step2: testing ......"
783 while [ $count -le 64 ]; do
784 dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`"
785 if [ -z $dd_name ]; then
787 echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
788 error "dd shouldn't be finished!"
793 echo "(dd_pid=$DDPID, time=$count)successful"
795 #Recover fail_loc and dd will finish soon
796 do_facet ost sysctl -w lustre.fail_loc=0
798 echo "step3: testing ......"
800 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
802 if [ $count -gt 500 ]; then
803 error "dd should be finished!"
807 echo "(dd_pid=$DDPID, time=$count)successful"
811 run_test 55 "ost_brw_read/write drops timed-out read/write request"
# Tests 56-59 (fragments) and the suite footer.
813 test_56() { # b=11277
814 #define OBD_FAIL_MDS_RESEND 0x136
816 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000136"
818 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
821 run_test 56 "do not allow reconnect to busy exports"
# Helper loop (presumably part of test 57 — opener missing from this
# extract): keep reading every /proc/fs/lustre file while any osc
# device still exists, to race procfs reads against teardown.
824 # no oscs means no client or mdt
825 while [ -e $LPROC/osc ]; do
826 for f in `find $LPROC -type f`; do
827 cat $f > /dev/null 2>&1
832 test_57() { # bug 10866
836 #define OBD_FAIL_LPROC_REMOVE 0xB00
837 sysctl -w lustre.fail_loc=0x80000B00
838 zconf_umount `hostname` $DIR
839 sysctl -w lustre.fail_loc=0x80000B00
840 fail_abort $SINGLEMDS
842 sysctl -w lustre.fail_loc=0
844 do_facet client "df $DIR"
846 run_test 57 "read procfs entries causes kernel crash"
848 test_58() { # bug 11546
849 #define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
# Pause mid-enqueue, then evict the client (drop_bl_callback) while
# the open RPC reply is still being processed.
852 sysctl -w lustre.fail_loc=0x80000801
853 cp $MOUNT/$tfile /dev/null &
856 sysctl -w lustre.fail_loc=0
857 drop_bl_callback rm -f $MOUNT/$tfile
859 do_facet client "df $DIR"
861 run_test 58 "Eviction in the middle of open RPC reply processing"
863 test_59() { # bug 10589
# Race a lock cancel against eviction: blocks written via $MOUNT2 must
# equal blocks readable via $MOUNT after the forced unmount.
864 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
865 #define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
866 sysctl -w lustre.fail_loc=0x311
867 writes=`dd if=/dev/zero of=$DIR2/$tfile count=1 2>&1 | awk 'BEGIN { FS="+" } /out/ {print $1}'`
868 sysctl -w lustre.fail_loc=0
870 zconf_umount `hostname` $DIR2 -f
871 reads=`dd if=$DIR/$tfile of=/dev/null 2>&1 | awk 'BEGIN { FS="+" } /in/ {print $1}'`
872 [ $reads -eq $writes ] || error "read" $reads "blocks, must be" $writes
874 run_test 59 "Read cancel race on client eviction"
# Suite footer: announce completion, clean up, and dump the suite log.
876 equals_msg `basename $0`: test complete, cleaning up
877 check_and_cleanup_lustre
878 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true