6 ALWAYS_EXCEPT="24 27 52 $RECOVERY_SMALL_EXCEPT"
8 # also long tests: 19, 21a, 21e, 21f, 23, 27
10 [ "$SLOW" = "no" ] && EXCEPT_SLOW="17 26b 50 51 57"
12 PTLDEBUG=${PTLDEBUG:--1}
13 LUSTRE=${LUSTRE:-`dirname $0`/..}
14 . $LUSTRE/tests/test-framework.sh
16 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
20 # Allow us to override the setup if we already have a mounted system by
21 # setting SETUP=" " and CLEANUP=" "
23 CLEANUP=${CLEANUP:-""}
25 cleanup_and_setup_lustre
26 rm -rf $DIR/${TESTSUITE}/[df][0-9]* # bug 13798 new t-f tdir staff
27 rm -rf $DIR/[df][0-9]*
30 drop_request "mcreate $MOUNT/1" || return 1
31 drop_reint_reply "mcreate $MOUNT/2" || return 2
33 run_test 1 "mcreate: drop req, drop rep"
36 drop_request "tchmod 111 $MOUNT/2" || return 1
37 drop_reint_reply "tchmod 666 $MOUNT/2" || return 2
39 run_test 2 "chmod: drop req, drop rep"
42 drop_request "statone $MOUNT/2" || return 1
43 drop_reply "statone $MOUNT/2" || return 2
45 run_test 3 "stat: drop req, drop rep"
48 do_facet client "cp /etc/inittab $MOUNT/inittab" || return 1
49 drop_request "cat $MOUNT/inittab > /dev/null" || return 2
50 drop_reply "cat $MOUNT/inittab > /dev/null" || return 3
52 run_test 4 "open: drop req, drop rep"
55 drop_request "mv $MOUNT/inittab $MOUNT/renamed" || return 1
56 drop_reint_reply "mv $MOUNT/renamed $MOUNT/renamed-again" || return 2
57 do_facet client "checkstat -v $MOUNT/renamed-again" || return 3
59 run_test 5 "rename: drop req, drop rep"
61 [ ! -e $MOUNT/renamed-again ] && cp /etc/inittab $MOUNT/renamed-again
63 drop_request "mlink $MOUNT/renamed-again $MOUNT/link1" || return 1
64 drop_reint_reply "mlink $MOUNT/renamed-again $MOUNT/link2" || return 2
66 run_test 6 "link: drop req, drop rep"
68 [ ! -e $MOUNT/link1 ] && mlink $MOUNT/renamed-again $MOUNT/link1
69 [ ! -e $MOUNT/link2 ] && mlink $MOUNT/renamed-again $MOUNT/link2
71 drop_request "munlink $MOUNT/link1" || return 1
72 drop_reint_reply "munlink $MOUNT/link2" || return 2
74 run_test 7 "unlink: drop req, drop rep"
78 drop_reint_reply "touch $MOUNT/$tfile" || return 1
80 run_test 8 "touch: drop rep (bug 1423)"
84 pause_bulk "cp /etc/profile $MOUNT/$tfile" || return 1
85 do_facet client "cp /etc/termcap $MOUNT/${tfile}.2" || return 2
86 do_facet client "sync"
87 do_facet client "rm $MOUNT/$tfile $MOUNT/${tfile}.2" || return 3
89 run_test 9 "pause bulk on OST (bug 1420)"
93 do_facet client mcreate $MOUNT/$tfile || return 1
94 drop_bl_callback "chmod 0777 $MOUNT/$tfile" || echo "evicted as expected"
95 # wait for the mds to evict the client
96 #echo "sleep $(($TIMEOUT*2))"
97 #sleep $(($TIMEOUT*2))
98 do_facet client touch $MOUNT/$tfile || echo "touch failed, evicted"
99 do_facet client checkstat -v -p 0777 $MOUNT/$tfile || return 3
100 do_facet client "munlink $MOUNT/$tfile"
102 run_test 10 "finish request on server after client eviction (bug 1521)"
105 # wake up a thread waiting for completion after eviction
107 do_facet client multiop $MOUNT/$tfile Ow || return 1
108 do_facet client multiop $MOUNT/$tfile or || return 2
112 do_facet client multiop $MOUNT/$tfile or || return 3
113 drop_bl_callback multiop $MOUNT/$tfile Ow || echo "evicted as expected"
115 do_facet client munlink $MOUNT/$tfile || return 4
117 run_test 11 "wake up a thread waiting for completion after eviction (b=2460)"
121 $LCTL mark multiop $MOUNT/$tfile OS_c
122 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x115"
123 clear_failloc $SINGLEMDS $((TIMEOUT * 2)) &
124 multiop $MOUNT/$tfile OS_c &
126 #define OBD_FAIL_MDS_CLOSE_NET 0x115
129 echo "waiting for multiop $PID"
130 wait $PID || return 2
131 do_facet client munlink $MOUNT/$tfile || return 3
133 run_test 12 "recover from timed out resend in ptlrpcd (b=2494)"
135 # Bug 1138, check that readdir lost recv timeout works.
137 mkdir $MOUNT/readdir || return 1
138 touch $MOUNT/readdir/newentry || return
139 # OBD_FAIL_MDS_READPAGE_NET|OBD_FAIL_ONCE
140 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000104"
141 ls $MOUNT/readdir || return 3
142 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
143 rm -rf $MOUNT/readdir || return 4
145 run_test 13 "mdc_readpage restart test (bug 1138)"
147 # Bug 1138, check that readdir lost send timeout works.
150 touch $MOUNT/readdir/newentry
151 # OBD_FAIL_MDS_SENDPAGE|OBD_FAIL_ONCE
152 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000106"
153 ls $MOUNT/readdir || return 1
154 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
156 run_test 14 "mdc_readpage resend test (bug 1138)"
159 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000128"
160 touch $DIR/$tfile && return 1
163 run_test 15 "failed open (-ENOMEM)"
165 READ_AHEAD=`cat $LPROC/llite/*/max_read_ahead_mb | head -n 1`
167 for f in $LPROC/llite/*/max_read_ahead_mb; do
173 for f in $LPROC/llite/*/max_read_ahead_mb; do
174 echo $READ_AHEAD > $f
179 do_facet client cp /etc/termcap $MOUNT
183 #define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504 | OBD_FAIL_ONCE
184 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000504"
186 # OST bulk will time out here, client resends
187 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 1
188 do_facet ost1 sysctl -w lustre.fail_loc=0
189 # give recovery a chance to finish (shouldn't take long)
191 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 2
194 run_test 16 "timeout bulk put, don't evict client (2732)"
197 # OBD_FAIL_PTLRPC_BULK_GET_NET 0x0503 | OBD_FAIL_ONCE
198 # OST bulk will time out here, client retries
199 do_facet ost1 sysctl -w lustre.fail_loc=0x80000503
200 # need to ensure we send an RPC
201 do_facet client cp /etc/termcap $DIR/$tfile
205 do_facet ost1 sysctl -w lustre.fail_loc=0
206 do_facet client "df $DIR"
207 # expect cmp to succeed, client resent bulk
208 do_facet client "cmp /etc/termcap $DIR/$tfile" || return 3
209 do_facet client "rm $DIR/$tfile" || return 4
212 run_test 17 "timeout bulk get, don't evict client (2732)"
215 [ -z ${ost2_svc} ] && skip "needs 2 osts" && return 0
217 do_facet client mkdir -p $MOUNT/$tdir
218 f=$MOUNT/$tdir/$tfile
221 pgcache_empty || return 1
224 lfs setstripe $f $((128 * 1024)) 1 1
226 do_facet client cp /etc/termcap $f
228 local osc2dev=`grep ${ost2_svc}-osc- $LPROC/devices | egrep -v 'MDT' | awk '{print $1}'`
229 $LCTL --device $osc2dev deactivate || return 3
230 # my understanding is that there should be nothing in the page
231 # cache after the client reconnects?
233 pgcache_empty || rc=2
234 $LCTL --device $osc2dev activate
238 run_test 18a "manual ost invalidate clears page cache immediately"
241 do_facet client mkdir -p $MOUNT/$tdir
242 f=$MOUNT/$tdir/$tfile
243 f2=$MOUNT/$tdir/${tfile}-2
246 pgcache_empty || return 1
248 # shouldn't have to set stripe size of count==1
249 lfs setstripe $f $((128 * 1024)) 0 1
250 lfs setstripe $f2 $((128 * 1024)) 0 1
252 do_facet client cp /etc/termcap $f
255 # allow recovery to complete
256 sleep $((TIMEOUT + 2))
257 # my understanding is that there should be nothing in the page
258 # cache after the client reconnects?
260 pgcache_empty || rc=2
264 run_test 18b "eviction and reconnect clears page cache (2766)"
268 do_facet client mcreate $f || return 1
269 drop_ldlm_cancel "chmod 0777 $f" || echo "evicted as expected"
271 do_facet client checkstat -v -p 0777 $f || echo evicted
272 # let the client reconnect
274 do_facet client "munlink $f"
276 run_test 19a "test expired_lock_main on mds (2867)"
280 do_facet client multiop $f Ow || return 1
281 do_facet client multiop $f or || return 2
285 do_facet client multiop $f or || return 3
286 drop_ldlm_cancel multiop $f Ow || echo "client evicted, as expected"
288 do_facet client munlink $f || return 4
290 run_test 19b "test expired_lock_main on ost (2867)"
292 test_20a() { # bug 2983 - ldlm_handle_enqueue cleanup
294 multiop $DIR/$tdir/${tfile} O_wc &
298 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
299 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
300 kill -USR1 $MULTI_PID
303 [ $rc -eq 0 ] && error "multiop didn't fail enqueue: rc $rc" || true
305 run_test 20a "ldlm_handle_enqueue error (should return error)"
307 test_20b() { # bug 2986 - ldlm_handle_enqueue error during open
309 touch $DIR/$tdir/${tfile}
311 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
312 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
313 dd if=/etc/hosts of=$DIR/$tdir/$tfile && \
314 error "didn't fail open enqueue" || true
316 run_test 20b "ldlm_handle_enqueue error (should return error)"
319 mkdir -p $DIR/$tdir-1
320 mkdir -p $DIR/$tdir-2
321 multiop $DIR/$tdir-1/f O_c &
324 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
325 multiop $DIR/$tdir-2/f Oc &
328 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
330 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
331 kill -USR1 $close_pid
333 wait $close_pid || return 1
334 wait $open_pid || return 2
335 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
337 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
338 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
342 run_test 21a "drop close request while close and open are both in flight"
345 mkdir -p $DIR/$tdir-1
346 mkdir -p $DIR/$tdir-2
347 multiop $DIR/$tdir-1/f O_c &
350 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
351 mcreate $DIR/$tdir-2/f &
354 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
356 kill -USR1 $close_pid
358 wait $close_pid || return 1
359 wait $open_pid || return 3
361 $CHECKSTAT -t file $DIR/$tdir-1/f || return 4
362 $CHECKSTAT -t file $DIR/$tdir-2/f || return 5
365 run_test 21b "drop open request while close and open are both in flight"
368 mkdir -p $DIR/$tdir-1
369 mkdir -p $DIR/$tdir-2
370 multiop $DIR/$tdir-1/f O_c &
373 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
374 mcreate $DIR/$tdir-2/f &
377 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
379 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
380 kill -USR1 $close_pid
382 wait $close_pid || return 1
383 wait $open_pid || return 2
385 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
387 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
388 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
391 run_test 21c "drop both request while close and open are both in flight"
394 mkdir -p $DIR/$tdir-1
395 mkdir -p $DIR/$tdir-2
396 multiop $DIR/$tdir-1/f O_c &
399 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
400 multiop $DIR/$tdir-2/f Oc &
402 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
404 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
407 wait $pid || return 1
408 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
410 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
411 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
415 run_test 21d "drop close reply while close and open are both in flight"
418 mkdir -p $DIR/$tdir-1
419 mkdir -p $DIR/$tdir-2
420 multiop $DIR/$tdir-1/f O_c &
423 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
424 touch $DIR/$tdir-2/f &
426 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
430 wait $pid || return 1
433 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
434 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
437 run_test 21e "drop open reply while close and open are both in flight"
440 mkdir -p $DIR/$tdir-1
441 mkdir -p $DIR/$tdir-2
442 multiop $DIR/$tdir-1/f O_c &
445 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
446 touch $DIR/$tdir-2/f &
448 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
450 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
453 wait $pid || return 1
454 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
456 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
457 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
460 run_test 21f "drop both reply while close and open are both in flight"
463 mkdir -p $DIR/$tdir-1
464 mkdir -p $DIR/$tdir-2
465 multiop $DIR/$tdir-1/f O_c &
468 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
469 touch $DIR/$tdir-2/f &
471 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
473 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
476 wait $pid || return 1
477 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
479 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
480 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
483 run_test 21g "drop open reply and close request while close and open are both in flight"
486 mkdir -p $DIR/$tdir-1
487 mkdir -p $DIR/$tdir-2
488 multiop $DIR/$tdir-1/f O_c &
491 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
492 touch $DIR/$tdir-2/f &
495 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
497 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
500 wait $pid || return 1
501 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
503 wait $touch_pid || return 2
505 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
506 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
509 run_test 21h "drop open request and close reply while close and open are both in flight"
511 # bug 3462 - multiple MDC requests
516 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
521 multiop $f1 msu || return 1
524 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
526 wait $close_pid || return 2
527 rm -rf $f2 || return 4
529 run_test 22 "drop close request and do mknod"
532 multiop $DIR/$tfile O_c &
534 # give a chance for open
538 drop_request "kill -USR1 $pid"
541 wait $pid || return 1
544 run_test 23 "client hang when close a file after mds crash"
546 test_24() { # bug 2248 - eviction fails writeback but app doesn't see it
549 multiop $DIR/$tdir/$tfile Owy_wyc &
554 kill -USR1 $MULTI_PID
557 sysctl -w lustre.fail_loc=0x0
559 [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true
561 run_test 24 "fsync error (should return error)"
563 test_26() { # bug 5921 - evict dead exports by pinger
564 # this test can only run from a client on a separate node.
565 remote_ost || skip "local OST" && return
566 remote_mds || skip "local MDS" && return
567 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
568 OST_EXP="`do_facet ost1 cat $OST_FILE`"
569 OST_NEXP1=`echo $OST_EXP | cut -d' ' -f2`
570 echo starting with $OST_NEXP1 OST exports
571 # OBD_FAIL_PTLRPC_DROP_RPC 0x505
572 do_facet client sysctl -w lustre.fail_loc=0x505
573 # evictor takes up to 2.25x to evict. But if there's a
574 # race to start the evictor from various obds, the loser
575 # might have to wait for the next ping.
576 echo Waiting for $(($TIMEOUT * 4)) secs
577 sleep $(($TIMEOUT * 4))
578 OST_EXP="`do_facet ost1 cat $OST_FILE`"
579 OST_NEXP2=`echo $OST_EXP | cut -d' ' -f2`
580 echo ending with $OST_NEXP2 OST exports
581 do_facet client sysctl -w lustre.fail_loc=0x0
582 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted"
585 run_test 26 "evict dead exports"
587 test_26b() { # bug 10140 - evict dead exports by pinger
589 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
590 sleep 1 # wait connections being established
591 MDS_FILE=$LPROC/mdt/${mds1_svc}/num_exports
592 MDS_NEXP1="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
593 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
594 OST_NEXP1="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
595 echo starting with $OST_NEXP1 OST and $MDS_NEXP1 MDS exports
596 zconf_umount `hostname` $MOUNT2 -f
597 # evictor takes up to 2.25x to evict. But if there's a
598 # race to start the evictor from various obds, the loser
599 # might have to wait for the next ping.
600 echo Waiting for $(($TIMEOUT * 4)) secs
601 sleep $(($TIMEOUT * 4))
602 OST_NEXP2="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
603 MDS_NEXP2="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
604 echo ending with $OST_NEXP2 OST and $MDS_NEXP2 MDS exports
605 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted from OST"
606 [ $MDS_NEXP1 -le $MDS_NEXP2 ] && error "client not evicted from MDS"
609 run_test 26b "evict dead exports"
612 remote_mds && { skip "remote MDS" && return 0; }
614 writemany -q -a $DIR/$tdir/$tfile 0 5 &
618 facet_failover $SINGLEMDS
619 #define OBD_FAIL_OSC_SHUTDOWN 0x407
620 sysctl -w lustre.fail_loc=0x80000407
621 # need to wait for reconnect
622 echo -n waiting for fail_loc
623 while [ `sysctl -n lustre.fail_loc` -eq -2147482617 ]; do
627 facet_failover $SINGLEMDS
629 kill -USR1 $CLIENT_PID
633 run_test 27 "fail LOV while using OSC's"
635 test_28() { # bug 6086 - error adding new clients
636 do_facet client mcreate $MOUNT/$tfile || return 1
637 drop_bl_callback "chmod 0777 $MOUNT/$tfile" ||echo "evicted as expected"
638 #define OBD_FAIL_MDS_CLIENT_ADD 0x12f
639 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012f"
640 # fail once (evicted), reconnect fail (fail_loc), ok
641 df || (sleep 10; df) || (sleep 10; df) || error "reconnect failed"
643 fail $SINGLEMDS # verify MDS last_rcvd can be loaded
645 run_test 28 "handle error adding new clients (bug 6086)"
649 # put a load of file creates/writes/deletes
650 writemany -q $DIR/$tdir/$tfile 0 5 &
652 echo writemany pid $CLIENT_PID
656 # wait for client to reconnect to MDS
661 # client process should see no problems even though MDS went down
663 kill -USR1 $CLIENT_PID
666 echo writemany returned $rc
667 #these may fail because of eviction due to slow AST response.
668 [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
670 run_test 50 "failover MDS under load"
674 # put a load of file creates/writes/deletes
675 writemany -q $DIR/$tdir/$tfile 0 5 &
679 facet_failover $SINGLEMDS
680 # failover at various points during recovery
681 SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))"
682 echo will failover at $SEQ
685 echo failover in $i sec
687 facet_failover $SINGLEMDS
689 # client process should see no problems even though MDS went down
690 # and recovery was interrupted
692 kill -USR1 $CLIENT_PID
695 echo writemany returned $rc
696 [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
698 run_test 51 "failover MDS during recovery"
701 do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" &
703 echo writemany pid $CLIENT_PID
708 wait $CLIENT_PID || rc=$?
709 # active client process should see an EIO for down OST
710 [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; }
711 # but timing or failover setup may allow success
712 [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; }
713 echo "writemany returned $rc"
721 [ $rc -ne 0 ] && { return $rc; }
722 # wait for client to reconnect to OST
726 [ $rc -ne 0 ] && { return $rc; }
733 run_test 52 "failover OST under load"
735 # test of open reconstruct
738 drop_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\
741 run_test 53 "touch: drop rep"
744 zconf_mount `hostname` $MOUNT2
748 cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
751 ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
752 [ x"$ERROR" == x ] || error "back in time occured"
754 run_test 54 "back in time"
756 # bug 11330 - liblustre application death during I/O locks up OST
758 remote_ost && { skip "remote OST" && return 0; }
762 # first dd should be finished quickly
763 dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
766 echo "step1: testing ......"
768 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
770 if [ $count -gt 64 ]; then
771 error "dd should be finished!"
775 echo "(dd_pid=$DDPID, time=$count)successful"
777 #define OBD_FAIL_OST_DROP_REQ 0x21d
778 do_facet ost sysctl -w lustre.fail_loc=0x0000021d
779 # second dd will be never finished
780 dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
783 echo "step2: testing ......"
784 while [ $count -le 64 ]; do
785 dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`"
786 if [ -z $dd_name ]; then
788 echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
789 error "dd shouldn't be finished!"
794 echo "(dd_pid=$DDPID, time=$count)successful"
796 #Recover fail_loc and dd will finish soon
797 do_facet ost sysctl -w lustre.fail_loc=0
799 echo "step3: testing ......"
801 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
803 if [ $count -gt 500 ]; then
804 error "dd should be finished!"
808 echo "(dd_pid=$DDPID, time=$count)successful"
812 run_test 55 "ost_brw_read/write drops timed-out read/write request"
814 test_56() { # b=11277
815 #define OBD_FAIL_MDS_RESEND 0x136
817 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000136"
819 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
822 run_test 56 "do not allow reconnect to busy exports"
825 # no oscs means no client or mdt
826 while [ -e $LPROC/osc ]; do
827 for f in `find $LPROC -type f`; do
828 cat $f > /dev/null 2>&1
833 test_57() { # bug 10866
837 #define OBD_FAIL_LPROC_REMOVE 0xB00
838 sysctl -w lustre.fail_loc=0x80000B00
839 zconf_umount `hostname` $DIR
840 sysctl -w lustre.fail_loc=0x80000B00
841 fail_abort $SINGLEMDS
843 sysctl -w lustre.fail_loc=0
845 do_facet client "df $DIR"
847 run_test 57 "read procfs entries causes kernel crash"
849 test_58() { # bug 11546
850 #define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
853 sysctl -w lustre.fail_loc=0x80000801
854 cp $MOUNT/$tfile /dev/null &
857 sysctl -w lustre.fail_loc=0
858 drop_bl_callback rm -f $MOUNT/$tfile
860 do_facet client "df $DIR"
862 run_test 58 "Eviction in the middle of open RPC reply processing"
864 test_59() { # bug 10589
865 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
866 #define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
867 sysctl -w lustre.fail_loc=0x311
868 writes=`dd if=/dev/zero of=$DIR2/$tfile count=1 2>&1 | awk 'BEGIN { FS="+" } /out/ {print $1}'`
869 sysctl -w lustre.fail_loc=0
871 zconf_umount `hostname` $DIR2 -f
872 reads=`dd if=$DIR/$tfile of=/dev/null 2>&1 | awk 'BEGIN { FS="+" } /in/ {print $1}'`
873 [ $reads -eq $writes ] || error "read" $reads "blocks, must be" $writes
875 run_test 59 "Read cancel race on client eviction"
877 equals_msg `basename $0`: test complete, cleaning up
878 check_and_cleanup_lustre
879 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true