# NOTE(review): fragmentary extraction of Lustre's recovery-small.sh.
# Each line carries its original file line number and many lines are
# missing (function openers/closers, if/fi bodies), so this text is not
# runnable as-is; comments below document only what is visible.
# Tests 24, 27, 52 are excluded from the run, plus any listed in the
# RECOVERY_SMALL_EXCEPT environment variable.
6 ALWAYS_EXCEPT="24 27 52 $RECOVERY_SMALL_EXCEPT"
# Default Portals debug mask to -1 (all debugging) unless overridden.
8 PTLDEBUG=${PTLDEBUG:--1}
# Locate the Lustre tree relative to this script and pull in the shared
# test framework and per-configuration settings.
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
12 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
16 # Allow us to override the setup if we already have a mounted system by
17 # setting SETUP=" " and CLEANUP=" "
18 SETUP=${SETUP:-"setup"}
19 CLEANUP=${CLEANUP:-"cleanup"}
# Reformat all targets only when the caller asked for it via REFORMAT.
22 [ "$REFORMAT" ] && formatall
27 cleanupall || { echo "FAILed to clean up"; exit 20; }
# NOTE(review): the bodies of the following two if-blocks are not part of
# this extraction; only their conditions survive.
30 if [ ! -z "$EVAL" ]; then
35 if [ "$ONLY" == "cleanup" ]; then
36 sysctl -w lnet.debug=0 || true
# ONLY=setup means: mount everything and stop before running any test.
43 [ "$ONLY" == "setup" ] && exit
# test_1 (fragment; enclosing test_1() { ... } lines are missing):
# mcreate must survive both a dropped MDS request and a dropped
# reintegration reply — drop_request/drop_reint_reply inject the loss
# and re-run the command through recovery.
46 drop_request "mcreate $MOUNT/1" || return 1
47 drop_reint_reply "mcreate $MOUNT/2" || return 2
49 run_test 1 "mcreate: drop req, drop rep"
# test_2 (fragment): chmod must survive a dropped request and a dropped
# reintegration reply on the file created by test_1 ($MOUNT/2).
52 drop_request "tchmod 111 $MOUNT/2" || return 1
53 drop_reint_reply "tchmod 666 $MOUNT/2" || return 2
55 run_test 2 "chmod: drop req, drop rep"
# test_3 (fragment): stat (statone) must survive a dropped request and a
# dropped reply; stat is a read-only op, hence drop_reply rather than
# drop_reint_reply here.
58 drop_request "statone $MOUNT/2" || return 1
59 drop_reply "statone $MOUNT/2" || return 2
61 run_test 3 "stat: drop req, drop rep"
# test_4 (fragment): open/read must survive dropped request and reply.
# NOTE(review): assumes /etc/inittab exists on the client — not true on
# systemd-only distributions; confirm against the test environment.
64 do_facet client "cp /etc/inittab $MOUNT/inittab" || return 1
65 drop_request "cat $MOUNT/inittab > /dev/null" || return 2
66 drop_reply "cat $MOUNT/inittab > /dev/null" || return 3
68 run_test 4 "open: drop req, drop rep"
# test_5 (fragment): rename must survive dropped request and dropped
# reintegration reply; checkstat verifies the final name exists.
71 drop_request "mv $MOUNT/inittab $MOUNT/renamed" || return 1
72 drop_reint_reply "mv $MOUNT/renamed $MOUNT/renamed-again" || return 2
73 do_facet client "checkstat -v $MOUNT/renamed-again" || return 3
75 run_test 5 "rename: drop req, drop rep"
# test_6 (fragment): hard link must survive dropped request and dropped
# reintegration reply. The guard recreates test_5's output file so this
# test can run standalone (e.g. with ONLY=6).
77 [ ! -e $MOUNT/renamed-again ] && cp /etc/inittab $MOUNT/renamed-again
79 drop_request "mlink $MOUNT/renamed-again $MOUNT/link1" || return 1
80 drop_reint_reply "mlink $MOUNT/renamed-again $MOUNT/link2" || return 2
82 run_test 6 "link: drop req, drop rep"
# test_7 (fragment): unlink must survive dropped request and dropped
# reintegration reply. Guards recreate the links from test_6 so the test
# is self-contained when run in isolation.
84 [ ! -e $MOUNT/link1 ] && mlink $MOUNT/renamed-again $MOUNT/link1
85 [ ! -e $MOUNT/link2 ] && mlink $MOUNT/renamed-again $MOUNT/link2
87 drop_request "munlink $MOUNT/link1" || return 1
88 drop_reint_reply "munlink $MOUNT/link2" || return 2
90 run_test 7 "unlink: drop req, drop rep"
# test_8 (fragment): touch must survive a dropped reintegration reply
# (regression for bug 1423).
94 drop_reint_reply "touch $MOUNT/$tfile" || return 1
96 run_test 8 "touch: drop rep (bug 1423)"
# test_9 (fragment): pause a bulk write on the OST (bug 1420) while a
# second write proceeds, then sync and clean up.
# NOTE(review): relies on legacy /etc/profile and /etc/termcap as data
# sources — termcap is absent on many modern systems; verify.
100 pause_bulk "cp /etc/profile $MOUNT/$tfile" || return 1
101 do_facet client "cp /etc/termcap $MOUNT/${tfile}.2" || return 2
102 do_facet client "sync"
103 do_facet client "rm $MOUNT/$tfile $MOUNT/${tfile}.2" || return 3
105 run_test 9 "pause bulk on OST (bug 1420)"
# test_10 (fragment, bug 1521): drop the blocking-lock callback so the
# MDS evicts the client, then verify the server still finished the
# chmod request — checkstat expects mode 0777 to have been applied.
109 do_facet client mcreate $MOUNT/$tfile || return 1
110 drop_bl_callback "chmod 0777 $MOUNT/$tfile" || echo "evicted as expected"
111 # wait for the mds to evict the client
112 #echo "sleep $(($TIMEOUT*2))"
113 #sleep $(($TIMEOUT*2))
# touch may fail while the client is still evicted; that is tolerated.
114 do_facet client touch $MOUNT/$tfile || echo "touch failed, evicted"
115 do_facet client checkstat -v -p 0777 $MOUNT/$tfile || return 3
116 do_facet client "munlink $MOUNT/$tfile"
118 run_test 10 "finish request on server after client eviction (bug 1521)"
121 # wake up a thread waiting for completion after eviction
# test_11 (fragment, b=2460): a thread blocked on I/O completion must be
# woken when its client is evicted via a dropped blocking callback.
# multiop flags: O=open, w=write, r=read, c=close (per multiop usage).
123 do_facet client multiop $MOUNT/$tfile Ow || return 1
124 do_facet client multiop $MOUNT/$tfile or || return 2
128 do_facet client multiop $MOUNT/$tfile or || return 3
129 drop_bl_callback multiop $MOUNT/$tfile Ow || echo "evicted as expected"
131 do_facet client munlink $MOUNT/$tfile || return 4
133 run_test 11 "wake up a thread waiting for completion after eviction (b=2460)"
# test_12 (fragment, b=2494): recover from a timed-out resend in
# ptlrpcd. fail_loc 0x115 is OBD_FAIL_MDS_CLOSE_NET (see the #define
# comment below), making the MDS drop the close RPC.
# NOTE(review): $PID is referenced at line 145/146 but the line that
# captures it (presumably PID=$! after the background multiop) is not in
# this extraction.
137 $LCTL mark multiop $MOUNT/$tfile OS_c
138 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x115"
139 clear_failloc $SINGLEMDS $((TIMEOUT * 2)) &
140 multiop $MOUNT/$tfile OS_c &
142 #define OBD_FAIL_MDS_CLOSE_NET 0x115
145 echo "waiting for multiop $PID"
146 wait $PID || return 2
147 do_facet client munlink $MOUNT/$tfile || return 3
149 run_test 12 "recover from timed out resend in ptlrpcd (b=2494)"
151 # Bug 113, check that readdir lost recv timeout works.
# test_13 (fragment): inject a one-shot readpage failure on the MDS
# (0x80000104 = OBD_FAIL_MDS_READPAGE_NET|OBD_FAIL_ONCE) and verify ls
# still succeeds via restart, then reset fail_loc and clean up.
153 mkdir $MOUNT/readdir || return 1
154 touch $MOUNT/readdir/newentry || return
155 # OBD_FAIL_MDS_READPAGE_NET|OBD_FAIL_ONCE
156 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000104"
157 ls $MOUNT/readdir || return 3
158 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
159 rm -rf $MOUNT/readdir || return 4
161 run_test 13 "mdc_readpage restart test (bug 1138)"
163 # Bug 113, check that readdir lost send timeout works.
# test_14 (fragment): same as test_13 but injecting the send-side
# failure (0x80000106 = OBD_FAIL_MDS_SENDPAGE|OBD_FAIL_ONCE); the
# client must resend and ls must succeed.
166 touch $MOUNT/readdir/newentry
167 # OBD_FAIL_MDS_SENDPAGE|OBD_FAIL_ONCE
168 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000106"
169 ls $MOUNT/readdir || return 1
170 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
172 run_test 14 "mdc_readpage resend test (bug 1138)"
# test_15 (fragment): with fail_loc 0x80000128 armed on the MDS, the
# open is expected to FAIL (simulated -ENOMEM) — so a successful touch
# is the error case here, hence "&& return 1".
175 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000128"
176 touch $DIR/$tfile && return 1
179 run_test 15 "failed open (-ENOMEM)"
# test_16 (fragment, bug 2732): a timed-out bulk PUT must not get the
# client evicted. Read-ahead is disabled (and later restored) so the cmp
# actually issues bulk RPCs; 0x80000504 is
# OBD_FAIL_PTLRPC_BULK_PUT_NET|OBD_FAIL_ONCE on ost1.
# NOTE(review): the bodies of the two for-loops (the value written to
# disable read-ahead) are missing from this extraction.
181 READ_AHEAD=`cat $LPROC/llite/*/max_read_ahead_mb | head -n 1`
183 for f in $LPROC/llite/*/max_read_ahead_mb; do
# Restore the saved read-ahead setting on every llite instance.
189 for f in $LPROC/llite/*/max_read_ahead_mb; do
190 echo $READ_AHEAD > $f
195 do_facet client cp /etc/termcap $MOUNT
199 #define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504 | OBD_FAIL_ONCE
200 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000504"
202 # OST bulk will time out here, client resends
203 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 1
204 do_facet ost1 sysctl -w lustre.fail_loc=0
205 # give recovery a chance to finish (shouldn't take long)
207 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 2
210 run_test 16 "timeout bulk put, don't evict client (2732)"
# test_17 (fragment, bug 2732): mirror of test_16 for the bulk GET path
# (0x80000503 = OBD_FAIL_PTLRPC_BULK_GET_NET|OBD_FAIL_ONCE); after the
# timeout the client must resend the bulk and cmp must succeed.
213 # OBD_FAIL_PTLRPC_BULK_GET_NET 0x0503 | OBD_FAIL_ONCE
214 # OST bulk will time out here, client retries
215 do_facet ost1 sysctl -w lustre.fail_loc=0x80000503
216 # need to ensure we send an RPC
217 do_facet client cp /etc/termcap $DIR/$tfile
221 do_facet ost1 sysctl -w lustre.fail_loc=0
# df forces an RPC round-trip to confirm the connection recovered.
222 do_facet client "df $DIR"
223 # expect cmp to succeed, client resent bulk
224 do_facet client "cmp /etc/termcap $DIR/$tfile" || return 3
225 do_facet client "rm $DIR/$tfile" || return 4
228 run_test 17 "timeout bulk get, don't evict client (2732)"
# test_18a (fragment): manually deactivating an OSC device must clear
# the page cache for files striped on that OST. Requires two OSTs; the
# osc device number is scraped from $LPROC/devices.
231 [ -z ${ost2_svc} ] && echo Skipping, needs 2 osts && return 0
233 do_facet client mkdir -p $MOUNT/$tdir
234 f=$MOUNT/$tdir/$tfile
237 pgcache_empty || return 1
# Stripe the file onto a specific OST so deactivating that OSC affects it.
240 lfs setstripe $f $((128 * 1024)) 1 1
242 do_facet client cp /etc/termcap $f
244 local osc2dev=`grep ${ost2_svc}-osc- $LPROC/devices | egrep -v 'MDT' | awk '{print $1}'`
245 $LCTL --device $osc2dev deactivate || return 3
246 # my understanding is that there should be nothing in the page
247 # cache after the client reconnects?
249 pgcache_empty || rc=2
# Reactivate the OSC so later tests see a healthy configuration.
250 $LCTL --device $osc2dev activate
254 run_test 18a "manual ost invalidate clears page cache immediately"
# test_18b (fragment, bug 2766): after an eviction and reconnect the
# client page cache must be empty. The eviction trigger itself is among
# the lines missing from this extraction (between lines 268 and 271).
257 do_facet client mkdir -p $MOUNT/$tdir
258 f=$MOUNT/$tdir/$tfile
259 f2=$MOUNT/$tdir/${tfile}-2
262 pgcache_empty || return 1
264 # shouldn't have to set stripe size of count==1
265 lfs setstripe $f $((128 * 1024)) 0 1
266 lfs setstripe $f2 $((128 * 1024)) 0 1
268 do_facet client cp /etc/termcap $f
271 # allow recovery to complete
272 sleep $((TIMEOUT + 2))
273 # my understanding is that there should be nothing in the page
274 # cache after the client reconnects?
276 pgcache_empty || rc=2
280 run_test 18b "eviction and reconnect clears page cache (2766)"
# test_19a (fragment, bug 2867): exercise expired_lock_main on the MDS —
# drop the ldlm cancel so the lock times out and the client is evicted;
# the subsequent checkstat tolerates the eviction.
284 do_facet client mcreate $f || return 1
285 drop_ldlm_cancel "chmod 0777 $f" || echo "evicted as expected"
287 do_facet client checkstat -v -p 0777 $f || echo evicted
288 # let the client reconnect
290 do_facet client "munlink $f"
292 run_test 19a "test expired_lock_main on mds (2867)"
# test_19b (fragment, bug 2867): same as 19a but for OST extent locks —
# dropping the ldlm cancel during file I/O (multiop Ow/or) should get
# the client evicted without hanging expired_lock_main.
296 do_facet client multiop $f Ow || return 1
297 do_facet client multiop $f or || return 2
301 do_facet client multiop $f or || return 3
302 drop_ldlm_cancel multiop $f Ow || echo "client evicted, as expected"
304 do_facet client munlink $f || return 4
306 run_test 19b "test expired_lock_main on ost (2867)"
308 test_20a() { # bug 2983 - ldlm_handle_enqueue cleanup
# Fragment: inject an enqueue error on ost1 (0x80000308 =
# OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR|OBD_FAIL_ONCE); the writing multiop
# must FAIL — rc==0 is the bug. Lines capturing MULTI_PID and rc are
# missing from this extraction.
310 multiop $DIR/$tdir/${tfile} O_wc &
314 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
315 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
316 kill -USR1 $MULTI_PID
319 [ $rc -eq 0 ] && error "multiop didn't fail enqueue: rc $rc" || true
321 run_test 20a "ldlm_handle_enqueue error (should return error)"
323 test_20b() { # bug 2986 - ldlm_handle_enqueue error during open
# Fragment: with the same extent-enqueue fault armed, a dd that opens
# the file for write must fail; success means the error path is broken.
325 touch $DIR/$tdir/${tfile}
327 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
328 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
329 dd if=/etc/hosts of=$DIR/$tdir/$tfile && \
330 error "didn't fail open enqueue" || true
332 run_test 20b "ldlm_handle_enqueue error (should return error)"
# test_21a (fragment): close and open in flight simultaneously; drop the
# close request (0x80000115 = OBD_FAIL_MDS_CLOSE_NET) while an open is
# delayed (0x80000129). Both files must exist afterwards.
# NOTE(review): lines capturing close_pid/open_pid ($! after the
# background multiops) are missing from this extraction.
335 mkdir -p $DIR/$tdir-1
336 mkdir -p $DIR/$tdir-2
337 multiop $DIR/$tdir-1/f O_c &
340 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
341 multiop $DIR/$tdir-2/f Oc &
344 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
346 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
# USR1 tells the multiop holding the file open (O_c) to close it now.
347 kill -USR1 $close_pid
349 wait $close_pid || return 1
350 wait $open_pid || return 2
351 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
353 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
354 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
358 run_test 21a "drop close request while close and open are both in flight"
# test_21b (fragment): as 21a but dropping the OPEN request
# (fail_loc 0x80000107) while the close proceeds normally.
361 mkdir -p $DIR/$tdir-1
362 mkdir -p $DIR/$tdir-2
363 multiop $DIR/$tdir-1/f O_c &
366 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
367 mcreate $DIR/$tdir-2/f &
370 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
372 kill -USR1 $close_pid
374 wait $close_pid || return 1
375 wait $open_pid || return 3
377 $CHECKSTAT -t file $DIR/$tdir-1/f || return 4
378 $CHECKSTAT -t file $DIR/$tdir-2/f || return 5
381 run_test 21b "drop open request while close and open are both in flight"
# test_21c (fragment): drop BOTH the open request (0x80000107) and the
# close request (0x80000115) while the two are in flight; recovery must
# replay both so both files exist.
384 mkdir -p $DIR/$tdir-1
385 mkdir -p $DIR/$tdir-2
386 multiop $DIR/$tdir-1/f O_c &
389 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
390 mcreate $DIR/$tdir-2/f &
393 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
395 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
396 kill -USR1 $close_pid
398 wait $close_pid || return 1
399 wait $open_pid || return 2
401 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
403 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
404 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
407 run_test 21c "drop both request while close and open are both in flight"
# test_21d (fragment): drop the close REPLY (fail_loc 0x80000122) while
# open (delayed by 0x80000129) and close are both in flight.
410 mkdir -p $DIR/$tdir-1
411 mkdir -p $DIR/$tdir-2
412 multiop $DIR/$tdir-1/f O_c &
415 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
416 multiop $DIR/$tdir-2/f Oc &
418 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
420 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
423 wait $pid || return 1
424 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
426 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
427 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
431 run_test 21d "drop close reply while close and open are both in flight"
# test_21e (fragment): drop the open REPLY (fail_loc 0x80000119) while a
# close is also in flight; both files must exist afterwards.
434 mkdir -p $DIR/$tdir-1
435 mkdir -p $DIR/$tdir-2
436 multiop $DIR/$tdir-1/f O_c &
439 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
440 touch $DIR/$tdir-2/f &
442 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
446 wait $pid || return 1
449 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
450 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
453 run_test 21e "drop open reply while close and open are both in flight"
# test_21f (fragment): drop BOTH replies — open reply (0x80000119) and
# close reply (0x80000122) — with open and close in flight.
456 mkdir -p $DIR/$tdir-1
457 mkdir -p $DIR/$tdir-2
458 multiop $DIR/$tdir-1/f O_c &
461 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
462 touch $DIR/$tdir-2/f &
464 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
466 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
469 wait $pid || return 1
470 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
472 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
473 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
476 run_test 21f "drop both reply while close and open are both in flight"
# test_21g (fragment): drop the open reply (0x80000119) AND the close
# request (0x80000115) while both operations are in flight.
479 mkdir -p $DIR/$tdir-1
480 mkdir -p $DIR/$tdir-2
481 multiop $DIR/$tdir-1/f O_c &
484 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
485 touch $DIR/$tdir-2/f &
487 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
489 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
492 wait $pid || return 1
493 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
495 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
496 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
499 run_test 21g "drop open reply and close request while close and open are both in flight"
# test_21h (fragment): drop the open REQUEST (0x80000107) and the close
# REPLY (0x80000122); waits on both PIDs, then checks both files.
502 mkdir -p $DIR/$tdir-1
503 mkdir -p $DIR/$tdir-2
504 multiop $DIR/$tdir-1/f O_c &
507 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
508 touch $DIR/$tdir-2/f &
511 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
513 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
516 wait $pid || return 1
517 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
519 wait $touch_pid || return 2
521 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
522 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
525 run_test 21h "drop open request and close reply while close and open are both in flight"
527 # bug 3462 - multiple MDC requests
# test_22 (fragment): with the close request dropped (0x80000115 =
# OBD_FAIL_MDS_CLOSE_NET), a second MDC request (mknod via multiop msu)
# must still complete. $f1/$f2 and close_pid are set on lines missing
# from this extraction.
532 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
537 multiop $f1 msu || return 1
540 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
542 wait $close_pid || return 2
543 rm -rf $f2 || return 4
545 run_test 22 "drop close request and do mknod"
# test_23 (fragment): client must not hang closing a file after an MDS
# crash — the close triggered by USR1 is sent through drop_request.
548 multiop $DIR/$tfile O_c &
550 # give a chance for open
554 drop_request "kill -USR1 $pid"
557 wait $pid || return 1
560 run_test 23 "client hang when close a file after mds crash"
562 test_24() { # bug 2248 - eviction fails writeback but app doesn't see it
# Fragment: after an eviction the application's fsync must return an
# error; rc==0 (fsync succeeded) is the failure case. The fail_loc that
# forces the eviction is armed on a line missing from this extraction.
565 multiop $DIR/$tdir/$tfile Owy_wyc &
570 kill -USR1 $MULTI_PID
573 sysctl -w lustre.fail_loc=0x0
575 [ $rc -eq 0 ] && error "multiop didn't fail fsync: rc $rc" || true
577 run_test 24 "fsync error (should return error)"
579 test_26() { # bug 5921 - evict dead exports by pinger
580 # this test can only run from a client on a separate node.
# Skip when OST or MDS modules are loaded locally (single-node setup),
# detected by grepping lsmod for obdfilter/mds.
581 [ "`lsmod | grep obdfilter`" ] && \
582 echo "skipping test 26 (local OST)" && return
583 [ "`lsmod | grep mds`" ] && \
584 echo "skipping test 26 (local MDS)" && return
# Record the OST's export count, make this client drop all RPCs
# (fail_loc 0x505 = OBD_FAIL_PTLRPC_DROP_RPC), wait for the pinger
# evictor, and expect the export count to have gone DOWN.
585 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
586 OST_EXP="`do_facet ost1 cat $OST_FILE`"
587 OST_NEXP1=`echo $OST_EXP | cut -d' ' -f2`
588 echo starting with $OST_NEXP1 OST exports
589 # OBD_FAIL_PTLRPC_DROP_RPC 0x505
590 do_facet client sysctl -w lustre.fail_loc=0x505
591 # evictor takes up to 2.25x to evict. But if there's a
592 # race to start the evictor from various obds, the loser
593 # might have to wait for the next ping.
594 echo Waiting for $(($TIMEOUT * 4)) secs
595 sleep $(($TIMEOUT * 4))
596 OST_EXP="`do_facet ost1 cat $OST_FILE`"
597 OST_NEXP2=`echo $OST_EXP | cut -d' ' -f2`
598 echo ending with $OST_NEXP2 OST exports
599 do_facet client sysctl -w lustre.fail_loc=0x0
600 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted"
603 run_test 26 "evict dead exports"
605 test_26b() { # bug 10140 - evict dead exports by pinger
# Mount a second client, record MDS and OST export counts, then force-
# umount it (-f skips the clean disconnect) so it becomes a dead export.
# After the pinger window both export counts must have dropped.
607 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
608 MDS_FILE=$LPROC/mdt/${mds1_svc}/num_exports
609 MDS_NEXP1="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
610 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
611 OST_NEXP1="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
612 echo starting with $OST_NEXP1 OST and $MDS_NEXP1 MDS exports
613 zconf_umount `hostname` $MOUNT2 -f
614 # evictor takes up to 2.25x to evict. But if there's a
615 # race to start the evictor from various obds, the loser
616 # might have to wait for the next ping.
617 echo Waiting for $(($TIMEOUT * 4)) secs
618 sleep $(($TIMEOUT * 4))
619 OST_NEXP2="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
620 MDS_NEXP2="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
621 echo ending with $OST_NEXP2 OST and $MDS_NEXP2 MDS exports
622 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted from OST"
623 [ $MDS_NEXP1 -le $MDS_NEXP2 ] && error "client not evicted from MDS"
626 run_test 26b "evict dead exports"
# test_27 (fragment): fail the MDS twice while writemany runs, with
# OBD_FAIL_OSC_SHUTDOWN (0x80000407) armed between failovers; the busy-
# wait spins until the one-shot fail_loc fires and clears
# (-2147482617 == 0x80000407 read back as a signed 32-bit value).
# NOTE(review): CLIENT_PID capture and the loop body/closer are on lines
# missing from this extraction.
629 [ "`lsmod | grep mds`" ] || \
630 { echo "skipping test 27 (non-local MDS)" && return 0; }
632 writemany -q -a $DIR/$tdir/$tfile 0 5 &
636 facet_failover $SINGLEMDS
637 #define OBD_FAIL_OSC_SHUTDOWN 0x407
638 sysctl -w lustre.fail_loc=0x80000407
639 # need to wait for reconnect
640 echo -n waiting for fail_loc
641 while [ `sysctl -n lustre.fail_loc` -eq -2147482617 ]; do
645 facet_failover $SINGLEMDS
647 kill -USR1 $CLIENT_PID
651 run_test 27 "fail LOV while using OSC's"
653 test_28() { # bug 6086 - error adding new clients
# Evict the client via a dropped blocking callback, then make the MDS
# fail the client-add once (0x8000012f = OBD_FAIL_MDS_CLIENT_ADD |
# ONCE); df retries until the reconnect finally succeeds, and a full
# MDS fail verifies last_rcvd is still loadable.
654 do_facet client mcreate $MOUNT/$tfile || return 1
655 drop_bl_callback "chmod 0777 $MOUNT/$tfile" ||echo "evicted as expected"
656 #define OBD_FAIL_MDS_CLIENT_ADD 0x12f
657 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012f"
658 # fail once (evicted), reconnect fail (fail_loc), ok
659 df || (sleep 10; df) || (sleep 10; df) || error "reconnect failed"
661 fail $SINGLEMDS # verify MDS last_rcvd can be loaded
663 run_test 28 "handle error adding new clients (bug 6086)"
# test_50 (fragment): fail over the MDS while writemany generates a
# steady create/write/delete load; the client application must ride
# through the failover. The facet_failover call(s), CLIENT_PID capture,
# and rc handling are on lines missing from this extraction.
667 # put a load of file creates/writes/deletes
668 writemany -q $DIR/$tdir/$tfile 0 5 &
670 echo writemany pid $CLIENT_PID
674 # wait for client to reconnect to MDS
679 # client process should see no problems even though MDS went down
681 kill -USR1 $CLIENT_PID
684 echo writemany returned $rc
685 #these may fail because of eviction due to slow AST response.
688 run_test 50 "failover MDS under load"
# test_51 (fragment): interrupt MDS recovery itself — fail over once,
# then fail over again at several offsets (1, 5, 10 seconds, then every
# 5s from TIMEOUT to TIMEOUT+10) into the recovery window; the client
# load must still complete cleanly.
692 # put a load of file creates/writes/deletes
693 writemany -q $DIR/$tdir/$tfile 0 5 &
697 facet_failover $SINGLEMDS
698 # failover at various points during recovery
699 SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))"
700 echo will failover at $SEQ
# NOTE(review): the for-loop over $SEQ is only partially present; the
# loop header and sleep are on missing lines.
703 echo failover in $i sec
705 facet_failover $SINGLEMDS
707 # client process should see no problems even though MDS went down
708 # and recovery was interrupted
710 kill -USR1 $CLIENT_PID
713 echo writemany returned $rc
716 run_test 51 "failover MDS during recovery"
# test_52 (fragment): fail over an OST under writemany load. rc==5 (EIO
# seen by the active writer) and rc==0 (failover fast enough to hide
# the outage) are both accepted outcomes; any other rc propagates.
719 do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" &
721 echo writemany pid $CLIENT_PID
726 wait $CLIENT_PID || rc=$?
727 # active client process should see an EIO for down OST
728 [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; }
729 # but timing or failover setup may allow success
730 [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; }
731 echo "writemany returned $rc"
739 [ $rc -ne 0 ] && { return $rc; }
740 # wait for client to reconnect to OST
744 [ $rc -ne 0 ] && { return $rc; }
751 run_test 52 "failover OST under load"
753 # test of open reconstruct
# test_53 (fragment): drop the ldlm reply during an O_CREAT open so the
# MDS must reconstruct the open on resend.
756 drop_ldlm_reply "./openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\
759 run_test 53 "touch: drop rep"
# test_54 (fragment): "went back in time" regression — accessing a
# missing file via a second mount records transno=0 with rc!=0 in
# last_rcvd; afterwards dmesg must NOT contain the "went back in time"
# console error for this test.
762 zconf_mount `hostname` $MOUNT2
766 cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
769 ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
770 [ x"$ERROR" == x ] || error "back in time occured"
772 run_test 54 "back in time"
774 # bug 11330 - liblustre application death during I/O locks up OST
# test_55 (fragment): three phases polled via `ps` on $DDPID —
#   1) baseline dd must finish within 64 poll iterations;
#   2) with OBD_FAIL_OST_DROP_REQ (0x21d, NOT one-shot) armed, a second
#      dd must still be running after 64 iterations;
#   3) after clearing fail_loc the stuck dd must finish within 500.
# DDPID/count assignments and the sleep in each poll loop are on lines
# missing from this extraction.
776 [ "`lsmod | grep obdfilter`" ] || \
777 { echo "skipping test 55 (non-local OST)" && return 0; }
781 # first dd should be finished quickly
782 dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
785 echo "step1: testing ......"
787 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
789 if [ $count -gt 64 ]; then
790 error "dd should be finished!"
794 echo "(dd_pid=$DDPID, time=$count)successful"
796 #define OBD_FAIL_OST_DROP_REQ 0x21d
797 do_facet ost sysctl -w lustre.fail_loc=0x0000021d
798 # second dd will be never finished
799 dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
802 echo "step2: testing ......"
803 while [ $count -le 64 ]; do
804 dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`"
805 if [ -z $dd_name ]; then
807 echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
808 error "dd shouldn't be finished!"
813 echo "(dd_pid=$DDPID, time=$count)successful"
815 #Recover fail_loc and dd will finish soon
816 do_facet ost sysctl -w lustre.fail_loc=0
818 echo "step3: testing ......"
820 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
822 if [ $count -gt 500 ]; then
823 error "dd should be finished!"
827 echo "(dd_pid=$DDPID, time=$count)successful"
831 run_test 55 "ost_brw_read/write drops timed-out read/write request"
833 test_56() { # b=11277
834 #define OBD_FAIL_MDS_RESEND 0x136
# Fragment: arm OBD_FAIL_MDS_RESEND once (0x80000136) to force a resend
# while the export is busy; the operation exercised between arming and
# clearing is on a line missing from this extraction.
836 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000136"
838 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
841 run_test 56 "do not allow reconnect to busy exports"
# Helper fragment (presumably the procfs-reader used by test_57 — the
# enclosing function's name/opener is not in this extraction): while any
# osc entries remain under $LPROC, read every proc file, discarding
# output and errors, to provoke the bug-10866 crash window.
844 # no oscs means no client or mdt
845 while [ -e $LPROC/osc ]; do
846 for f in `find $LPROC -type f`; do
847 cat $f > /dev/null 2>&1
852 test_57() { # bug 10866
# Fragment: arm OBD_FAIL_LPROC_REMOVE (0x80000B00, one-shot) around an
# unmount and an aborted MDS failover while procfs entries are being
# read; reading them must not crash the kernel. df at the end confirms
# the filesystem is usable again.
856 #define OBD_FAIL_LPROC_REMOVE 0xB00
857 sysctl -w lustre.fail_loc=0x80000B00
858 zconf_umount `hostname` $DIR
859 sysctl -w lustre.fail_loc=0x80000B00
860 fail_abort $SINGLEMDS
862 sysctl -w lustre.fail_loc=0
864 do_facet client "df $DIR"
866 run_test 57 "read procfs entries causes kernel crash"
868 test_58() { # bug 11546
869 #define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
# Fragment: pause the MDC enqueue (0x80000801) during a background read,
# then evict the client mid-reply via a dropped blocking callback on the
# rm; the open RPC reply processing must survive the eviction.
872 sysctl -w lustre.fail_loc=0x80000801
873 cp $MOUNT/$tfile /dev/null &
876 sysctl -w lustre.fail_loc=0
877 drop_bl_callback rm -f $MOUNT/$tfile
879 do_facet client "df $DIR"
881 run_test 58 "Eviction in the middle of open RPC reply processing"