# Prolog (fragment) of a Lustre recovery test suite.
# Tests listed in ALWAYS_EXCEPT are skipped by the framework; the sourced
# test-framework.sh and per-$NAME cfg file provide helpers used below
# (run_test, do_facet, drop_request, drop_reply, fail, skip, ...).
6 ALWAYS_EXCEPT="24 27 52 $RECOVERY_SMALL_EXCEPT"
# Full Lustre debug mask by default (-1 = all debug flags).
8 PTLDEBUG=${PTLDEBUG:--1}
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
12 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
16 # Allow us to override the setup if we already have a mounted system by
17 # setting SETUP=" " and CLEANUP=" "
19 CLEANUP=${CLEANUP:-""}
21 cleanup_and_setup_lustre
# Remove leftovers from earlier runs (old and new test-dir layouts).
22 rm -rf $DIR/${TESTSUITE}/[df][0-9]* # bug 13798 new t-f tdir staff
23 rm -rf $DIR/[df][0-9]*
# Tests 1-10 (bodies only; the enclosing test_N() wrappers are elided).
# Pattern: drop_request/drop_reply/drop_reint_reply run a command while the
# framework drops the matching RPC, then verify the client recovers.
26 drop_request "mcreate $MOUNT/1" || return 1
27 drop_reint_reply "mcreate $MOUNT/2" || return 2
29 run_test 1 "mcreate: drop req, drop rep"
32 drop_request "tchmod 111 $MOUNT/2" || return 1
33 drop_reint_reply "tchmod 666 $MOUNT/2" || return 2
35 run_test 2 "chmod: drop req, drop rep"
38 drop_request "statone $MOUNT/2" || return 1
39 drop_reply "statone $MOUNT/2" || return 2
41 run_test 3 "stat: drop req, drop rep"
44 do_facet client "cp /etc/inittab $MOUNT/inittab" || return 1
45 drop_request "cat $MOUNT/inittab > /dev/null" || return 2
46 drop_reply "cat $MOUNT/inittab > /dev/null" || return 3
48 run_test 4 "open: drop req, drop rep"
51 drop_request "mv $MOUNT/inittab $MOUNT/renamed" || return 1
52 drop_reint_reply "mv $MOUNT/renamed $MOUNT/renamed-again" || return 2
53 do_facet client "checkstat -v $MOUNT/renamed-again" || return 3
55 run_test 5 "rename: drop req, drop rep"
# Re-create the link target if a prior test did not leave it behind.
57 [ ! -e $MOUNT/renamed-again ] && cp /etc/inittab $MOUNT/renamed-again
59 drop_request "mlink $MOUNT/renamed-again $MOUNT/link1" || return 1
60 drop_reint_reply "mlink $MOUNT/renamed-again $MOUNT/link2" || return 2
62 run_test 6 "link: drop req, drop rep"
64 [ ! -e $MOUNT/link1 ] && mlink $MOUNT/renamed-again $MOUNT/link1
65 [ ! -e $MOUNT/link2 ] && mlink $MOUNT/renamed-again $MOUNT/link2
67 drop_request "munlink $MOUNT/link1" || return 1
68 drop_reint_reply "munlink $MOUNT/link2" || return 2
70 run_test 7 "unlink: drop req, drop rep"
74 drop_reint_reply "touch $MOUNT/$tfile" || return 1
76 run_test 8 "touch: drop rep (bug 1423)"
# pause_bulk stalls the OST bulk transfer; the second cp and sync must
# still complete, proving the pause does not wedge the client.
80 pause_bulk "cp /etc/profile $MOUNT/$tfile" || return 1
81 do_facet client "cp /etc/termcap $MOUNT/${tfile}.2" || return 2
82 do_facet client "sync"
83 do_facet client "rm $MOUNT/$tfile $MOUNT/${tfile}.2" || return 3
85 run_test 9 "pause bulk on OST (bug 1420)"
89 do_facet client mcreate $MOUNT/$tfile || return 1
# Dropping the blocking callback should get this client evicted.
90 drop_bl_callback "chmod 0777 $MOUNT/$tfile" || echo "evicted as expected"
91 # wait for the mds to evict the client
92 #echo "sleep $(($TIMEOUT*2))"
93 #sleep $(($TIMEOUT*2))
94 do_facet client touch $MOUNT/$tfile || echo "touch failed, evicted"
# The chmod must have been committed on the server despite the eviction.
95 do_facet client checkstat -v -p 0777 $MOUNT/$tfile || return 3
96 do_facet client "munlink $MOUNT/$tfile"
98 run_test 10 "finish request on server after client eviction (bug 1521)"
101 # wake up a thread waiting for completion after eviction
# multiop flag strings (Ow = open+write, or = open+read, OS_c = open,
# shared-lock, wait-for-signal then close) drive specific RPC sequences.
103 do_facet client multiop $MOUNT/$tfile Ow || return 1
104 do_facet client multiop $MOUNT/$tfile or || return 2
108 do_facet client multiop $MOUNT/$tfile or || return 3
109 drop_bl_callback multiop $MOUNT/$tfile Ow || echo "evicted as expected"
111 do_facet client munlink $MOUNT/$tfile || return 4
113 run_test 11 "wake up a thread waiting for completion after eviction (b=2460)"
117 $LCTL mark multiop $MOUNT/$tfile OS_c
# OBD_FAIL_MDS_CLOSE_NET (0x115): MDS drops the close request;
# clear_failloc resets it after 2*TIMEOUT so the resend can succeed.
118 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x115"
119 clear_failloc $SINGLEMDS $((TIMEOUT * 2)) &
120 multiop $MOUNT/$tfile OS_c &
122 #define OBD_FAIL_MDS_CLOSE_NET 0x115
# NOTE(review): $PID is presumably captured from the backgrounded multiop
# on an elided line ($!) — confirm against the full source.
125 echo "waiting for multiop $PID"
126 wait $PID || return 2
127 do_facet client munlink $MOUNT/$tfile || return 3
129 run_test 12 "recover from timed out resend in ptlrpcd (b=2494)"
131 # Bug 113, check that readdir lost recv timeout works.
133 mkdir $MOUNT/readdir || return 1
134 touch $MOUNT/readdir/newentry || return
135 # OBD_FAIL_MDS_READPAGE_NET|OBD_FAIL_ONCE
# 0x80000000 is the OBD_FAIL_ONCE flag: trigger the failure a single time.
136 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000104"
137 ls $MOUNT/readdir || return 3
138 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
139 rm -rf $MOUNT/readdir || return 4
141 run_test 13 "mdc_readpage restart test (bug 1138)"
143 # Bug 113, check that readdir lost send timeout works.
146 touch $MOUNT/readdir/newentry
147 # OBD_FAIL_MDS_SENDPAGE|OBD_FAIL_ONCE
148 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000106"
149 ls $MOUNT/readdir || return 1
150 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
152 run_test 14 "mdc_readpage resend test (bug 1138)"
# Open is expected to FAIL here (fail_loc 0x128 injects an error),
# so a successful touch is the test failure.
155 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000128"
156 touch $DIR/$tfile && return 1
159 run_test 15 "failed open (-ENOMEM)"
# Tests 16-17: bulk I/O timeouts must trigger a resend, not an eviction.
# Read-ahead is saved and restored around the fault injection so the
# client actually issues the bulk RPCs being tested.
161 READ_AHEAD=`cat $LPROC/llite/*/max_read_ahead_mb | head -n 1`
163 for f in $LPROC/llite/*/max_read_ahead_mb; do
169 for f in $LPROC/llite/*/max_read_ahead_mb; do
170 echo $READ_AHEAD > $f
175 do_facet client cp /etc/termcap $MOUNT
179 #define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504 | OBD_FAIL_ONCE
180 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000504"
182 # OST bulk will time out here, client resends
183 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 1
184 do_facet ost1 sysctl -w lustre.fail_loc=0
185 # give recovery a chance to finish (shouldn't take long)
187 do_facet client "cmp /etc/termcap $MOUNT/termcap" || return 2
190 run_test 16 "timeout bulk put, don't evict client (2732)"
193 # OBD_FAIL_PTLRPC_BULK_GET_NET 0x0503 | OBD_FAIL_ONCE
194 # OST bulk will time out here, client retries
195 do_facet ost1 sysctl -w lustre.fail_loc=0x80000503
196 # need to ensure we send an RPC
197 do_facet client cp /etc/termcap $DIR/$tfile
201 do_facet ost1 sysctl -w lustre.fail_loc=0
# df forces a statfs RPC, confirming the connection is alive again.
202 do_facet client "df $DIR"
203 # expect cmp to succeed, client resent bulk
204 do_facet client "cmp /etc/termcap $DIR/$tfile" || return 3
205 do_facet client "rm $DIR/$tfile" || return 4
208 run_test 17 "timeout bulk get, don't evict client (2732)"
# test_18a body: deactivating the OSC device for ost2 must flush the
# client page cache immediately; reactivate restores it afterwards.
# Quote "${ost2_svc}": unquoted, an empty value collapses the [ ] test
# to `[ -z ]` (true only by accident) and a multi-word value makes the
# test error out, silently skipping the skip-guard.
211 [ -z "${ost2_svc}" ] && skip "needs 2 osts" && return 0
213 do_facet client mkdir -p $MOUNT/$tdir
214 f=$MOUNT/$tdir/$tfile
217 pgcache_empty || return 1
# stripe the file onto ost index 1 so the I/O goes through osc2dev
220 lfs setstripe $f $((128 * 1024)) 1 1
222 do_facet client cp /etc/termcap $f
224 local osc2dev=`grep ${ost2_svc}-osc- $LPROC/devices | egrep -v 'MDT' | awk '{print $1}'`
225 $LCTL --device $osc2dev deactivate || return 3
226 # my understanding is that there should be nothing in the page
227 # cache after the client reconnects?
229 pgcache_empty || rc=2
230 $LCTL --device $osc2dev activate
234 run_test 18a "manual ost invalidate clears page cache immediately"
# test_18b: after an eviction + reconnect, the page cache for both
# striped files must be empty.
237 do_facet client mkdir -p $MOUNT/$tdir
238 f=$MOUNT/$tdir/$tfile
239 f2=$MOUNT/$tdir/${tfile}-2
242 pgcache_empty || return 1
244 # shouldn't have to set stripe size of count==1
245 lfs setstripe $f $((128 * 1024)) 0 1
246 lfs setstripe $f2 $((128 * 1024)) 0 1
248 do_facet client cp /etc/termcap $f
251 # allow recovery to complete
252 sleep $((TIMEOUT + 2))
253 # my understanding is that there should be nothing in the page
254 # cache after the client reconnects?
256 pgcache_empty || rc=2
260 run_test 18b "eviction and reconnect clears page cache (2766)"
# test_19a/19b: dropping the LDLM cancel RPC should evict the client
# (expired_lock_main path), first on the MDS, then on the OST.
264 do_facet client mcreate $f || return 1
265 drop_ldlm_cancel "chmod 0777 $f" || echo "evicted as expected"
267 do_facet client checkstat -v -p 0777 $f || echo evicted
268 # let the client reconnect
270 do_facet client "munlink $f"
272 run_test 19a "test expired_lock_main on mds (2867)"
276 do_facet client multiop $f Ow || return 1
277 do_facet client multiop $f or || return 2
281 do_facet client multiop $f or || return 3
282 drop_ldlm_cancel multiop $f Ow || echo "client evicted, as expected"
284 do_facet client munlink $f || return 4
286 run_test 19b "test expired_lock_main on ost (2867)"
288 test_20a() { # bug 2983 - ldlm_handle_enqueue cleanup
# O_wc: open for write then wait for USR1 before closing; the enqueue
# error injected below must make multiop fail.
290 multiop $DIR/$tdir/${tfile} O_wc &
294 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
295 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
296 kill -USR1 $MULTI_PID
# NOTE(review): $rc presumably comes from `wait $MULTI_PID` on an elided
# line — confirm against the full source.
299 [ $rc -eq 0 ] && error "multiop didn't fail enqueue: rc $rc" || true
301 run_test 20a "ldlm_handle_enqueue error (should return error)"
303 test_20b() { # bug 2986 - ldlm_handle_enqueue error during open
305 touch $DIR/$tdir/${tfile}
307 #define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
308 do_facet ost1 sysctl -w lustre.fail_loc=0x80000308
# dd must fail; success means the injected enqueue error was ignored.
309 dd if=/etc/hosts of=$DIR/$tdir/$tfile && \
310 error "didn't fail open enqueue" || true
312 run_test 20b "ldlm_handle_enqueue error (should return error)"
# Tests 21a-21d: race a close (multiop O_c waiting for USR1) against an
# open in a second dir while dropping the close request, the open request,
# both, or the close reply via MDS fail_loc injection.
# fail_loc values used: 0x115 = MDS_CLOSE_NET (drop close request),
# 0x107 = drop open request, 0x122 = drop close reply,
# 0x129 = pause during open.
315 mkdir -p $DIR/$tdir-1
316 mkdir -p $DIR/$tdir-2
317 multiop $DIR/$tdir-1/f O_c &
320 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
321 multiop $DIR/$tdir-2/f Oc &
324 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
326 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
327 kill -USR1 $close_pid
329 wait $close_pid || return 1
330 wait $open_pid || return 2
331 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
# both files must exist once the races resolve
333 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
334 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
338 run_test 21a "drop close request while close and open are both in flight"
341 mkdir -p $DIR/$tdir-1
342 mkdir -p $DIR/$tdir-2
343 multiop $DIR/$tdir-1/f O_c &
346 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
347 mcreate $DIR/$tdir-2/f &
350 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
352 kill -USR1 $close_pid
354 wait $close_pid || return 1
355 wait $open_pid || return 3
357 $CHECKSTAT -t file $DIR/$tdir-1/f || return 4
358 $CHECKSTAT -t file $DIR/$tdir-2/f || return 5
361 run_test 21b "drop open request while close and open are both in flight"
364 mkdir -p $DIR/$tdir-1
365 mkdir -p $DIR/$tdir-2
366 multiop $DIR/$tdir-1/f O_c &
369 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
370 mcreate $DIR/$tdir-2/f &
373 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
375 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
376 kill -USR1 $close_pid
378 wait $close_pid || return 1
379 wait $open_pid || return 2
381 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
383 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
384 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
387 run_test 21c "drop both request while close and open are both in flight"
390 mkdir -p $DIR/$tdir-1
391 mkdir -p $DIR/$tdir-2
392 multiop $DIR/$tdir-1/f O_c &
395 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000129"
396 multiop $DIR/$tdir-2/f Oc &
398 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
400 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
403 wait $pid || return 1
404 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
406 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
407 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
411 run_test 21d "drop close reply while close and open are both in flight"
# Tests 21e-21h: same close/open race as 21a-21d but dropping replies:
# 0x119 = drop open reply, 0x122 = drop close reply,
# 0x115 = drop close request, 0x107 = drop open request.
414 mkdir -p $DIR/$tdir-1
415 mkdir -p $DIR/$tdir-2
416 multiop $DIR/$tdir-1/f O_c &
419 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
420 touch $DIR/$tdir-2/f &
422 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
426 wait $pid || return 1
429 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
430 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
433 run_test 21e "drop open reply while close and open are both in flight"
436 mkdir -p $DIR/$tdir-1
437 mkdir -p $DIR/$tdir-2
438 multiop $DIR/$tdir-1/f O_c &
441 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
442 touch $DIR/$tdir-2/f &
444 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
446 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
449 wait $pid || return 1
450 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
452 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
453 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
456 run_test 21f "drop both reply while close and open are both in flight"
459 mkdir -p $DIR/$tdir-1
460 mkdir -p $DIR/$tdir-2
461 multiop $DIR/$tdir-1/f O_c &
464 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
465 touch $DIR/$tdir-2/f &
467 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
469 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
472 wait $pid || return 1
473 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
475 $CHECKSTAT -t file $DIR/$tdir-1/f || return 2
476 $CHECKSTAT -t file $DIR/$tdir-2/f || return 3
479 run_test 21g "drop open reply and close request while close and open are both in flight"
482 mkdir -p $DIR/$tdir-1
483 mkdir -p $DIR/$tdir-2
484 multiop $DIR/$tdir-1/f O_c &
487 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
488 touch $DIR/$tdir-2/f &
491 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
493 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
496 wait $pid || return 1
497 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
499 wait $touch_pid || return 2
501 $CHECKSTAT -t file $DIR/$tdir-1/f || return 3
502 $CHECKSTAT -t file $DIR/$tdir-2/f || return 4
505 run_test 21h "drop open request and close reply while close and open are both in flight"
507 # bug 3462 - multiple MDC requests
# test_22: with the close-request drop (0x115) armed, a multiop doing
# mknod+stat+unlink (msu) on a second file must still succeed.
512 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
517 multiop $f1 msu || return 1
520 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
522 wait $close_pid || return 2
523 rm -rf $f2 || return 4
525 run_test 22 "drop close request and do mknod"
# test_23: close (triggered by USR1) with its request dropped must not
# hang the client after an MDS crash.
528 multiop $DIR/$tfile O_c &
530 # give a chance for open
534 drop_request "kill -USR1 $pid"
537 wait $pid || return 1
540 run_test 23 "client hang when close a file after mds crash"
542 test_24() { # bug 2248 - eviction fails writeback but app doesn't see it
# Owy_wyc: open, write, sync, wait, write, sync, close — the fsync after
# eviction is expected to return an error to the application.
545 multiop $DIR/$tdir/$tfile Owy_wyc &
550 kill -USR1 $MULTI_PID
553 sysctl -w lustre.fail_loc=0x0
555 [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true
557 run_test 24 "fsync error (should return error)"
559 test_26() { # bug 5921 - evict dead exports by pinger
560 # this test can only run from a client on a separate node.
# BUGFIX: `remote_ost || skip ... && return` parses as
# `(remote_ost || skip ...) && return` because || and && have equal
# precedence and associate left-to-right — so the function returned
# unconditionally and the test never ran. Braces restore the intended
# "skip only when the OST/MDS is local" logic, matching the idiom used
# by test_27 (`remote_mds && { skip ...; return 0; }`).
561 remote_ost || { skip "local OST" && return; }
562 remote_mds || { skip "local MDS" && return; }
563 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
564 OST_EXP="`do_facet ost1 cat $OST_FILE`"
565 OST_NEXP1=`echo $OST_EXP | cut -d' ' -f2`
566 echo starting with $OST_NEXP1 OST exports
567 # OBD_FAIL_PTLRPC_DROP_RPC 0x505
# Drop all RPCs from this client so the server-side pinger sees it dead.
568 do_facet client sysctl -w lustre.fail_loc=0x505
569 # evictor takes up to 2.25x to evict. But if there's a
570 # race to start the evictor from various obds, the loser
571 # might have to wait for the next ping.
572 echo Waiting for $(($TIMEOUT * 4)) secs
573 sleep $(($TIMEOUT * 4))
574 OST_EXP="`do_facet ost1 cat $OST_FILE`"
575 OST_NEXP2=`echo $OST_EXP | cut -d' ' -f2`
576 echo ending with $OST_NEXP2 OST exports
577 do_facet client sysctl -w lustre.fail_loc=0x0
# export count must have dropped, proving the dead client was evicted
578 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted"
581 run_test 26 "evict dead exports"
583 test_26b() { # bug 10140 - evict dead exports by pinger
# Mount a second client, record export counts, then force-unmount it
# (-f skips the disconnect RPC) so the pinger must evict it.
585 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
586 sleep 1 # wait connections being established
587 MDS_FILE=$LPROC/mdt/${mds1_svc}/num_exports
588 MDS_NEXP1="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
589 OST_FILE=$LPROC/obdfilter/${ost1_svc}/num_exports
590 OST_NEXP1="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
591 echo starting with $OST_NEXP1 OST and $MDS_NEXP1 MDS exports
592 zconf_umount `hostname` $MOUNT2 -f
593 # evictor takes up to 2.25x to evict. But if there's a
594 # race to start the evictor from various obds, the loser
595 # might have to wait for the next ping.
596 echo Waiting for $(($TIMEOUT * 4)) secs
597 sleep $(($TIMEOUT * 4))
598 OST_NEXP2="`do_facet ost1 cat $OST_FILE | cut -d' ' -f2`"
599 MDS_NEXP2="`do_facet $SINGLEMDS cat $MDS_FILE | cut -d' ' -f2`"
600 echo ending with $OST_NEXP2 OST and $MDS_NEXP2 MDS exports
601 [ $OST_NEXP1 -le $OST_NEXP2 ] && error "client not evicted from OST"
602 [ $MDS_NEXP1 -le $MDS_NEXP2 ] && error "client not evicted from MDS"
605 run_test 26b "evict dead exports"
# test_27: fail the MDS twice while writemany generates load; the OSC
# shutdown fail_loc (0x407) is cleared by reconnect before failing again.
608 remote_mds && { skip "remote MDS" && return 0; }
610 writemany -q -a $DIR/$tdir/$tfile 0 5 &
614 facet_failover $SINGLEMDS
615 #define OBD_FAIL_OSC_SHUTDOWN 0x407
616 sysctl -w lustre.fail_loc=0x80000407
617 # need to wait for reconnect
618 echo -n waiting for fail_loc
# -2147482617 is 0x80000407 read back as a signed 32-bit value.
619 while [ `sysctl -n lustre.fail_loc` -eq -2147482617 ]; do
623 facet_failover $SINGLEMDS
625 kill -USR1 $CLIENT_PID
629 run_test 27 "fail LOV while using OSC's"
631 test_28() { # bug 6086 - error adding new clients
632 do_facet client mcreate $MOUNT/$tfile || return 1
633 drop_bl_callback "chmod 0777 $MOUNT/$tfile" ||echo "evicted as expected"
634 #define OBD_FAIL_MDS_CLIENT_ADD 0x12f
635 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012f"
636 # fail once (evicted), reconnect fail (fail_loc), ok
637 df || (sleep 10; df) || (sleep 10; df) || error "reconnect failed"
639 fail $SINGLEMDS # verify MDS last_rcvd can be loaded
641 run_test 28 "handle error adding new clients (bug 6086)"
# test_50: fail over the MDS while writemany runs; the client workload
# must complete without errors despite the failovers.
645 # put a load of file creates/writes/deletes
646 writemany -q $DIR/$tdir/$tfile 0 5 &
648 echo writemany pid $CLIENT_PID
652 # wait for client to reconnect to MDS
657 # client process should see no problems even though MDS went down
659 kill -USR1 $CLIENT_PID
662 echo writemany returned $rc
663 #these may fail because of eviction due to slow AST response.
666 run_test 50 "failover MDS under load"
# test_51: like test_50, but the second failover is injected at several
# points DURING recovery (the SEQ delays).
670 # put a load of file creates/writes/deletes
671 writemany -q $DIR/$tdir/$tfile 0 5 &
675 facet_failover $SINGLEMDS
676 # failover at various points during recovery
677 SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))"
678 echo will failover at $SEQ
681 echo failover in $i sec
683 facet_failover $SINGLEMDS
685 # client process should see no problems even though MDS went down
686 # and recovery was interrupted
688 kill -USR1 $CLIENT_PID
691 echo writemany returned $rc
694 run_test 51 "failover MDS during recovery"
# test_52: fail over an OST under writemany load; EIO (rc 5) and clean
# success (rc 0) are both acceptable outcomes for the client process.
697 do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" &
699 echo writemany pid $CLIENT_PID
704 wait $CLIENT_PID || rc=$?
705 # active client process should see an EIO for down OST
706 [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; }
707 # but timing or failover setup may allow success
708 [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; }
709 echo "writemany returned $rc"
717 [ $rc -ne 0 ] && { return $rc; }
718 # wait for client to reconnect to OST
722 [ $rc -ne 0 ] && { return $rc; }
729 run_test 52 "failover OST under load"
731 # test of open reconstruct
# test_53: drop the LDLM reply for an open+create; the open must be
# reconstructed on resend.
734 drop_ldlm_reply "openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile" ||\
737 run_test 53 "touch: drop rep"
# test_54 (head): mount a second client and stat a missing file so a
# transno=0/rc!=0 reply is recorded in last_rcvd.
740 zconf_mount `hostname` $MOUNT2
744 cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
# test_54 (tail): fail if the kernel log reported a "went back in time"
# transno regression since the test-54 marker.
747 ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
# BUGFIX: spelling of the user-facing error message ("occured" -> "occurred").
748 [ x"$ERROR" == x ] || error "back in time occurred"
750 run_test 54 "back in time"
752 # bug 11330 - liblustre application death during I/O locks up OST
754 remote_ost && { skip "remote OST" && return 0; }
# Step 1: with no fault injected, dd must finish within ~64 poll cycles.
758 # first dd should be finished quickly
759 dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
762 echo "step1: testing ......"
# ps/awk lookup returns the command name while the dd PID is still alive;
# empty output means dd has exited.
764 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
766 if [ $count -gt 64 ]; then
767 error "dd should be finished!"
771 echo "(dd_pid=$DDPID, time=$count)successful"
773 #define OBD_FAIL_OST_DROP_REQ 0x21d
# Step 2: with the OST dropping requests, dd must NOT finish.
774 do_facet ost sysctl -w lustre.fail_loc=0x0000021d
775 # second dd will be never finished
776 dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
779 echo "step2: testing ......"
780 while [ $count -le 64 ]; do
781 dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`"
# NOTE(review): $dd_name is unquoted; this only works because awk's $5
# has no whitespace — quoting would be safer.
782 if [ -z $dd_name ]; then
784 echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
785 error "dd shouldn't be finished!"
790 echo "(dd_pid=$DDPID, time=$count)successful"
# Step 3: clear the fail_loc; the stalled dd must now complete.
792 #Recover fail_loc and dd will finish soon
793 do_facet ost sysctl -w lustre.fail_loc=0
795 echo "step3: testing ......"
797 if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
799 if [ $count -gt 500 ]; then
800 error "dd should be finished!"
804 echo "(dd_pid=$DDPID, time=$count)successful"
808 run_test 55 "ost_brw_read/write drops timed-out read/write request"
810 test_56() { # b=11277
811 #define OBD_FAIL_MDS_RESEND 0x136
813 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000136"
815 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
818 run_test 56 "do not allow reconnect to busy exports"
# Helper loop (name elided): repeatedly read every procfs file while the
# osc directory still exists, to race reads against teardown.
821 # no oscs means no client or mdt
822 while [ -e $LPROC/osc ]; do
823 for f in `find $LPROC -type f`; do
824 cat $f > /dev/null 2>&1
829 test_57() { # bug 10866
833 #define OBD_FAIL_LPROC_REMOVE 0xB00
834 sysctl -w lustre.fail_loc=0x80000B00
835 zconf_umount `hostname` $DIR
836 sysctl -w lustre.fail_loc=0x80000B00
837 fail_abort $SINGLEMDS
839 sysctl -w lustre.fail_loc=0
841 do_facet client "df $DIR"
843 run_test 57 "read procfs entries causes kernel crash"
845 test_58() { # bug 11546
846 #define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
# Pause the MDC enqueue mid-reply, then evict the client via a dropped
# blocking callback while the open reply is being processed.
849 sysctl -w lustre.fail_loc=0x80000801
850 cp $MOUNT/$tfile /dev/null &
853 sysctl -w lustre.fail_loc=0
854 drop_bl_callback rm -f $MOUNT/$tfile
856 do_facet client "df $DIR"
858 run_test 58 "Eviction in the middle of open RPC reply processing"
860 test_59() { # bug 10589
861 zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
862 #define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
863 sysctl -w lustre.fail_loc=0x311
# Count blocks written on the second mount, then force-evict it and
# verify a read on the first mount sees the same number of blocks.
864 writes=`dd if=/dev/zero of=$DIR2/$tfile count=1 2>&1 | awk 'BEGIN { FS="+" } /out/ {print $1}'`
865 sysctl -w lustre.fail_loc=0
867 zconf_umount `hostname` $DIR2 -f
868 reads=`dd if=$DIR/$tfile of=/dev/null 2>&1 | awk 'BEGIN { FS="+" } /in/ {print $1}'`
869 [ $reads -eq $writes ] || error "read" $reads "blocks, must be" $writes
871 run_test 59 "Read cancel race on client eviction"
# Suite epilog: clean up and dump the suite log if one was produced.
873 equals_msg `basename $0`: test complete, cleaning up
874 check_and_cleanup_lustre
875 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true