7 # This test needs to be run on the client
# Locate the Lustre source tree relative to this script unless LUSTRE is preset.
10 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
# Pull in the shared test framework (run_test, replay_barrier, fail, skip, ...).
13 . $LUSTRE/tests/test-framework.sh
# Source the per-configuration settings; CONFIG may be preset by the caller.
15 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
# Grant accounting is verified after the run unless the caller disables it.
17 CHECK_GRANT=${CHECK_GRANT:-"yes"}
18 GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
# These tests drive the MDS remotely; exit successfully when dsh is unavailable.
20 require_dsh_mds || exit 0
23 # bug number: 17466 18857
24 ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT"
26 # 63 min 7 min AT AT AT AT"
# Skip the long-running tests when SLOW=no.
27 [ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
31 check_and_setup_lustre
# Clean up any d*/f* test files left behind by previous runs.
36 rm -rf $DIR/[df][0-9]*
38 test_0a() { # was test_0
# Set a replay barrier with no operations after it, so recovery replays nothing.
41 replay_barrier $SINGLEMDS
45 run_test 0a "empty replay"
48 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
50 # this test attempts to trigger a race in the precreation code,
51 # and must run before any other objects are created on the filesystem
# Create then remove 20 files; the objects must still exist after recovery.
53 createmany -o $DIR/$tfile 20 || return 1
54 unlinkmany $DIR/$tfile 20 || return 2
56 run_test 0b "ensure object created after recover exists. (3284)"
62 lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width
68 lctl get_param -n seq.cli-srv-$mds-mdc-*.width
71 # This test should pass for single-mds and multi-mds configs.
72 # But for different configurations it tests different things.
76 # (1) fld_create replay should happen;
78 # (2) fld_create replay should not return -EEXISTS, if it does
79 # this means sequence manager recovery code is buggy and allocated
80 # same sequence two times after recovery.
84 # (1) fld_create replay may not happen, because its home MDS is
85 # MDS2 which is not involved in recovery;
87 # (2) as fld_create does not happen on MDS1, it does not make any
90 local label=`mdsdevlabel 1`
91 [ -z "$label" ] && echo "No label for mds1" && return 1
93 replay_barrier $SINGLEMDS
94 local sw=`seq_get_width $label`
96 # make seq manager switch to next sequence each
97 # time as new fid is needed.
98 seq_set_width $label 1
100 # make sure that fld has created at least one new
102 touch $DIR/$tfile || return 2
103 seq_set_width $label $sw
105 # fail $SINGLEMDS and start recovery, replay RPCs, etc.
108 # wait for recovery finish
112 # flush fld cache and dentry cache to make it lookup
113 # created entry instead of revalidating existent one
115 zconf_mount `hostname` $MOUNT
117 # issue lookup which should call fld lookup which
118 # should fail if client did not replay fld create
119 # correctly and server has no fld entry
120 touch $DIR/$tfile || return 3
121 rm $DIR/$tfile || return 4
123 start_full_debug_logging
124 run_test 0c "fld create"
125 stop_full_debug_logging
128 replay_barrier $SINGLEMDS
131 $CHECKSTAT -t file $DIR/$tfile || return 1
134 run_test 1 "simple create"
137 replay_barrier $SINGLEMDS
140 $CHECKSTAT -t file $DIR/$tfile || return 1
147 replay_barrier $SINGLEMDS
150 $CHECKSTAT -t file $DIR/$tfile || return 1
156 local file=$DIR/$tfile
157 replay_barrier $SINGLEMDS
159 openfile -f O_DIRECTORY $file
161 $CHECKSTAT -t file $file || return 2
164 run_test 3a "replay failed open(O_DIRECTORY)"
167 replay_barrier $SINGLEMDS
168 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
169 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
171 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
173 $CHECKSTAT -t file $DIR/$tfile && return 2
176 run_test 3b "replay failed open -ENOMEM"
179 replay_barrier $SINGLEMDS
180 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
181 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
183 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
186 $CHECKSTAT -t file $DIR/$tfile && return 2
189 run_test 3c "replay failed open -ENOMEM"
191 test_4a() { # was test_4
192 replay_barrier $SINGLEMDS
193 for i in `seq 10`; do
194 echo "tag-$i" > $DIR/$tfile-$i
197 for i in `seq 10`; do
198 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
201 run_test 4a "|x| 10 open(O_CREAT)s"
204 replay_barrier $SINGLEMDS
207 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
209 run_test 4b "|x| rm 10 files"
211 # The idea is to get past the first block of precreated files on both
212 # osts, and then replay.
214 replay_barrier $SINGLEMDS
215 for i in `seq 220`; do
216 echo "tag-$i" > $DIR/$tfile-$i
219 for i in `seq 220`; do
220 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
224 # waiting for commitment of removal
226 run_test 5 "|x| 220 open(O_CREAT)"
229 test_6a() { # was test_6
231 replay_barrier $SINGLEMDS
232 mcreate $DIR/$tdir/$tfile
234 $CHECKSTAT -t dir $DIR/$tdir || return 1
235 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
237 # waiting for log process thread
239 run_test 6a "mkdir + contained create"
243 replay_barrier $SINGLEMDS
246 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
248 run_test 6b "|X| rmdir"
252 replay_barrier $SINGLEMDS
253 mcreate $DIR/$tdir/$tfile
255 $CHECKSTAT -t dir $DIR/$tdir || return 1
256 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
259 run_test 7 "mkdir |X| contained create"
262 # make sure no side-effect from previous test.
264 replay_barrier $SINGLEMDS
265 multiop_bg_pause $DIR/$tfile mo_c || return 4
269 $CHECKSTAT -t file $DIR/$tfile || return 1
270 kill -USR1 $MULTIPID || return 2
271 wait $MULTIPID || return 3
274 run_test 8 "creat open |X| close"
277 replay_barrier $SINGLEMDS
279 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
281 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
283 echo " old_inum == $old_inum, new_inum == $new_inum"
284 if [ $old_inum -eq $new_inum ] ;
286 echo " old_inum and new_inum match"
288 echo "!!!! old_inum and new_inum NOT match"
293 run_test 9 "|X| create (same inum/gen)"
297 replay_barrier $SINGLEMDS
298 mv $DIR/$tfile $DIR/$tfile-2
301 $CHECKSTAT $DIR/$tfile && return 1
302 $CHECKSTAT $DIR/$tfile-2 ||return 2
306 run_test 10 "create |X| rename unlink"
310 echo "old" > $DIR/$tfile
311 mv $DIR/$tfile $DIR/$tfile-2
312 replay_barrier $SINGLEMDS
313 echo "new" > $DIR/$tfile
315 grep old $DIR/$tfile-2
317 grep new $DIR/$tfile || return 1
318 grep old $DIR/$tfile-2 || return 2
320 run_test 11 "create open write rename |X| create-old-name read"
324 multiop_bg_pause $DIR/$tfile o_tSc || return 3
327 replay_barrier $SINGLEMDS
329 wait $pid || return 1
332 [ -e $DIR/$tfile ] && return 2
335 run_test 12 "open, unlink |X| close"
338 # 1777 - replay open after committed chmod that would make
339 # a regular open a failure
342 multiop_bg_pause $DIR/$tfile O_wc || return 3
345 $CHECKSTAT -p 0 $DIR/$tfile
346 replay_barrier $SINGLEMDS
349 wait $pid || return 1
351 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
352 rm $DIR/$tfile || return 4
355 run_test 13 "open chmod 0 |x| write close"
358 multiop_bg_pause $DIR/$tfile O_tSc || return 4
361 replay_barrier $SINGLEMDS
362 kill -USR1 $pid || return 1
363 wait $pid || return 2
366 [ -e $DIR/$tfile ] && return 3
369 run_test 14 "open(O_CREAT), unlink |X| close"
372 multiop_bg_pause $DIR/$tfile O_tSc || return 5
375 replay_barrier $SINGLEMDS
376 touch $DIR/g11 || return 1
378 wait $pid || return 2
381 [ -e $DIR/$tfile ] && return 3
382 touch $DIR/h11 || return 4
385 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
389 replay_barrier $SINGLEMDS
392 mcreate $DIR/$tfile-2
394 [ -e $DIR/$tfile ] && return 1
395 [ -e $DIR/$tfile-2 ] || return 2
396 munlink $DIR/$tfile-2 || return 3
398 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
401 replay_barrier $SINGLEMDS
402 multiop_bg_pause $DIR/$tfile O_c || return 4
405 kill -USR1 $pid || return 1
406 wait $pid || return 2
407 $CHECKSTAT -t file $DIR/$tfile || return 3
410 run_test 17 "|X| open(O_CREAT), |replay| close"
413 replay_barrier $SINGLEMDS
414 multiop_bg_pause $DIR/$tfile O_tSc || return 8
417 touch $DIR/$tfile-2 || return 1
418 echo "pid: $pid will close"
420 wait $pid || return 2
423 [ -e $DIR/$tfile ] && return 3
424 [ -e $DIR/$tfile-2 ] || return 4
425 # this touch frequently fails
426 touch $DIR/$tfile-3 || return 5
427 munlink $DIR/$tfile-2 || return 6
428 munlink $DIR/$tfile-3 || return 7
431 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
433 # bug 1855 (a simpler form of test_11 above)
435 replay_barrier $SINGLEMDS
437 echo "old" > $DIR/$tfile
438 mv $DIR/$tfile $DIR/$tfile-2
439 grep old $DIR/$tfile-2
441 grep old $DIR/$tfile-2 || return 2
443 run_test 19 "|X| mcreate, open, write, rename "
445 test_20a() { # was test_20
446 replay_barrier $SINGLEMDS
447 multiop_bg_pause $DIR/$tfile O_tSc || return 3
453 wait $pid || return 1
454 [ -e $DIR/$tfile ] && return 2
457 run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
459 test_20b() { # bug 10480
# Record space used (1K blocks, POSIX df output) before creating the orphan.
460 BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
# Write ~40MB in the background so the file has OST objects to orphan.
462 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
464 while [ ! -e $DIR/$tfile ] ; do
465 usleep 60 # give dd a chance to start
468 lfs getstripe $DIR/$tfile || return 1
469 rm -f $DIR/$tfile || return 2 # make it an orphan
471 client_up || client_up || true # reconnect
473 fail $SINGLEMDS # start orphan recovery
474 wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
475 wait_mds_ost_sync || return 3
476 AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
477 log "before $BEFOREUSED, after $AFTERUSED"
# Orphan cleanup should have freed the space; allow 20 blocks of slack.
478 [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
479 error "after $AFTERUSED > before $BEFOREUSED"
482 run_test 20b "write, unlink, eviction, replay, (test mds_cleanup_orphans)"
484 test_20c() { # bug 10480
485 multiop_bg_pause $DIR/$tfile Ow_c || return 1
491 client_up || client_up || true # reconnect
494 wait $pid || return 1
495 [ -s $DIR/$tfile ] || error "File was truncated"
499 run_test 20c "check that client eviction does not affect file content"
502 replay_barrier $SINGLEMDS
503 multiop_bg_pause $DIR/$tfile O_tSc || return 5
506 touch $DIR/g11 || return 1
510 wait $pid || return 2
511 [ -e $DIR/$tfile ] && return 3
512 touch $DIR/h11 || return 4
515 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
518 multiop_bg_pause $DIR/$tfile O_tSc || return 3
521 replay_barrier $SINGLEMDS
526 wait $pid || return 1
527 [ -e $DIR/$tfile ] && return 2
530 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
533 multiop_bg_pause $DIR/$tfile O_tSc || return 5
536 replay_barrier $SINGLEMDS
538 touch $DIR/g11 || return 1
542 wait $pid || return 2
543 [ -e $DIR/$tfile ] && return 3
544 touch $DIR/h11 || return 4
547 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
550 multiop_bg_pause $DIR/$tfile O_tSc || return 3
553 replay_barrier $SINGLEMDS
557 wait $pid || return 1
558 [ -e $DIR/$tfile ] && return 2
561 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
564 multiop_bg_pause $DIR/$tfile O_tSc || return 3
568 replay_barrier $SINGLEMDS
571 wait $pid || return 1
572 [ -e $DIR/$tfile ] && return 2
575 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
578 replay_barrier $SINGLEMDS
579 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
581 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
586 wait $pid2 || return 1
590 wait $pid1 || return 2
591 [ -e $DIR/$tfile-1 ] && return 3
592 [ -e $DIR/$tfile-2 ] && return 4
595 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
598 replay_barrier $SINGLEMDS
599 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
601 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
608 wait $pid1 || return 1
610 wait $pid2 || return 2
611 [ -e $DIR/$tfile-1 ] && return 3
612 [ -e $DIR/$tfile-2 ] && return 4
615 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
618 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
620 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
622 replay_barrier $SINGLEMDS
626 wait $pid2 || return 1
630 wait $pid1 || return 2
631 [ -e $DIR/$tfile-1 ] && return 3
632 [ -e $DIR/$tfile-2 ] && return 4
635 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
638 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
640 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
642 replay_barrier $SINGLEMDS
648 wait $pid1 || return 1
650 wait $pid2 || return 2
651 [ -e $DIR/$tfile-1 ] && return 3
652 [ -e $DIR/$tfile-2 ] && return 4
655 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
658 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
660 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
665 replay_barrier $SINGLEMDS
668 wait $pid1 || return 1
670 wait $pid2 || return 2
671 [ -e $DIR/$tfile-1 ] && return 3
672 [ -e $DIR/$tfile-2 ] && return 4
675 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
678 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
680 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
684 replay_barrier $SINGLEMDS
688 wait $pid1 || return 1
690 wait $pid2 || return 2
691 [ -e $DIR/$tfile-1 ] && return 3
692 [ -e $DIR/$tfile-2 ] && return 4
695 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
697 # tests for bug 2104; completion without crashing is success. The close is
698 # stale, but we always return 0 for close, so the app never sees it.
# Hold two concurrent opens of the same file; both closes must complete
# without crashing even if one handle has gone stale.
700 multiop_bg_pause $DIR/$tfile O_c || return 2
702 multiop_bg_pause $DIR/$tfile O_c || return 3
# client_up is retried once: the first attempt may race with reconnection.
705 client_up || client_up || return 1
708 wait $pid1 || return 4
709 wait $pid2 || return 5
712 run_test 32 "close() notices client eviction; close() after client eviction"
715 createmany -o $DIR/$tfile-%d 10
# Barrier without syncing, then abort recovery: the uncommitted creates are
# discarded and their FID sequence must not be handed out again.
716 replay_barrier_nosync $SINGLEMDS
717 fail_abort $SINGLEMDS
718 # recreate shouldn't fail
719 createmany -o $DIR/$tfile--%d 10 || return 1
723 run_test 33a "fid seq shouldn't be reused after abort recovery"
726 #define OBD_FAIL_SEQ_ALLOC 0x1311
727 do_facet $SINGLEMDS "lctl set_param fail_loc=0x1311"
729 createmany -o $DIR/$tfile-%d 10
730 replay_barrier_nosync $SINGLEMDS
731 fail_abort $SINGLEMDS
732 # recreate shouldn't fail
733 createmany -o $DIR/$tfile--%d 10 || return 1
737 run_test 33b "test fid seq allocation"
740 multiop_bg_pause $DIR/$tfile O_c || return 2
744 replay_barrier $SINGLEMDS
745 fail_abort $SINGLEMDS
747 wait $pid || return 3
748 [ -e $DIR/$tfile ] && return 1
752 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
754 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
758 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
759 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
764 # give a chance to remove from MDS
765 fail_abort $SINGLEMDS
766 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
768 run_test 35 "test recovery from llog for unlink op"
770 # b=2432 resent cancel after replay uses wrong cookie,
771 # so don't resend cancels
773 replay_barrier $SINGLEMDS
775 checkstat $DIR/$tfile
776 facet_failover $SINGLEMDS
778 if dmesg | grep "unknown lock cookie"; then
779 echo "cancel after replay failed"
783 run_test 36 "don't resend cancel"
786 # directory orphans can't be unlinked from PENDING directory
788 rmdir $DIR/$tfile 2>/dev/null
789 multiop_bg_pause $DIR/$tfile dD_c || return 2
793 replay_barrier $SINGLEMDS
794 # clear the dmesg buffer so we only see errors from this recovery
796 fail_abort $SINGLEMDS
798 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
799 wait $pid || return 3
803 start_full_debug_logging
804 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
805 stop_full_debug_logging
808 createmany -o $DIR/$tfile-%d 800
809 unlinkmany $DIR/$tfile-%d 0 400
810 replay_barrier $SINGLEMDS
812 unlinkmany $DIR/$tfile-%d 400 400
814 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
816 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
818 test_39() { # bug 4176
819 createmany -o $DIR/$tfile-%d 800
820 replay_barrier $SINGLEMDS
821 unlinkmany $DIR/$tfile-%d 0 400
823 unlinkmany $DIR/$tfile-%d 400 400
825 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
827 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
830 lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
835 $LCTL mark multiop $MOUNT/$tfile OS_c
836 multiop $MOUNT/$tfile OS_c &
838 writeme -s $MOUNT/${tfile}-2 &
841 facet_failover $SINGLEMDS
842 #define OBD_FAIL_MDS_CONNECT_NET 0x117
843 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
845 stat1=`count_ost_writes`
847 stat2=`count_ost_writes`
848 echo "$stat1, $stat2"
849 if [ $stat1 -lt $stat2 ]; then
850 echo "writes continuing during recovery"
853 echo "writes not continuing during recovery, bug 2477"
856 echo "waiting for writeme $WRITE_PID"
860 echo "waiting for multiop $PID"
861 wait $PID || return 2
862 do_facet client munlink $MOUNT/$tfile || return 3
863 do_facet client munlink $MOUNT/${tfile}-2 || return 3
866 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
870 # make sure that a read to one osc doesn't try to double-unlock its page just
871 # because another osc is invalid. trigger_group_io used to mistakenly return
872 # an error if any oscs were invalid even after having successfully put rpcs
873 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
874 # the page, guaranteeing that the unlock from the RPC completion would
875 # assert on trying to unlock the unlocked page.
877 [ $OSTCOUNT -lt 2 ] && \
878 skip_env "skipping test 41: we don't have a second OST to test with" && \
881 local f=$MOUNT/$tfile
882 # make sure the start of the file is ost1
883 lfs setstripe $f -s $((128 * 1024)) -i 0
884 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
886 # fail ost2 and read from ost1
887 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost2_svc)
888 local osc2dev=$(do_facet $SINGLEMDS "lctl get_param -n devices" | \
889 grep $mdtosc | awk '{print $1}')
890 [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4
891 do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1
892 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
893 do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2
896 run_test 41 "read from a valid osc while other oscs are invalid"
898 # test MDS recovery after ost failure
900 blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
901 createmany -o $DIR/$tfile-%d 800
903 unlinkmany $DIR/$tfile-%d 0 400
905 lctl set_param debug=-1
908 # osc is evicted, fs is smaller (but only with failout OSTs (bug 7287)
909 #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
910 #[ $blocks_after -lt $blocks ] || return 1
911 echo wait for MDS to timeout and recover
912 sleep $((TIMEOUT * 2))
914 unlinkmany $DIR/$tfile-%d 400 400
915 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
917 run_test 42 "recovery after ost failure"
919 # timeout in MDS/OST recovery RPC will LBUG MDS
920 test_43() { # bug 2530
921 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
923 replay_barrier $SINGLEMDS
925 # OBD_FAIL_OST_CREATE_NET 0x204
926 do_facet ost1 "lctl set_param fail_loc=0x80000204"
929 do_facet ost1 "lctl set_param fail_loc=0"
933 run_test 43 "mds osc import failure during recovery; don't LBUG"
935 test_44a() { # was test_44
# Find the client-side MDC device number for MDT0000; must match exactly one.
938 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
939 [ "$mdcdev" ] || return 2
940 [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
942 # adaptive timeouts slow this way down
943 if at_is_enabled; then
944 at_max_saved=$(at_max_get mds)
# Repeatedly force the connect-race fail_loc and trigger a reconnect.
948 for i in `seq 1 10`; do
949 echo "$i of 10 ($(date +%s))"
950 do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
951 #define OBD_FAIL_TGT_CONN_RACE 0x701
952 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
953 # lctl below may fail, it is valid case
954 $LCTL --device $mdcdev recover
957 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
# Restore the adaptive-timeout maximum if we saved one above.
958 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
961 run_test 44a "race in target handle connect"
964 local mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
965 [ "$mdcdev" ] || return 2
966 [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
968 for i in `seq 1 10`; do
969 echo "$i of 10 ($(date +%s))"
970 do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
971 #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
972 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
973 # lctl below may fail, it is valid case
974 $LCTL --device $mdcdev recover
977 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
980 run_test 44b "race in target handle connect"
982 # Handle failed close
984 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
985 [ "$mdcdev" ] || return 2
986 [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
988 $LCTL --device $mdcdev recover || return 6
990 multiop_bg_pause $DIR/$tfile O_c || return 1
993 # This will cause the CLOSE to fail before even
994 # allocating a reply buffer
995 $LCTL --device $mdcdev deactivate || return 4
999 wait $pid || return 1
1001 $LCTL --device $mdcdev activate || return 5
1004 $CHECKSTAT -t file $DIR/$tfile || return 2
1007 run_test 45 "Handle failed close"
1011 drop_reply "touch $DIR/$tfile"
1013 # ironically, the previous test, 45, will cause a real forced close,
1014 # so just look for one for this test
1015 dmesg | grep -i "force closing client file handle for $tfile" && return 1
1018 run_test 46 "Don't leak file handle after open resend (3325)"
1020 test_47() { # bug 2824
1021 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1023 # create some files to make sure precreate has been done on all
1024 # OSTs. (just in case this test is run independently)
1025 createmany -o $DIR/$tfile 20 || return 1
1027 # OBD_FAIL_OST_CREATE_NET 0x204
# Make ost1 drop one object-create request so the MDS sees the OST fail.
1029 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1030 client_up || return 2
1032 # let the MDS discover the OST failure, attempt to recover, fail
1033 # and recover again.
1034 sleep $((3 * TIMEOUT))
1036 # Without 2824, this createmany would hang
1037 createmany -o $DIR/$tfile 20 || return 3
1038 unlinkmany $DIR/$tfile 20 || return 4
1040 do_facet ost1 "lctl set_param fail_loc=0"
1043 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
1046 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1047 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2 OSTs -- skipping" && return
1049 replay_barrier $SINGLEMDS
1050 createmany -o $DIR/$tfile 20 || return 1
1051 # OBD_FAIL_OST_EROFS 0x216
1052 facet_failover $SINGLEMDS
1053 do_facet ost1 "lctl set_param fail_loc=0x80000216"
1054 client_up || return 2
1056 createmany -o $DIR/$tfile 20 20 || return 2
1057 unlinkmany $DIR/$tfile 40 || return 3
1060 run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
1063 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost1_svc)
1064 local oscdev=$(do_facet $SINGLEMDS "lctl get_param -n devices" | \
1065 grep $mdtosc | awk '{print $1}')
1066 [ "$oscdev" ] || return 1
1067 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 2
1068 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 3
1069 # give the mds_lov_sync threads a chance to run
1072 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1074 # b3764 timed out lock replay
1077 cancel_lru_locks mdc
1079 multiop $DIR/$tfile s || return 1
1080 replay_barrier $SINGLEMDS
1081 #define OBD_FAIL_LDLM_REPLY 0x30c
1082 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
1083 fail $SINGLEMDS || return 2
1084 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1086 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1088 run_test 52 "time out lock replay (3764)"
1090 # bug 3462 - simultaneous MDC requests
1092 cancel_lru_locks mdc # cleanup locks from former test cases
1093 mkdir -p $DIR/${tdir}-1
1094 mkdir -p $DIR/${tdir}-2
1095 multiop $DIR/${tdir}-1/f O_c &
1097 # give multiop a chance to open
1100 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1101 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1102 kill -USR1 $close_pid
1103 cancel_lru_locks mdc # force the close
1104 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1106 mcreate $DIR/${tdir}-2/f || return 1
1108 # close should still be here
1109 [ -d /proc/$close_pid ] || return 2
1111 replay_barrier_nodf $SINGLEMDS
1113 wait $close_pid || return 3
1115 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1116 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1117 rm -rf $DIR/${tdir}-*
1119 run_test 53a "|X| close request while two MDC requests in flight"
1122 cancel_lru_locks mdc # cleanup locks from former test cases
1123 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1125 mkdir -p $DIR/${tdir}-1
1126 mkdir -p $DIR/${tdir}-2
1127 multiop_bg_pause $DIR/${tdir}-1/f O_c || return 6
1130 #define OBD_FAIL_MDS_REINT_NET 0x107
1131 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1132 mcreate $DIR/${tdir}-2/f &
1136 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1137 kill -USR1 $close_pid
1138 cancel_lru_locks mdc # force the close
1139 wait $close_pid || return 1
1140 # open should still be here
1141 [ -d /proc/$open_pid ] || return 2
1143 replay_barrier_nodf $SINGLEMDS
1145 wait $open_pid || return 3
1147 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1148 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1149 rm -rf $DIR/${tdir}-*
1151 run_test 53b "|X| open request while two MDC requests in flight"
1154 cancel_lru_locks mdc # cleanup locks from former test cases
1155 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1157 mkdir -p $DIR/${tdir}-1
1158 mkdir -p $DIR/${tdir}-2
1159 multiop $DIR/${tdir}-1/f O_c &
1162 #define OBD_FAIL_MDS_REINT_NET 0x107
1163 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1164 mcreate $DIR/${tdir}-2/f &
1168 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1169 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1170 kill -USR1 $close_pid
1171 cancel_lru_locks mdc # force the close
1173 #bz20647: make sure all pids exist before failover
1174 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1175 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1176 replay_barrier_nodf $SINGLEMDS
1177 fail_nodf $SINGLEMDS
1178 wait $open_pid || return 1
1180 # close should be gone
1181 [ -d /proc/$close_pid ] && return 2
1182 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1184 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1185 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1186 rm -rf $DIR/${tdir}-*
1188 run_test 53c "|X| open request and close request while two MDC requests in flight"
1191 cancel_lru_locks mdc # cleanup locks from former test cases
1192 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1194 mkdir -p $DIR/${tdir}-1
1195 mkdir -p $DIR/${tdir}-2
1196 multiop $DIR/${tdir}-1/f O_c &
1198 # give multiop a chance to open
1201 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1202 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1203 kill -USR1 $close_pid
1204 cancel_lru_locks mdc # force the close
1205 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1206 mcreate $DIR/${tdir}-2/f || return 1
1208 # close should still be here
1209 [ -d /proc/$close_pid ] || return 2
1211 wait $close_pid || return 3
1213 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1214 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1215 rm -rf $DIR/${tdir}-*
1217 run_test 53d "|X| close reply while two MDC requests in flight"
1220 cancel_lru_locks mdc # cleanup locks from former test cases
1221 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1223 mkdir -p $DIR/${tdir}-1
1224 mkdir -p $DIR/${tdir}-2
1225 multiop $DIR/${tdir}-1/f O_c &
1228 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1229 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1230 mcreate $DIR/${tdir}-2/f &
1234 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1235 kill -USR1 $close_pid
1236 cancel_lru_locks mdc # force the close
1237 wait $close_pid || return 1
1238 # open should still be here
1239 [ -d /proc/$open_pid ] || return 2
1241 replay_barrier_nodf $SINGLEMDS
1243 wait $open_pid || return 3
1245 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1246 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1247 rm -rf $DIR/${tdir}-*
1249 run_test 53e "|X| open reply while two MDC requests in flight"
1252 cancel_lru_locks mdc # cleanup locks from former test cases
1253 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1255 mkdir -p $DIR/${tdir}-1
1256 mkdir -p $DIR/${tdir}-2
1257 multiop $DIR/${tdir}-1/f O_c &
1260 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1261 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1262 mcreate $DIR/${tdir}-2/f &
1266 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1267 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1268 kill -USR1 $close_pid
1269 cancel_lru_locks mdc # force the close
1271 #bz20647: make sure all pids exist before failover
1272 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1273 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1274 replay_barrier_nodf $SINGLEMDS
1275 fail_nodf $SINGLEMDS
1276 wait $open_pid || return 1
1278 # close should be gone
1279 [ -d /proc/$close_pid ] && return 2
1280 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1282 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1283 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1284 rm -rf $DIR/${tdir}-*
1286 run_test 53f "|X| open reply and close reply while two MDC requests in flight"
1289 cancel_lru_locks mdc # cleanup locks from former test cases
1290 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1292 mkdir -p $DIR/${tdir}-1
1293 mkdir -p $DIR/${tdir}-2
1294 multiop $DIR/${tdir}-1/f O_c &
1297 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1298 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1299 mcreate $DIR/${tdir}-2/f &
1303 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1304 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1305 kill -USR1 $close_pid
1306 cancel_lru_locks mdc # force the close
1307 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1309 #bz20647: make sure all pids exist before failover
1310 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1311 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1312 replay_barrier_nodf $SINGLEMDS
1313 fail_nodf $SINGLEMDS
1314 wait $open_pid || return 1
1316 # close should be gone
1317 [ -d /proc/$close_pid ] && return 2
1319 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1320 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1321 rm -rf $DIR/${tdir}-*
1323 run_test 53g "|X| drop open reply and close request while close and open are both in flight"
1326 cancel_lru_locks mdc # cleanup locks from former test cases
1327 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1329 mkdir -p $DIR/${tdir}-1
1330 mkdir -p $DIR/${tdir}-2
1331 multiop $DIR/${tdir}-1/f O_c &
1334 #define OBD_FAIL_MDS_REINT_NET 0x107
1335 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1336 mcreate $DIR/${tdir}-2/f &
1340 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1341 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1342 kill -USR1 $close_pid
1343 cancel_lru_locks mdc # force the close
1346 #bz20647: make sure all pids exist before failover
1347 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1348 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1349 replay_barrier_nodf $SINGLEMDS
1350 fail_nodf $SINGLEMDS
1351 wait $open_pid || return 1
1353 # close should be gone
1354 [ -d /proc/$close_pid ] && return 2
1355 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1357 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1358 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1359 rm -rf $DIR/${tdir}-*
1361 run_test 53h "|X| open request and close reply while two MDC requests in flight"
1363 #b_cray 54 "|X| open request and close reply while two MDC requests in flight"
1365 #b3761 ASSERTION(hash != 0) failed
1367 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1368 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
1370 # give touch a chance to run
1372 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1376 run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
1378 #b3440 ASSERTION(rec->ur_fid2->id) failed
1380 ln -s foo $DIR/$tfile
1381 replay_barrier $SINGLEMDS
1382 #drop_reply "cat $DIR/$tfile"
1386 run_test 56 "don't replay a symlink open request (3440)"
1388 #recovery one mds-ost setattr from llog
1390 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1391 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1393 replay_barrier $SINGLEMDS
1396 $CHECKSTAT -t file $DIR/$tfile || return 1
1397 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1400 run_test 57 "test recovery from llog for setattr op"
1402 #recovery many mds-ost setattr from llog
1405 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1406 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1407 createmany -o $DIR/$tdir/$tfile-%d 2500
1408 replay_barrier $SINGLEMDS
1411 $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null || return 1
1412 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1413 unlinkmany $DIR/$tdir/$tfile-%d 2500
1416 run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
1419 mount_client $MOUNT2
1421 touch $DIR/$tdir/$tfile
1422 replay_barrier $SINGLEMDS
1423 setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile
1425 VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
1426 [ x$VAL = x"bar" ] || return 1
1427 rm -f $DIR/$tdir/$tfile
1429 zconf_umount `hostname` $MOUNT2
1431 run_test 58b "test replay of setxattr op"
1433 test_58c() { # bug 16570
1434 mount_client $MOUNT2
1436 touch $DIR/$tdir/$tfile
1437 drop_request "setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile" || \
1439 VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
1440 [ x$VAL = x"bar" ] || return 2
1441 drop_reint_reply "setfattr -n trusted.foo1 -v bar1 $DIR/$tdir/$tfile" || \
1443 VAL=`getfattr --absolute-names --only-value -n trusted.foo1 $MOUNT2/$tdir/$tfile`
1444 [ x$VAL = x"bar1" ] || return 4
1445 rm -f $DIR/$tdir/$tfile
1447 zconf_umount `hostname` $MOUNT2
1449 run_test 58c "resend/reconstruct setxattr op"
1451 # log_commit_thread vs filter_destroy race used to lead to import use after free
1454 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1457 createmany -o $DIR/$tdir/$tfile-%d 200
1459 unlinkmany $DIR/$tdir/$tfile-%d 200
1460 #define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
1461 do_facet ost1 "lctl set_param fail_loc=0x507"
1464 do_facet ost1 "lctl set_param fail_loc=0x0"
1468 run_test 59 "test log_commit_thread vs filter_destroy race"
1470 # race between add unlink llog vs cat log init in post_recovery (only for b1_6)
1471 # bug 12086: should not oops, and no "No ctxt" error should appear for this test
1474 createmany -o $DIR/$tdir/$tfile-%d 200
1475 replay_barrier $SINGLEMDS
1476 unlinkmany $DIR/$tdir/$tfile-%d 0 100
1478 unlinkmany $DIR/$tdir/$tfile-%d 100 100
1479 local no_ctxt=`dmesg | grep "No ctxt"`
1480 [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
1482 run_test 60 "test llog post recovery init vs llog unlink"
1484 #test race llog recovery thread vs llog cleanup
1485 test_61a() { # was test_61
1486 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1489 createmany -o $DIR/$tdir/$tfile-%d 800
1491 # OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
1492 unlinkmany $DIR/$tdir/$tfile-%d 800
1493 set_nodes_failloc "$(osts_nodes)" 0x80000221
1498 set_nodes_failloc "$(osts_nodes)" 0x0
1500 $CHECKSTAT -t file $DIR/$tdir/$tfile-* && return 1
1503 run_test 61a "test race llog recovery vs llog cleanup"
1505 #test race mds llog sync vs llog cleanup
1507 # OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
1508 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013a"
1509 facet_failover $SINGLEMDS
1512 do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 || return 1
1514 run_test 61b "test race mds llog sync vs llog cleanup"
1516 #test race cancel cookie cb vs llog cleanup
1518 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1520 # OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
1522 set_nodes_failloc "$(osts_nodes)" 0x80000222
1526 set_nodes_failloc "$(osts_nodes)" 0x0
1528 run_test 61c "test race mds llog sync vs llog cleanup"
1530 test_61d() { # bug 16002 # bug 17466 # bug 22137
1531 # OBD_FAIL_OBD_LLOG_SETUP 0x605
1533 do_facet mgs "lctl set_param fail_loc=0x80000605"
1534 start mgs $MGSDEV $MGS_MOUNT_OPTS && error "mgs start should have failed"
1535 do_facet mgs "lctl set_param fail_loc=0"
1536 start mgs $MGSDEV $MGS_MOUNT_OPTS || error "cannot restart mgs"
1538 run_test 61d "error in llog_setup should cleanup the llog context correctly"
1540 test_62() { # Bug 15756 - don't mis-drop resent replay
1542 replay_barrier $SINGLEMDS
1543 createmany -o $DIR/$tdir/$tfile- 25
1544 #define OBD_FAIL_TGT_REPLAY_DROP 0x707
1545 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
1547 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1548 unlinkmany $DIR/$tdir/$tfile- 25 || return 2
1551 run_test 62 "don't mis-drop resent replay"
1553 #Adaptive Timeouts (bug 3055)
1561 echo "Cleaning up AT ..."
1562 if [ -n "$ATOLDBASE" ]; then
1563 local at_history=$($LCTL get_param -n at_history)
1564 do_facet mds "lctl set_param at_history=$at_history" || true
1565 do_facet ost1 "lctl set_param at_history=$at_history" || true
1568 if [ $AT_MAX_SET -ne 0 ]; then
1569 for facet in mds client ost; do
1570 var=AT_MAX_SAVE_${facet}
1571 echo restore AT on $facet to saved value ${!var}
1572 at_max_set ${!var} $facet
1573 at_new=$(at_max_get $facet)
1574 echo Restored AT value on $facet $at_new
1575 [ $at_new -eq ${!var} ] || \
1576 error "$facet : AT value was not restored SAVED ${!var} NEW $at_new"
1583 local at_max_new=600
1585 # Save at_max original values
1587 if [ $AT_MAX_SET -eq 0 ]; then
1588 # Suppose that all osts have the same at_max
1589 for facet in mds client ost; do
1590 eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
1594 for facet in mds client ost; do
1595 at_max=$(at_max_get $facet)
1596 if [ $at_max -ne $at_max_new ]; then
1597 echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new"
1598 at_max_set $at_max_new $facet
1603 if [ -z "$ATOLDBASE" ]; then
1604 ATOLDBASE=$(do_facet mds "lctl get_param -n at_history")
1605 # speed up the timebase so we can check decreasing AT
1606 do_facet mds "lctl set_param at_history=8" || true
1607 do_facet ost1 "lctl set_param at_history=8" || true
1609 # sleep for a while to cool down, should be > 8s and also allow
1610 # at least one ping to be sent. simply use TIMEOUT to be safe.
1615 test_65a() #bug 3055
1617 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1619 at_start || return 0
1620 $LCTL dk > /dev/null
1622 sysctl -w lnet.debug="+other"
1623 # Slow down a request to the current service time, this is critical
1624 # because previous tests may have caused this value to increase.
1625 REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
1626 awk '/portal 12/ {print $5}'`
1627 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
1629 do_facet mds lctl set_param fail_val=$((${REQ_DELAY} * 1000))
1630 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1631 do_facet mds sysctl -w lustre.fail_loc=0x8000050a
1632 createmany -o $DIR/$tfile 10 > /dev/null
1633 unlinkmany $DIR/$tfile 10 > /dev/null
1634 # check for log message
1635 $LCTL dk | grep "Early reply #" || error "No early reply"
1637 # client should show REQ_DELAY estimates
1638 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1640 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1642 run_test 65a "AT: verify early replies"
1644 test_65b() #bug 3055
1646 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1648 at_start || return 0
1651 sysctl -w lnet.debug="other trace"
1652 $LCTL dk > /dev/null
1653 # Slow down a request to the current service time, this is critical
1654 # because previous tests may have caused this value to increase.
1655 lfs setstripe $DIR/$tfile --index=0 --count=1
1656 multiop $DIR/$tfile Ow1yc
1657 REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
1658 awk '/portal 6/ {print $5}'`
1659 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
1661 do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
1662 #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
1663 do_facet ost1 sysctl -w lustre.fail_loc=0x224
1666 lfs setstripe $DIR/$tfile --index=0 --count=1
1667 # force some real bulk transfer
1668 multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
1670 do_facet ost1 sysctl -w lustre.fail_loc=0
1671 # check for log message
1672 $LCTL dk | grep "Early reply #" || error "No early reply"
1674 # client should show REQ_DELAY estimates
1675 lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
1677 run_test 65b "AT: verify early replies on packed reply / bulk"
1679 test_66a() #bug 3055
1681 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1683 at_start || return 0
1684 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1685 # adjust 5s at a time so no early reply is sent (within deadline)
1686 do_facet mds "sysctl -w lustre.fail_val=5000"
1687 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1688 do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
1689 createmany -o $DIR/$tfile 20 > /dev/null
1690 unlinkmany $DIR/$tfile 20 > /dev/null
1691 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1692 do_facet mds "sysctl -w lustre.fail_val=10000"
1693 do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
1694 createmany -o $DIR/$tfile 20 > /dev/null
1695 unlinkmany $DIR/$tfile 20 > /dev/null
1696 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1697 do_facet mds "sysctl -w lustre.fail_loc=0"
1699 createmany -o $DIR/$tfile 20 > /dev/null
1700 unlinkmany $DIR/$tfile 20 > /dev/null
1701 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1702 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
1703 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
1704 echo "Current MDT timeout $CUR, worst $WORST"
1705 [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
1707 run_test 66a "AT: verify MDT service time adjusts with no early replies"
1709 test_66b() #bug 3055
1711 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1713 at_start || return 0
1714 ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
1715 sysctl -w lustre.fail_val=$(($ORIG + 5))
1716 #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
1717 sysctl -w lustre.fail_loc=0x50c
1718 ls $DIR/$tfile > /dev/null 2>&1
1719 sysctl -w lustre.fail_loc=0
1720 CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
1721 WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
1722 echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
1723 [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
1725 run_test 66b "AT: verify net latency adjusts"
1727 test_67a() #bug 3055
1729 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1731 at_start || return 0
1732 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1733 # sleeping threads may drive values above this
1734 do_facet ost1 "sysctl -w lustre.fail_val=400"
1735 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1736 do_facet ost1 "sysctl -w lustre.fail_loc=0x50a"
1737 createmany -o $DIR/$tfile 20 > /dev/null
1738 unlinkmany $DIR/$tfile 20 > /dev/null
1739 do_facet ost1 "sysctl -w lustre.fail_loc=0"
1740 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1741 ATTEMPTS=$(($CONN2 - $CONN1))
1742 echo "$ATTEMPTS osc reconnect attempts on gradual slow"
1743 [ $ATTEMPTS -gt 0 ] && error_ignore 13721 "AT should have prevented reconnect"
1746 run_test 67a "AT: verify slow request processing doesn't induce reconnects"
1748 test_67b() #bug 3055
1750 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1752 at_start || return 0
1753 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1755 # exhaust precreations on ost1
1756 local OST=$(lfs osts | grep ^0": " | awk '{print $2}' | sed -e 's/_UUID$//')
1757 local mdtosc=$(get_mdtosc_proc_path mds $OST)
1758 local last_id=$(do_facet mds lctl get_param -n \
1759 osc.$mdtosc.prealloc_last_id)
1760 local next_id=$(do_facet mds lctl get_param -n \
1761 osc.$mdtosc.prealloc_next_id)
1763 mkdir -p $DIR/$tdir/${OST}
1764 lfs setstripe $DIR/$tdir/${OST} -o 0 -c 1 || error "setstripe"
1765 echo "Creating to objid $last_id on ost $OST..."
1766 #define OBD_FAIL_OST_PAUSE_CREATE 0x223
1767 do_facet ost1 "sysctl -w lustre.fail_val=20000"
1768 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
1769 createmany -o $DIR/$tdir/${OST}/f $next_id $((last_id - next_id + 2))
1772 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1774 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1775 ATTEMPTS=$(($CONN2 - $CONN1))
1776 echo "$ATTEMPTS osc reconnect attempts on instant slow"
1777 # do it again; should not timeout
1778 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
1779 cp /etc/profile $DIR/$tfile || error "cp failed"
1780 do_facet ost1 "sysctl -w lustre.fail_loc=0"
1782 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1783 CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1784 ATTEMPTS=$(($CONN3 - $CONN2))
1785 echo "$ATTEMPTS osc reconnect attempts on 2nd slow"
1786 [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
1789 run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
1791 test_68 () #bug 13813
1793 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1795 at_start || return 0
1796 local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
1797 [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
1798 local ldlm_enqueue_min_r=$(do_facet ost1 "find /sys -name ldlm_enqueue_min")
1799 [ -z "$ldlm_enqueue_min_r" ] && skip "missing /sys/.../ldlm_enqueue_min in the ost1" && return 0
1800 local ENQ_MIN=$(cat $ldlm_enqueue_min)
1801 local ENQ_MIN_R=$(do_facet ost1 "cat $ldlm_enqueue_min_r")
1802 echo $TIMEOUT >> $ldlm_enqueue_min
1803 do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r"
1807 lfs setstripe $DIR/$tdir --index=0 --count=1
1808 #define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
1809 sysctl -w lustre.fail_val=$(($TIMEOUT - 1))
1810 sysctl -w lustre.fail_loc=0x80000312
1811 cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
1812 sysctl -w lustre.fail_val=$((TIMEOUT * 5 / 4))
1813 sysctl -w lustre.fail_loc=0x80000312
1814 cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
1815 sysctl -w lustre.fail_loc=0
1817 echo $ENQ_MIN >> $ldlm_enqueue_min
1818 do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
1822 run_test 68 "AT: verify slowing locks"
1825 # end of AT tests includes above lines
1828 # start multi-client tests
1830 [ -z "$CLIENTS" ] && \
1831 { skip "Need two or more clients." && return; }
1832 [ $CLIENTCOUNT -lt 2 ] && \
1833 { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
1835 echo "mount clients $CLIENTS ..."
1836 zconf_mount_clients $CLIENTS $DIR
1838 local clients=${CLIENTS//,/ }
1839 echo "Write/read files on $DIR ; clients $CLIENTS ... "
1840 for CLIENT in $clients; do
1841 do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
1842 of=$DIR/${tfile}_${CLIENT} 2>/dev/null || \
1843 error "dd failed on $CLIENT"
1846 local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
1847 for C in ${CLIENTS//,/ }; do
1848 do_node $prev_client dd if=$DIR/${tfile}_${C} of=/dev/null 2>/dev/null || \
1849 error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
1855 run_test 70a "check multi client t-f"
1857 check_dbench_load () {
1858 local clients=${1//,/ }
1861 for client in $clients; do
1862 if ! do_node $client "ps ax | grep -v grep | awk '{ print $6 }' | grep -q rundbench"; then
1863 error_noexit "rundbench load on $client failed!"
1870 kill_dbench_load () {
1871 local clients=${1:-$(hostname)}
1872 do_nodes $clients "killall dbench"
1876 local clients=${CLIENTS:-$HOSTNAME}
1878 zconf_mount_clients $clients $DIR
1881 [ "$SLOW" = "no" ] && duration=60
1882 local cmd="rundbench 1 -t $duration"
1884 do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
1885 PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
1886 DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
1889 log "Started rundbench load PID=$PID ..."
1892 START_TS=$(date +%s)
1893 CURRENT_TS=$START_TS
1894 while [ $ELAPSED -lt $duration ]; do
1895 if ! check_dbench_load $clients; then
1896 kill_dbench_load $clients
1900 replay_barrier $SINGLEMDS
1901 sleep 1 # give clients a time to do operations
1902 # Increment the number of failovers
1903 NUM_FAILOVERS=$((NUM_FAILOVERS+1))
1904 log "$TESTNAME fail mds1 $NUM_FAILOVERS times"
1906 CURRENT_TS=$(date +%s)
1907 ELAPSED=$((CURRENT_TS - START_TS))
1909 wait $PID || error "rundbench load on $CLIENTS failed!"
1911 run_test 70b "mds recovery; $CLIENTCOUNT clients"
1912 # end multi-client tests
1915 multiop_bg_pause $DIR/$tfile O_tSc || return 3
1919 replay_barrier $SINGLEMDS
1920 #define OBD_FAIL_LDLM_ENQUEUE 0x302
1921 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
1924 wait $pid || return 1
1925 [ -e $DIR/$tfile ] && return 2
1928 run_test 73a "open(O_CREAT), unlink, replay, reconnect before open replay , close"
1931 multiop_bg_pause $DIR/$tfile O_tSc || return 3
1935 replay_barrier $SINGLEMDS
1936 #define OBD_FAIL_LDLM_REPLY 0x30c
1937 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
1940 wait $pid || return 1
1941 [ -e $DIR/$tfile ] && return 2
1944 run_test 73b "open(O_CREAT), unlink, replay, reconnect at open_replay reply, close"
1947 multiop_bg_pause $DIR/$tfile O_tSc || return 3
1951 replay_barrier $SINGLEMDS
1952 #define OBD_FAIL_TGT_LAST_REPLAY 0x710
1953 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000710"
1956 wait $pid || return 1
1957 [ -e $DIR/$tfile ] && return 2
1960 run_test 73c "open(O_CREAT), unlink, replay, reconnect at last_replay, close"
1964 local clients=${CLIENTS:-$HOSTNAME}
1967 zconf_umount_clients $clients $MOUNT
1968 facet_failover $SINGLEMDS
1969 zconf_mount_clients $clients $MOUNT
1971 touch $DIR/$tfile || return 1
1972 rm $DIR/$tfile || return 2
1973 clients_up || error "client evicted: $?"
1976 run_test 74 "Ensure applications don't fail waiting for OST recovery"
1979 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1983 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
1984 rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
# NOTE(review): "2&>/dev/null" is a bug — bash parses it as passing a literal
# argument "2" to stat, then redirecting stdout+stderr with "&>". The intended
# form is "2>/dev/null" (stderr only). Left unchanged here because this
# fragment's surrounding function lines are not visible in this view.
1986 stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
1989 run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
1992 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1996 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
1997 rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
# NOTE(review): same redirection bug as test_80a — "2&>/dev/null" passes a
# literal "2" argument to stat; the intended form is "2>/dev/null". Left
# unchanged here because the surrounding function lines are not visible.
1999 stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
2002 run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
2005 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2008 createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
2010 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
2011 $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
2013 rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
2015 stat $DIR/$tdir/f1002
2017 run_test 81a "CMD: unlink cross-node file (fail mds with name)"
2020 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2024 mkdir $dir || error "mkdir $dir failed"
2028 $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
2030 run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
2033 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2037 mkdir $dir || error "mkdir $dir failed"
2041 $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
2043 run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
2047 createmany -o $DIR/$tdir/$tfile- 10 || return 1
2048 #define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
2049 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140"
2050 unlinkmany $DIR/$tdir/$tfile- 10 || return 2
2052 run_test 83a "fail log_add during unlink recovery"
2056 createmany -o $DIR/$tdir/$tfile- 10 || return 1
2057 replay_barrier $SINGLEMDS
2058 unlinkmany $DIR/$tdir/$tfile- 10 || return 2
2059 #define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
2060 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140"
2063 run_test 83b "fail log_add during unlink recovery"
2066 #define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144
2067 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000144"
2068 createmany -o $DIR/$tfile- 1 &
2072 client_up || client_up || true # reconnect
2074 run_test 84a "stale open during export disconnect"
2076 test_85a() { #bug 16774
2077 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
2079 for i in `seq 100`; do
2080 echo "tag-$i" > $DIR/$tfile-$i
2081 grep -q "tag-$i" $DIR/$tfile-$i || error "f2-$i"
2084 lov_id=`lctl dl | grep "clilov"`
2085 addr=`echo $lov_id | awk '{print $4}' | awk -F '-' '{print $3}'`
2086 count=`lctl get_param -n ldlm.namespaces.*MDT0000*$addr.lock_unused_count`
2087 echo "before recovery: unused locks count = $count"
2091 count2=`lctl get_param -n ldlm.namespaces.*MDT0000*$addr.lock_unused_count`
2092 echo "after recovery: unused locks count = $count2"
2094 if [ $count2 -ge $count ]; then
2095 error "unused locks are not canceled"
2098 run_test 85a "check the cancellation of unused locks during recovery(IBITS)"
2101 local clients=${CLIENTS:-$HOSTNAME}
2103 zconf_umount_clients $clients $MOUNT
2104 do_facet $SINGLEMDS lctl set_param mdt.${FSNAME}-MDT*.exports.clear=0
2105 remount_facet $SINGLEMDS
2106 zconf_mount_clients $clients $MOUNT
2108 run_test 86 "umount server after clear nid_stats should not hit LBUG"
2111 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
2114 lfs setstripe -i 0 -c 1 $DIR/$tfile
2115 dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 || error "Cannot write"
2116 cksum=`md5sum $DIR/$tfile | awk '{print $1}'`
2117 cancel_lru_locks osc
2119 dd if=$DIR/$tfile of=/dev/null bs=1024k count=8 || error "Cannot read"
2120 cksum2=`md5sum $DIR/$tfile | awk '{print $1}'`
2121 if [ $cksum != $cksum2 ] ; then
2122 error "New checksum $cksum2 does not match original $cksum"
2125 run_test 87 "write replay"
2128 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
2131 lfs setstripe -i 0 -c 1 $DIR/$tfile
2132 dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 || error "Cannot write"
2133 sleep 1 # Give it a chance to flush dirty data
2134 echo TESTTEST | dd of=$DIR/$tfile bs=1 count=8 seek=64
2135 cksum=`md5sum $DIR/$tfile | awk '{print $1}'`
2136 cancel_lru_locks osc
2138 dd if=$DIR/$tfile of=/dev/null bs=1024k count=8 || error "Cannot read"
2139 cksum2=`md5sum $DIR/$tfile | awk '{print $1}'`
2140 if [ $cksum != $cksum2 ] ; then
2141 error "New checksum $cksum2 does not match original $cksum"
2144 run_test 87b "write replay with changed data (checksum resend)"
2146 test_88() { #bug 17485
2150 lfs setstripe $DIR/$tdir -o 0 -c 1 || error "setstripe"
2155 # exhaust precreations on ost1
2156 local OST=$(lfs osts | grep ^0": " | awk '{print $2}' | sed -e 's/_UUID$//')
2157 local mdtosc=$(get_mdtosc_proc_path $OST)
2158 local last_id=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_last_id)
2159 local next_id=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_next_id)
2160 echo "before test: last_id = $last_id, next_id = $next_id"
2162 echo "Creating to objid $last_id on ost $OST..."
2163 createmany -o $DIR/$tdir/f-%d $next_id $((last_id - next_id + 2))
2165 #create some files to use some uncommitted objids
2166 last_id=$(($last_id + 1))
2167 createmany -o $DIR/$tdir/f-%d $last_id 8
2169 last_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_last_id)
2170 next_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_next_id)
2171 echo "before recovery: last_id = $last_id2, next_id = $next_id2"
2179 mount_facet mds1 || error "Restart of mds failed"
2184 mount_facet ost1 || error "Restart of ost1 failed"
2188 last_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_last_id)
2189 next_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_next_id)
2190 echo "after recovery: last_id = $last_id2, next_id = $next_id2"
2192 # create new files, which should use new objids, and ensure the orphan
2193 # cleanup phase for ost1 is completed at the same time
2194 for i in `seq 8`; do
2195 file_id=$(($last_id + 10 + $i))
2196 dd if=/dev/urandom of=$DIR/$tdir/f-$file_id bs=4096 count=128
2199 # if the objids were not recreated, then "ls" will fail with -ENOENT
2200 ls -l $DIR/$tdir/* || error "can't get the status of precreated files"
2203 # write into previously created files
2204 for i in `seq 8`; do
2205 file_id=$(($last_id + $i))
2206 dd if=/dev/urandom of=$DIR/$tdir/f-$file_id bs=4096 count=128
2207 cp -f $DIR/$tdir/f-$file_id $TMP/$tdir/
2210 # compare the content
2211 for i in `seq 8`; do
2212 file_id=$(($last_id + $i))
2213 cmp $TMP/$tdir/f-$file_id $DIR/$tdir/f-$file_id || error "the content" \
2214 "of file is modified!"
2219 run_test 88 "MDS should not assign same objid to different files "
2223 rm -f $DIR/$tdir/$tfile
2225 BLOCKS1=$(df $MOUNT | tail -n 1 | awk '{ print $3 }')
2226 lfs setstripe -i 0 -c 1 $DIR/$tdir/$tfile
2227 dd if=/dev/zero bs=1M count=10 of=$DIR/$tdir/$tfile
2230 facet_failover $SINGLEMDS
2231 rm $DIR/$tdir/$tfile
2234 zconf_mount $(hostname) $MOUNT
2237 BLOCKS2=$(df $MOUNT | tail -n 1 | awk '{ print $3 }')
2238 [ "$BLOCKS1" == "$BLOCKS2" ] || error $((BLOCKS2 - BLOCKS1)) blocks leaked
2241 run_test 89 "no disk space leak on late ost connection"
2243 equals_msg `basename $0`: test complete, cleaning up
2244 check_and_cleanup_lustre
2245 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true