7 # This test needs to be run on the client
10 export MULTIOP=${MULTIOP:-multiop}
11 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
14 . $LUSTRE/tests/test-framework.sh
16 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
18 CHECK_GRANT=${CHECK_GRANT:-"yes"}
19 GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
21 require_dsh_mds || exit 0
24 # bug number for skipped tests:
25 ALWAYS_EXCEPT="$REPLAY_SINGLE_EXCEPT "
26 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
29 [ "$SLOW" = "no" ] && EXCEPT_SLOW="44b"
31 [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
32 # bug number for skipped test: LU-5761
33 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 89"
37 check_and_setup_lustre
42 rm -rf $DIR/[df][0-9]* $DIR/f.$TESTSUITE.*
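# Most tests below follow the same pattern: replay_barrier roughly syncs the
# MDS and then stops it from committing new transactions, the test performs
# metadata operations that therefore exist only in memory, and fail (or
# facet_failover) restarts the MDS so the client has to replay those
# operations during recovery before the final checks run.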
44 # LU-482 Avert LVM and VM inability to flush caches in pre-2.6.33 kernels
45 if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
47 do_facet $SINGLEMDS sync
50 test_0a() { # was test_0
51 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
52 replay_barrier $SINGLEMDS
56 run_test 0a "empty replay"
59 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
61 # this test attempts to trigger a race in the precreation code,
62 # and must run before any other objects are created on the filesystem
64 createmany -o $DIR/$tfile 20 || error "createmany -o $DIR/$tfile failed"
65 unlinkmany $DIR/$tfile 20 || error "unlinkmany $DIR/$tfile failed"
67 run_test 0b "ensure object created after recovery exists (3284)"
70 replay_barrier $SINGLEMDS
73 facet_failover $SINGLEMDS
74 zconf_mount $(hostname) $MOUNT || error "mount fails"
75 client_up || error "post-failover df failed"
76 # file shouldn't exist if replay-barrier works as expected
77 rm $DIR/$tfile && error "File exists and it shouldn't"
80 run_test 0c "check replay-barrier"
83 replay_barrier $SINGLEMDS
85 facet_failover $SINGLEMDS
86 zconf_mount $(hostname) $MOUNT || error "mount fails"
87 client_up || error "post-failover df failed"
89 run_test 0d "expired recovery with no clients"
92 replay_barrier $SINGLEMDS
95 $CHECKSTAT -t file $DIR/$tfile ||
96 error "$CHECKSTAT $DIR/$tfile attribute check failed"
99 run_test 1 "simple create"
102 replay_barrier $SINGLEMDS
105 $CHECKSTAT -t file $DIR/$tfile ||
106 error "$CHECKSTAT $DIR/$tfile attribute check failed"
112 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
113 replay_barrier $SINGLEMDS
116 $CHECKSTAT -t file $DIR/$tfile ||
117 error "$CHECKSTAT $DIR/$tfile attribute check failed"
123 replay_barrier $SINGLEMDS
124 $LFS setstripe -c $OSTCOUNT $DIR/$tfile
126 $CHECKSTAT -t file $DIR/$tfile ||
127 error "$CHECKSTAT $DIR/$tfile check failed"
129 run_test 2c "setstripe replay"
132 replay_barrier $SINGLEMDS
133 $LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir
135 $CHECKSTAT -t dir $DIR/$tdir ||
136 error "$CHECKSTAT $DIR/$tdir check failed"
138 run_test 2d "setdirstripe replay"
141 local file=$DIR/$tfile
142 replay_barrier $SINGLEMDS
144 openfile -f O_DIRECTORY $file
146 $CHECKSTAT -t file $file ||
147 error "$CHECKSTAT $file attribute check failed"
150 run_test 3a "replay failed open(O_DIRECTORY)"
153 replay_barrier $SINGLEMDS
154 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
155 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
157 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
159 $CHECKSTAT -t file $DIR/$tfile &&
160 error "$CHECKSTAT $DIR/$tfile attribute check should fail"
163 run_test 3b "replay failed open -ENOMEM"
166 replay_barrier $SINGLEMDS
167 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
168 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
170 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
173 $CHECKSTAT -t file $DIR/$tfile &&
174 error "$CHECKSTAT $DIR/$tfile attribute check should fail"
177 run_test 3c "replay failed open -ENOMEM"
179 test_4a() { # was test_4
180 replay_barrier $SINGLEMDS
181 for i in $(seq 10); do
182 echo "tag-$i" > $DIR/$tfile-$i
185 for i in $(seq 10); do
186 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
189 run_test 4a "|x| 10 open(O_CREAT)s"
192 for i in $(seq 10); do
193 echo "tag-$i" > $DIR/$tfile-$i
195 replay_barrier $SINGLEMDS
198 $CHECKSTAT -t file $DIR/$tfile-* &&
199 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
202 run_test 4b "|x| rm 10 files"
204 # The idea is to get past the first block of precreated files on both
205 # osts, and then replay.
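# (220 creates should be more than the initial batch of precreated objects on
# each OST, so the replayed creates have to cross a precreation boundary.)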
207 replay_barrier $SINGLEMDS
208 for i in $(seq 220); do
209 echo "tag-$i" > $DIR/$tfile-$i
212 for i in $(seq 220); do
213 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
217 # wait for the removal to be committed
219 run_test 5 "|x| 220 open(O_CREAT)"
221 test_6a() { # was test_6
222 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
223 replay_barrier $SINGLEMDS
224 mcreate $DIR/$tdir/$tfile
226 $CHECKSTAT -t dir $DIR/$tdir ||
227 error "$CHECKSTAT $DIR/$tdir attribute check failed"
228 $CHECKSTAT -t file $DIR/$tdir/$tfile ||
229 error "$CHECKSTAT $DIR/$tdir/$tfile attribute check failed"
231 # wait for the log processing thread
233 run_test 6a "mkdir + contained create"
236 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
237 replay_barrier $SINGLEMDS
240 $CHECKSTAT -t dir $DIR/$tdir &&
241 error "$CHECKSTAT $DIR/$tdir attribute check should fail" ||
244 run_test 6b "|X| rmdir"
247 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
248 replay_barrier $SINGLEMDS
249 mcreate $DIR/$tdir/$tfile
251 $CHECKSTAT -t dir $DIR/$tdir ||
252 error "$CHECKSTAT $DIR/$tdir attribute check failed"
253 $CHECKSTAT -t file $DIR/$tdir/$tfile ||
254 error "$CHECKSTAT $DIR/$tdir/$tfile attribute check failed"
257 run_test 7 "mkdir |X| contained create"
260 replay_barrier $SINGLEMDS
261 multiop_bg_pause $DIR/$tfile mo_c ||
262 error "multiop mknod $DIR/$tfile failed"
266 $CHECKSTAT -t file $DIR/$tfile ||
267 error "$CHECKSTAT $DIR/$tfile attribute check failed"
268 kill -USR1 $MULTIPID || error "multiop mknod $MULTIPID not running"
269 wait $MULTIPID || error "multiop mknod $MULTIPID failed"
272 run_test 8 "creat open |X| close"
275 replay_barrier $SINGLEMDS
277 local old_inum=$(ls -i $DIR/$tfile | awk '{print $1}')
279 local new_inum=$(ls -i $DIR/$tfile | awk '{print $1}')
281 echo " old_inum == $old_inum, new_inum == $new_inum"
282 if [ $old_inum -eq $new_inum ] ;
284 echo "old_inum and new_inum match"
286 echo " old_inum and new_inum do not match"
287 error "old index($old_inum) does not match new index($new_inum)"
291 run_test 9 "|X| create (same inum/gen)"
294 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
295 replay_barrier $SINGLEMDS
296 mv $DIR/$tfile $DIR/$tfile-2
299 $CHECKSTAT $DIR/$tfile &&
300 error "$CHECKSTAT $DIR/$tfile attribute check should fail"
301 $CHECKSTAT $DIR/$tfile-2 ||
302 error "$CHECKSTAT $DIR/$tfile-2 attribute check failed"
306 run_test 10 "create |X| rename unlink"
309 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
310 echo "old" > $DIR/$tfile
311 mv $DIR/$tfile $DIR/$tfile-2
312 replay_barrier $SINGLEMDS
313 echo "new" > $DIR/$tfile
315 grep old $DIR/$tfile-2
317 grep new $DIR/$tfile || error "grep $DIR/$tfile failed"
318 grep old $DIR/$tfile-2 || error "grep $DIR/$tfile-2 failed"
320 run_test 11 "create open write rename |X| create-old-name read"
323 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
324 multiop_bg_pause $DIR/$tfile o_tSc ||
325 error "multiop_bg_pause $DIR/$tfile failed"
328 replay_barrier $SINGLEMDS
329 kill -USR1 $pid || error "multiop $pid not running"
330 wait $pid || error "multiop $pid failed"
333 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
336 run_test 12 "open, unlink |X| close"
338 # 1777 - replay open after committed chmod that would make
339 # a regular open a failure
341 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
342 multiop_bg_pause $DIR/$tfile O_wc ||
343 error "multiop_bg_pause $DIR/$tfile failed"
346 $CHECKSTAT -p 0 $DIR/$tfile ||
347 error "$CHECKSTAT $DIR/$tfile attribute check failed"
348 replay_barrier $SINGLEMDS
350 kill -USR1 $pid || error "multiop $pid not running"
351 wait $pid || error "multiop $pid failed"
353 $CHECKSTAT -s 1 -p 0 $DIR/$tfile ||
354 error "second $CHECKSTAT $DIR/$tfile attribute check failed"
355 rm $DIR/$tfile || error "rm $DIR/$tfile failed"
358 run_test 13 "open chmod 0 |x| write close"
361 multiop_bg_pause $DIR/$tfile O_tSc ||
362 error "multiop_bg_pause $DIR/$tfile failed"
365 replay_barrier $SINGLEMDS
366 kill -USR1 $pid || error "multiop $pid not running"
367 wait $pid || error "multiop $pid failed"
370 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
373 run_test 14 "open(O_CREAT), unlink |X| close"
376 multiop_bg_pause $DIR/$tfile O_tSc ||
377 error "multiop_bg_pause $DIR/$tfile failed"
380 replay_barrier $SINGLEMDS
381 touch $DIR/$tfile-1 || error "touch $DIR/$tfile-1 failed"
382 kill -USR1 $pid || error "multiop $pid not running"
383 wait $pid || error "multiop $pid failed"
386 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
387 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
390 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
393 replay_barrier $SINGLEMDS
396 mcreate $DIR/$tfile-2
398 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
399 [ -e $DIR/$tfile-2 ] || error "file $DIR/$tfile-2 does not exist"
400 munlink $DIR/$tfile-2 || error "munlink $DIR/$tfile-2 failed"
402 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
405 replay_barrier $SINGLEMDS
406 multiop_bg_pause $DIR/$tfile O_c ||
407 error "multiop_bg_pause $DIR/$tfile failed"
410 kill -USR1 $pid || error "multiop $pid not running"
411 wait $pid || error "multiop $pid failed"
412 $CHECKSTAT -t file $DIR/$tfile ||
413 error "$CHECKSTAT $DIR/$tfile attribute check failed"
416 run_test 17 "|X| open(O_CREAT), |replay| close"
419 replay_barrier $SINGLEMDS
420 multiop_bg_pause $DIR/$tfile O_tSc ||
421 error "multiop_bg_pause $DIR/$tfile failed"
424 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
425 echo "pid: $pid will close"
426 kill -USR1 $pid || error "multiop $pid not running"
427 wait $pid || error "multiop $pid failed"
430 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
431 [ -e $DIR/$tfile-2 ] || error "file $DIR/$tfile-2 does not exist"
432 # this touch frequently fails
433 touch $DIR/$tfile-3 || error "touch $DIR/$tfile-3 failed"
434 munlink $DIR/$tfile-2 || error "munlink $DIR/$tfile-2 failed"
435 munlink $DIR/$tfile-3 || error "munlink $DIR/$tfile-3 failed"
438 run_test 18 "open(O_CREAT), unlink, touch new, close, touch, unlink"
440 # bug 1855 (a simpler form of test_11 above)
442 replay_barrier $SINGLEMDS
444 echo "old" > $DIR/$tfile
445 mv $DIR/$tfile $DIR/$tfile-2
446 grep old $DIR/$tfile-2
448 grep old $DIR/$tfile-2 || error "grep $DIR/$tfile-2 failed"
450 run_test 19 "mcreate, open, write, rename "
452 test_20a() { # was test_20
453 replay_barrier $SINGLEMDS
454 multiop_bg_pause $DIR/$tfile O_tSc ||
455 error "multiop_bg_pause $DIR/$tfile failed"
460 kill -USR1 $pid || error "multiop $pid not running"
461 wait $pid || error "multiop $pid failed"
462 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
465 run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
467 test_20b() { # bug 10480
468 local wait_timeout=$((TIMEOUT * 4))
469 local extra=$(fs_log_size)
473 $LFS setstripe -i 0 -c 1 $DIR
475 local beforeused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
477 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
478 while [ ! -e $DIR/$tfile ] ; do
479 usleep 60 # give dd a chance to start
482 $LFS getstripe $DIR/$tfile || error "$LFS getstripe $DIR/$tfile failed"
484 rm -f $DIR/$tfile || error "rm -f $DIR/$tfile failed"
486 client_up || client_up || true # reconnect
488 do_facet $SINGLEMDS "lctl set_param -n osd*.*MDT*.force_sync=1"
490 fail $SINGLEMDS # start orphan recovery
491 wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
492 wait_delete_completed $wait_timeout || error "delete did not finish"
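# Freed space shows up asynchronously (on ZFS only after the next transaction
# group commits), so compare the df numbers a few times before concluding
# that the orphan space was leaked.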
496 local afterused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
497 log "before $beforeused, after $afterused"
499 (( $beforeused + $extra >= $afterused )) && break
500 n_attempts=$((n_attempts + 1))
501 [ $n_attempts -gt 3 ] &&
502 error "after $afterused > before $beforeused"
504 wait_zfs_commit $SINGLEMDS 5
509 run_test 20b "write, unlink, eviction, replay (test mds_cleanup_orphans)"
511 test_20c() { # bug 10480
512 multiop_bg_pause $DIR/$tfile Ow_c ||
513 error "multiop_bg_pause $DIR/$tfile failed"
519 client_up || client_up || true # reconnect
521 kill -USR1 $pid || error "multiop $pid not running"
522 wait $pid || error "multiop $pid failed"
523 [ -s $DIR/$tfile ] || error "File was truncated"
527 run_test 20c "check that client eviction does not affect file content"
530 replay_barrier $SINGLEMDS
531 multiop_bg_pause $DIR/$tfile O_tSc ||
532 error "multiop_bg_pause $DIR/$tfile failed"
535 touch $DIR/$tfile-1 || error "touch $DIR/$tfile-1 failed"
538 kill -USR1 $pid || error "multiop $pid not running"
539 wait $pid || error "multiop $pid failed"
540 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
541 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
544 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
547 multiop_bg_pause $DIR/$tfile O_tSc ||
548 error "multiop_bg_pause $DIR/$tfile failed"
551 replay_barrier $SINGLEMDS
555 kill -USR1 $pid || error "multiop $pid not running"
556 wait $pid || error "multiop $pid failed"
557 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
560 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
563 multiop_bg_pause $DIR/$tfile O_tSc ||
564 error "multiop_bg_pause $DIR/$tfile failed"
567 replay_barrier $SINGLEMDS
569 touch $DIR/$tfile-1 || error "touch $DIR/$tfile-1 failed"
572 kill -USR1 $pid || error "multiop $pid not running"
573 wait $pid || error "multiop $pid failed"
574 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
575 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
578 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
581 multiop_bg_pause $DIR/$tfile O_tSc ||
582 error "multiop_bg_pause $DIR/$tfile failed"
585 replay_barrier $SINGLEMDS
588 kill -USR1 $pid || error "multiop $pid not running"
589 wait $pid || error "multiop $pid failed"
590 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
593 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
596 multiop_bg_pause $DIR/$tfile O_tSc ||
597 error "multiop_bg_pause $DIR/$tfile failed"
601 replay_barrier $SINGLEMDS
603 kill -USR1 $pid || error "multiop $pid not running"
604 wait $pid || error "multiop $pid failed"
605 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
608 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
611 replay_barrier $SINGLEMDS
612 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
613 error "multiop_bg_pause $DIR/$tfile-1 failed"
615 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
616 error "multiop_bg_pause $DIR/$tfile-2 failed"
620 kill -USR1 $pid2 || error "second multiop $pid2 not running"
621 wait $pid2 || error "second multiop $pid2 failed"
624 kill -USR1 $pid1 || error "multiop $pid1 not running"
625 wait $pid1 || error "multiop $pid1 failed"
626 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
627 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
630 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
633 replay_barrier $SINGLEMDS
634 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
635 error "multiop_bg_pause $DIR/$tfile-1 failed"
637 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
638 error "multiop_bg_pause $DIR/$tfile-2 failed"
644 kill -USR1 $pid1 || error "multiop $pid1 not running"
645 wait $pid1 || error "multiop $pid1 failed"
646 kill -USR1 $pid2 || error "second multiop $pid2 not running"
647 wait $pid2 || error "second multiop $pid2 failed"
648 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
649 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
652 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
655 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
656 error "multiop_bg_pause $DIR/$tfile-1 failed"
658 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
659 error "multiop_bg_pause $DIR/$tfile-2 failed"
661 replay_barrier $SINGLEMDS
664 kill -USR1 $pid2 || error "second multiop $pid2 not running"
665 wait $pid2 || error "second multiop $pid2 failed"
668 kill -USR1 $pid1 || error "multiop $pid1 not running"
669 wait $pid1 || error "multiop $pid1 failed"
670 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
671 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
674 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
677 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
678 error "multiop_bg_pause $DIR/$tfile-1 failed"
680 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
681 error "multiop_bg_pause $DIR/$tfile-2 failed"
683 replay_barrier $SINGLEMDS
688 kill -USR1 $pid1 || error "multiop $pid1 not running"
689 wait $pid1 || error "multiop $pid1 failed"
690 kill -USR1 $pid2 || error "second multiop $pid2 not running"
691 wait $pid2 || error "second multiop $pid2 failed"
692 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
693 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
696 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
699 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
700 error "multiop_bg_pause $DIR/$tfile-1 failed"
702 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
703 error "multiop_bg_pause $DIR/$tfile-2 failed"
708 replay_barrier $SINGLEMDS
710 kill -USR1 $pid1 || error "multiop $pid1 not running"
711 wait $pid1 || error "multiop $pid1 failed"
712 kill -USR1 $pid2 || error "second multiop $pid2 not running"
713 wait $pid2 || error "second multiop $pid2 failed"
714 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
715 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
718 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
721 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
722 error "multiop_bg_pause $DIR/$tfile-1 failed"
724 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
725 error "multiop_bg_pause $DIR/$tfile-2 failed"
729 replay_barrier $SINGLEMDS
732 kill -USR1 $pid1 || error "multiop $pid1 not running"
733 wait $pid1 || error "multiop $pid1 failed"
734 kill -USR1 $pid2 || error "second multiop $pid2 not running"
735 wait $pid2 || error "second multiop $pid2 failed"
736 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
737 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
740 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
742 # tests for bug 2104; completion without crashing is success. The close is
743 # stale, but we always return 0 for close, so the app never sees it.
745 multiop_bg_pause $DIR/$tfile O_c ||
746 error "multiop_bg_pause $DIR/$tfile failed"
748 multiop_bg_pause $DIR/$tfile O_c ||
749 error "second multiop_bg_pause $DIR/$tfile failed"
752 client_up || client_up || error "client_up failed"
753 kill -USR1 $pid1 || error "multiop $pid1 not running"
754 kill -USR1 $pid2 || error "second multiop $pid2 not running"
755 wait $pid1 || error "multiop $pid1 failed"
756 wait $pid2 || error "second multiop $pid2 failed"
759 run_test 32 "close() notices client eviction; close() after client eviction"
762 createmany -o $DIR/$tfile-%d 10 ||
763 error "createmany create $DIR/$tfile failed"
764 replay_barrier_nosync $SINGLEMDS
765 fail_abort $SINGLEMDS
766 # recreate shouldn't fail
767 createmany -o $DIR/$tfile--%d 10 ||
768 error "createmany recreate $DIR/$tfile failed"
772 run_test 33a "fid seq shouldn't be reused after abort recovery"
775 #define OBD_FAIL_SEQ_ALLOC 0x1311
776 do_facet $SINGLEMDS "lctl set_param fail_loc=0x1311"
778 createmany -o $DIR/$tfile-%d 10
779 replay_barrier_nosync $SINGLEMDS
780 fail_abort $SINGLEMDS
781 # recreate shouldn't fail
782 createmany -o $DIR/$tfile--%d 10 ||
783 error "createmany recreate $DIR/$tfile failed"
787 run_test 33b "test fid seq allocation"
790 multiop_bg_pause $DIR/$tfile O_c ||
791 error "multiop_bg_pause $DIR/$tfile failed"
795 replay_barrier $SINGLEMDS
796 fail_abort $SINGLEMDS
797 kill -USR1 $pid || error "multiop $pid not running"
798 wait $pid || error "multiop $pid failed"
799 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
803 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
805 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
807 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
809 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
810 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
815 # give the MDS a chance to remove the file
816 fail_abort $SINGLEMDS
817 $CHECKSTAT -t file $DIR/$tfile &&
818 error "$CHECKSTAT $DIR/$tfile attribute check should fail" ||
821 run_test 35 "test recovery from llog for unlink op"
823 # b=2432 resent cancel after replay uses wrong cookie,
824 # so don't resend cancels
826 replay_barrier $SINGLEMDS
828 checkstat $DIR/$tfile
829 facet_failover $SINGLEMDS
831 if dmesg | grep "unknown lock cookie"; then
832 error "cancel after replay failed"
835 run_test 36 "don't resend cancel"
838 # directory orphans can't be unlinked from PENDING directory
840 rmdir $DIR/$tfile 2>/dev/null
841 multiop_bg_pause $DIR/$tfile dD_c ||
842 error "multiop_bg_pause $DIR/$tfile failed"
846 replay_barrier $SINGLEMDS
847 # clear the dmesg buffer so we only see errors from this recovery
848 do_facet $SINGLEMDS dmesg -c >/dev/null
849 fail_abort $SINGLEMDS
850 kill -USR1 $pid || error "multiop $pid not running"
851 do_facet $SINGLEMDS dmesg | grep "error .* unlinking .* from PENDING" &&
852 error "error unlinking files"
853 wait $pid || error "multiop $pid failed"
857 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
860 createmany -o $DIR/$tfile-%d 800 ||
861 error "createmany -o $DIR/$tfile failed"
862 unlinkmany $DIR/$tfile-%d 0 400 || error "unlinkmany $DIR/$tfile failed"
863 replay_barrier $SINGLEMDS
865 unlinkmany $DIR/$tfile-%d 400 400 ||
866 error "unlinkmany $DIR/$tfile 400 failed"
868 $CHECKSTAT -t file $DIR/$tfile-* &&
869 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
872 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
874 test_39() { # bug 4176
875 createmany -o $DIR/$tfile-%d 800 ||
876 error "createmany -o $DIR/$tfile failed"
877 replay_barrier $SINGLEMDS
878 unlinkmany $DIR/$tfile-%d 0 400
880 unlinkmany $DIR/$tfile-%d 400 400 ||
881 error "unlinkmany $DIR/$tfile 400 failed"
883 $CHECKSTAT -t file $DIR/$tfile-* &&
884 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
887 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
890 lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
895 # layout lock clients always need an MDS connection to verify the layout during IO. LU-2628.
896 lctl get_param mdc.*.connect_flags | grep -q layout_lock &&
897 skip "layout_lock needs MDS connection for IO" && return 0
899 $LCTL mark multiop $MOUNT/$tfile OS_c
900 multiop $MOUNT/$tfile OS_c &
902 writeme -s $MOUNT/${tfile}-2 &
905 facet_failover $SINGLEMDS
906 #define OBD_FAIL_MDS_CONNECT_NET 0x117
907 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
909 stat1=$(count_ost_writes)
911 stat2=$(count_ost_writes)
912 echo "$stat1, $stat2"
913 if [ $stat1 -lt $stat2 ]; then
914 echo "writes continuing during recovery"
917 echo "writes not continuing during recovery, bug 2477"
920 echo "waiting for writeme $WRITE_PID"
924 echo "waiting for multiop $PID"
925 wait $PID || error "multiop $PID failed"
926 do_facet client munlink $MOUNT/$tfile ||
927 error "munlink $MOUNT/$tfile failed"
928 do_facet client munlink $MOUNT/${tfile}-2 ||
929 error "munlink $MOUNT/$tfile-2 failed"
932 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
935 # make sure that a read to one osc doesn't try to double-unlock its page just
936 # because another osc is invalid. trigger_group_io used to mistakenly return
937 # an error if any oscs were invalid even after having successfully put rpcs
938 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
939 # the page, guaranteeing that the unlock from the RPC completion would
940 # assert on trying to unlock the unlocked page.
942 [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return
944 local f=$MOUNT/$tfile
945 # make sure the start of the file is ost1
946 $SETSTRIPE -S $((128 * 1024)) -i 0 $f
947 do_facet client dd if=/dev/zero of=$f bs=4k count=1 ||
948 error "dd on client failed"
950 # fail ost2 and read from ost1
951 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost2_svc)
952 local osc2dev=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
953 grep $mdtosc | awk '{print $1}')
954 [ -z "$osc2dev" ] && echo "OST: $ost2_svc" &&
955 lctl get_param -n devices &&
956 error "OST 2 $osc2dev does not exist"
957 do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate ||
958 error "deactive device on $SINGLEMDS failed"
959 do_facet client dd if=$f of=/dev/null bs=4k count=1 ||
960 error "second dd on client failed"
961 do_facet $SINGLEMDS $LCTL --device $osc2dev activate ||
962 error "active device on $SINGLEMDS failed"
965 run_test 41 "read from a valid osc while other oscs are invalid"
967 # test MDS recovery after ost failure
969 blocks=$(df -P $MOUNT | tail -n 1 | awk '{ print $2 }')
970 createmany -o $DIR/$tfile-%d 800 ||
971 error "createmany -o $DIR/$tfile failed"
973 unlinkmany $DIR/$tfile-%d 0 400
975 lctl set_param debug=-1
978 # osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
979 #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
980 #[ $blocks_after -lt $blocks ] || return 1
981 echo "wait for MDS to timeout and recover"
982 sleep $((TIMEOUT * 2))
984 unlinkmany $DIR/$tfile-%d 400 400 ||
985 error "unlinkmany $DIR/$tfile 400 failed"
986 $CHECKSTAT -t file $DIR/$tfile-* &&
987 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
990 run_test 42 "recovery after ost failure"
992 # timeout in MDS/OST recovery RPC will LBUG MDS
993 test_43() { # bug 2530
994 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
996 replay_barrier $SINGLEMDS
998 # OBD_FAIL_OST_CREATE_NET 0x204
999 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1005 run_test 43 "mds osc import failure during recovery; don't LBUG"
1007 test_44a() { # was test_44
1008 local at_max_saved=0
1010 local mdcdev=$($LCTL dl |
1011 awk "/${FSNAME}-MDT0000-mdc-/ {if (\$2 == \"UP\") {print \$1}}")
1012 [ "$mdcdev" ] || error "${FSNAME}-MDT0000-mdc- not UP"
1013 [ $(echo $mdcdev | wc -w) -eq 1 ] ||
1014 { $LCTL dl; error "looking for mdcdev=$mdcdev"; }
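# Each $LCTL dl line looks roughly like
#   <devno> <state> <type> <name> <uuid> <refcount>
# so the awk above picks the device number of the UP MDT0000 mdc device and
# insists there is exactly one of them.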
1016 # adaptive timeouts slow this way down
1017 if at_is_enabled; then
1018 at_max_saved=$(at_max_get mds)
1022 for i in $(seq 1 10); do
1023 echo "$i of 10 ($(date +%s))"
1024 do_facet $SINGLEMDS \
1025 "lctl get_param -n md[ts].*.mdt.timeouts | grep service"
1026 #define OBD_FAIL_TGT_CONN_RACE 0x701
1027 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
1028 # the lctl below may fail; that is a valid case
1029 $LCTL --device $mdcdev recover
1032 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1033 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
1036 run_test 44a "race in target handle connect"
1039 local mdcdev=$($LCTL dl |
1040 awk "/${FSNAME}-MDT0000-mdc-/ {if (\$2 == \"UP\") {print \$1}}")
1041 [ "$mdcdev" ] || error "${FSNAME}-MDT0000-mdc not up"
1042 [ $(echo $mdcdev | wc -w) -eq 1 ] ||
1043 { echo mdcdev=$mdcdev; $LCTL dl;
1044 error "more than one ${FSNAME}-MDT0000-mdc"; }
1046 for i in $(seq 1 10); do
1047 echo "$i of 10 ($(date +%s))"
1048 do_facet $SINGLEMDS \
1049 "lctl get_param -n md[ts].*.mdt.timeouts | grep service"
1050 #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
1051 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
1052 # the lctl below may fail; that is a valid case
1053 $LCTL --device $mdcdev recover
1058 run_test 44b "race in target handle connect"
1061 replay_barrier $SINGLEMDS
1062 createmany -m $DIR/$tfile-%d 100 || error "failed to create directories"
1063 #define OBD_FAIL_TGT_RCVG_FLAG 0x712
1064 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
1065 fail_abort $SINGLEMDS
1066 unlinkmany $DIR/$tfile-%d 100 && error "unlinked after fail_abort"
1068 unlinkmany $DIR/$tfile-%d 100 && error "unlinked after fail"
1071 run_test 44c "race in target handle connect"
1073 # Handle failed close
1075 local mdcdev=$($LCTL get_param -n devices |
1076 awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
1077 [ "$mdcdev" ] || error "${FSNAME}-MDT0000-mdc not up"
1078 [ $(echo $mdcdev | wc -w) -eq 1 ] ||
1079 { echo mdcdev=$mdcdev; $LCTL dl;
1080 error "more than one ${FSNAME}-MDT0000-mdc"; }
1082 $LCTL --device $mdcdev recover ||
1083 error "$LCTL --device $mdcdev recover failed"
1085 multiop_bg_pause $DIR/$tfile O_c ||
1086 error "multiop_bg_pause $DIR/$tfile failed"
1089 # This will cause the CLOSE to fail before even
1090 # allocating a reply buffer
1091 $LCTL --device $mdcdev deactivate ||
1092 error "$LCTL --device $mdcdev deactivate failed"
1095 kill -USR1 $pid || error "multiop $pid not running"
1096 wait $pid || error "multiop $pid failed"
1098 $LCTL --device $mdcdev activate ||
1099 error "$LCTL --device $mdcdev activate failed"
1102 $CHECKSTAT -t file $DIR/$tfile ||
1103 error "$CHECKSTAT $DIR/$tfile attribute check failed"
1106 run_test 45 "Handle failed close"
1110 drop_reply "touch $DIR/$tfile"
1112 # ironically, the previous test, 45, will cause a real forced close,
1113 # so only check for a forced close of this test's file
1114 dmesg | grep -i "force closing client file handle for $tfile" &&
1115 error "found force closing in dmesg"
1118 run_test 46 "Don't leak file handle after open resend (3325)"
1120 test_47() { # bug 2824
1121 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1123 # create some files to make sure precreate has been done on all
1124 # OSTs. (just in case this test is run independently)
1125 createmany -o $DIR/$tfile 20 ||
1126 error "createmany create $DIR/$tfile failed"
1128 # OBD_FAIL_OST_CREATE_NET 0x204
1130 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1131 client_up || error "client_up failed"
1133 # let the MDS discover the OST failure, attempt to recover, fail
1134 # and recover again.
1135 sleep $((3 * TIMEOUT))
1137 # Without 2824, this createmany would hang
1138 createmany -o $DIR/$tfile 20 ||
1139 error "createmany recraete $DIR/$tfile failed"
1140 unlinkmany $DIR/$tfile 20 || error "unlinkmany $DIR/$tfile failed"
1144 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
1147 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1148 [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return
1150 replay_barrier $SINGLEMDS
1151 createmany -o $DIR/$tfile 20 ||
1152 error "createmany -o $DIR/$tfile failed"
1153 # OBD_FAIL_OST_EROFS 0x216
1154 facet_failover $SINGLEMDS
1155 do_facet ost1 "lctl set_param fail_loc=0x80000216"
1156 client_up || error "client_up failed"
1158 # let the MDS discover the OST failure, attempt to recover, fail
1159 # and recover again.
1160 sleep $((3 * TIMEOUT))
1162 createmany -o $DIR/$tfile 20 20 ||
1163 error "createmany recraete $DIR/$tfile failed"
1164 unlinkmany $DIR/$tfile 40 || error "unlinkmany $DIR/$tfile failed"
1167 run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
1170 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost1_svc)
1171 local oscdev=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
1172 grep $mdtosc | awk '{print $1}')
1173 [ "$oscdev" ] || error "could not find OSC device on MDS"
1174 do_facet $SINGLEMDS $LCTL --device $oscdev recover ||
1175 error "OSC device $oscdev recovery failed"
1176 do_facet $SINGLEMDS $LCTL --device $oscdev recover ||
1177 error "second OSC device $oscdev recovery failed"
1178 # give the mds_lov_sync threads a chance to run
1181 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1183 # b3764 timed out lock replay
1185 [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.6.90) ] &&
1186 skip "MDS prior to 2.6.90 handle LDLM_REPLY_NET incorrectly" &&
1189 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
1190 cancel_lru_locks mdc
1192 multiop_bg_pause $DIR/$tfile s_s || error "multiop $DIR/$tfile failed"
1195 #define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
1196 lctl set_param -n ldlm.cancel_unused_locks_before_replay "0"
1197 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000157"
1199 fail $SINGLEMDS || error "fail $SINGLEMDS failed"
1201 wait $mpid || error "multiop_bg_pause pid failed"
1203 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1204 lctl set_param fail_loc=0x0
1205 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
1208 run_test 52 "time out lock replay (3764)"
1210 # bug 3462 - simultaneous MDC requests
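# The 53* tests race a close RPC against an open/create RPC from the same
# client: a fail_loc drops or delays one of the requests (and/or its reply),
# the MDS is failed over, and after replay both files must still exist with
# the expected attributes.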
1212 [[ $(lctl get_param mdc.*.import |
1213 grep "connect_flags:.*multi_mod_rpc") ]] ||
1214 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
1216 cancel_lru_locks mdc # cleanup locks from former test cases
1217 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1218 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1219 multiop $DIR/${tdir}-1/f O_c &
1221 # give multiop a chance to open
1224 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1225 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1226 kill -USR1 $close_pid
1227 cancel_lru_locks mdc # force the close
1228 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1230 mcreate $DIR/${tdir}-2/f || error "mcreate $DIR/${tdir}-2/f failed"
1232 # close should still be here
1233 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1235 replay_barrier_nodf $SINGLEMDS
1237 wait $close_pid || error "close_pid $close_pid failed"
1239 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1240 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1241 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1242 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1243 rm -rf $DIR/${tdir}-*
1245 run_test 53a "|X| close request while two MDC requests in flight"
1248 cancel_lru_locks mdc # cleanup locks from former test cases
1250 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1251 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1252 multiop_bg_pause $DIR/${tdir}-1/f O_c ||
1253 error "multiop_bg_pause $DIR/${tdir}-1/f failed"
1256 #define OBD_FAIL_MDS_REINT_NET 0x107
1257 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1258 mcreate $DIR/${tdir}-2/f &
1262 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1263 kill -USR1 $close_pid
1264 cancel_lru_locks mdc # force the close
1265 wait $close_pid || error "close_pid $close_pid failed"
1266 # open should still be here
1267 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1269 replay_barrier_nodf $SINGLEMDS
1271 wait $open_pid || error "open_pid failed"
1273 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1274 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1275 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1276 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1277 rm -rf $DIR/${tdir}-*
1279 run_test 53b "|X| open request while two MDC requests in flight"
1282 cancel_lru_locks mdc # cleanup locks from former test cases
1284 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1285 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1286 multiop $DIR/${tdir}-1/f O_c &
1289 #define OBD_FAIL_MDS_REINT_NET 0x107
1290 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1291 mcreate $DIR/${tdir}-2/f &
1295 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1296 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1297 kill -USR1 $close_pid
1298 cancel_lru_locks mdc # force the close
1300 #bz20647: make sure all pids exist before failover
1301 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1302 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1303 replay_barrier_nodf $SINGLEMDS
1304 fail_nodf $SINGLEMDS
1305 wait $open_pid || error "open_pid failed"
1307 # close should be gone
1308 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1309 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1311 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1312 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1313 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1314 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1315 rm -rf $DIR/${tdir}-*
1317 run_test 53c "|X| open request and close request while two MDC requests in flight"
1320 [[ $(lctl get_param mdc.*.import |
1321 grep "connect_flags:.*multi_mod_rpc") ]] ||
1322 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
1324 cancel_lru_locks mdc # cleanup locks from former test cases
1326 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1327 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1328 multiop $DIR/${tdir}-1/f O_c &
1330 # give multiop a chance to open
1333 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1334 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1335 kill -USR1 $close_pid
1336 cancel_lru_locks mdc # force the close
1337 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1338 mcreate $DIR/${tdir}-2/f || error "mcreate $DIR/${tdir}-2/f failed"
1340 # close should still be here
1341 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1343 wait $close_pid || error "close_pid failed"
1345 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1346 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1347 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1348 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1349 rm -rf $DIR/${tdir}-*
1351 run_test 53d "close reply while two MDC requests in flight"
1354 cancel_lru_locks mdc # cleanup locks from former test cases
1356 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1357 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1358 multiop $DIR/${tdir}-1/f O_c &
1361 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1362 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1363 mcreate $DIR/${tdir}-2/f &
1367 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1368 kill -USR1 $close_pid
1369 cancel_lru_locks mdc # force the close
1370 wait $close_pid || error "close_pid failed"
1371 # open should still be here
1372 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1374 replay_barrier_nodf $SINGLEMDS
1376 wait $open_pid || error "open_pid failed"
1378 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1379 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1380 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1381 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1382 rm -rf $DIR/${tdir}-*
1384 run_test 53e "|X| open reply while two MDC requests in flight"
1387 cancel_lru_locks mdc # cleanup locks from former test cases
1389 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1390 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1391 multiop $DIR/${tdir}-1/f O_c &
1394 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1395 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1396 mcreate $DIR/${tdir}-2/f &
1400 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1401 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1402 kill -USR1 $close_pid
1403 cancel_lru_locks mdc # force the close
1405 #bz20647: make sure all pids exist before failover
1406 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1407 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1408 replay_barrier_nodf $SINGLEMDS
1409 fail_nodf $SINGLEMDS
1410 wait $open_pid || error "open_pid failed"
1412 # close should be gone
1413 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1414 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1416 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1417 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1418 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1419 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1420 rm -rf $DIR/${tdir}-*
1422 run_test 53f "|X| open reply and close reply while two MDC requests in flight"
1425 cancel_lru_locks mdc # cleanup locks from former test cases
1427 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1428 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1429 multiop $DIR/${tdir}-1/f O_c &
1432 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1433 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1434 mcreate $DIR/${tdir}-2/f &
1438 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1439 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1440 kill -USR1 $close_pid
1441 cancel_lru_locks mdc # force the close
1442 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1444 #bz20647: make sure all pids exist before failover
1445 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1446 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1447 replay_barrier_nodf $SINGLEMDS
1448 fail_nodf $SINGLEMDS
1449 wait $open_pid || error "open_pid failed"
1451 # close should be gone
1452 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1454 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1455 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1456 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1457 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1458 rm -rf $DIR/${tdir}-*
1460 run_test 53g "|X| drop open reply and close request while close and open are both in flight"
1463 cancel_lru_locks mdc # cleanup locks from former test cases
1465 mkdir $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1466 mkdir $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1467 multiop $DIR/${tdir}-1/f O_c &
1470 #define OBD_FAIL_MDS_REINT_NET 0x107
1471 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1472 mcreate $DIR/${tdir}-2/f &
1476 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1477 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1478 kill -USR1 $close_pid
1479 cancel_lru_locks mdc # force the close
1482 #bz20647: make sure all pids exist before failover
1483 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1484 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1485 replay_barrier_nodf $SINGLEMDS
1486 fail_nodf $SINGLEMDS
1487 wait $open_pid || error "open_pid failed"
1489 # close should be gone
1490 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1491 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1493 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1494 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1495 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1496 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1497 rm -rf $DIR/${tdir}-*
1499 run_test 53h "open request and close reply while two MDC requests in flight"
1501 #b3761 ASSERTION(hash != 0) failed
1503 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1504 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
1506 # give touch a chance to run
1508 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1512 run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
1514 #b3440 ASSERTION(rec->ur_fid2->id) failed
1516 ln -s foo $DIR/$tfile
1517 replay_barrier $SINGLEMDS
1518 #drop_reply "cat $DIR/$tfile"
1522 run_test 56 "don't replay a symlink open request (3440)"
1524 # recover one mds-ost setattr from llog
1526 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1527 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1528 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
1529 replay_barrier $SINGLEMDS
1531 wait_recovery_complete $SINGLEMDS || error "MDS recovery is not done"
1532 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
1533 $CHECKSTAT -t file $DIR/$tfile ||
1534 error "$CHECKSTAT $DIR/$tfile attribute check failed"
1535 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1538 run_test 57 "test recovery from llog for setattr op"
1541 zconf_umount $(hostname) $MOUNT2
1545 # recover many mds-ost setattrs from llog
1547 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1548 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1549 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1550 createmany -o $DIR/$tdir/$tfile-%d 2500
1551 replay_barrier $SINGLEMDS
1554 $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null ||
1555 error "$CHECKSTAT $DIR/$tfile-* attribute check failed"
1556 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1557 unlinkmany $DIR/$tdir/$tfile-%d 2500 ||
1558 error "unlinkmany $DIR/$tfile failed"
1561 run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
1567 trap cleanup_58 EXIT
1569 large_xattr_enabled &&
1570 orig="$(generate_string $(max_xattr_size))" || orig="bar"
1571 # The original extended attribute can be long; print a short version
1572 # of it if an error occurs
1573 local sm_msg=$(printf "%.9s" $orig)
1575 mount_client $MOUNT2 || error "mount_client on $MOUNT2 failed"
1576 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1577 touch $DIR/$tdir/$tfile || error "touch $DIR/$tdir/$tfile failed"
1578 replay_barrier $SINGLEMDS
1579 setfattr -n trusted.foo -v $orig $DIR/$tdir/$tfile
1581 new=$(get_xattr_value trusted.foo $MOUNT2/$tdir/$tfile)
1582 [[ "$new" = "$orig" ]] ||
1583 error "xattr set ($sm_msg...) differs from xattr get ($new)"
1584 rm -f $DIR/$tdir/$tfile
1587 wait_clients_import_state ${CLIENTS:-$HOSTNAME} "mgs" FULL
1589 run_test 58b "test replay of setxattr op"
1591 test_58c() { # bug 16570
1596 trap cleanup_58 EXIT
1598 if large_xattr_enabled; then
1599 local xattr_size=$(max_xattr_size)
1600 orig="$(generate_string $((xattr_size / 2)))"
1601 orig1="$(generate_string $xattr_size)"
1607 # PING_INTERVAL max(obd_timeout / 4, 1U)
1608 sleep $((TIMEOUT / 4))
1610 # The original extended attribute can be long; print a short version
1611 # of it if an error occurs
1612 local sm_msg=$(printf "%.9s" $orig)
1613 local sm_msg1=$(printf "%.9s" $orig1)
1615 mount_client $MOUNT2 || error "mount_client on $MOUNT2 failed"
1616 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1617 touch $DIR/$tdir/$tfile || error "touch $DIR/$tdir/$tfile failed"
1618 drop_request "setfattr -n trusted.foo -v $orig $DIR/$tdir/$tfile" ||
1619 error "drop_request for setfattr failed"
1620 new=$(get_xattr_value trusted.foo $MOUNT2/$tdir/$tfile)
1621 [[ "$new" = "$orig" ]] ||
1622 error "xattr set ($sm_msg...) differs from xattr get ($new)"
1623 drop_reint_reply "setfattr -n trusted.foo1 \
1624 -v $orig1 $DIR/$tdir/$tfile" ||
1625 error "drop_reint_reply for setfattr failed"
1626 new=$(get_xattr_value trusted.foo1 $MOUNT2/$tdir/$tfile)
1627 [[ "$new" = "$orig1" ]] ||
1628 error "second xattr set ($sm_msg1...) differs xattr get ($new)"
1629 rm -f $DIR/$tdir/$tfile
1633 run_test 58c "resend/reconstruct setxattr op"
1635 # log_commit_thread vs filter_destroy race used to lead to import use after free
1638 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1640 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1641 createmany -o $DIR/$tdir/$tfile-%d 200 ||
1642 error "createmany create files failed"
1644 unlinkmany $DIR/$tdir/$tfile-%d 200 ||
1645 error "unlinkmany $DIR/$tdir/$tfile failed"
1646 #define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
1647 do_facet ost1 "lctl set_param fail_loc=0x507"
1650 do_facet ost1 "lctl set_param fail_loc=0x0"
1654 run_test 59 "test log_commit_thread vs filter_destroy race"
1656 # race between add unlink llog vs cat log init in post_recovery (only for b1_6)
1657 # bug 12086: there should be no oops and no 'No ctxt' error in this test
1659 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1660 createmany -o $DIR/$tdir/$tfile-%d 200 ||
1661 error "createmany create files failed"
1662 replay_barrier $SINGLEMDS
1663 unlinkmany $DIR/$tdir/$tfile-%d 0 100
1665 unlinkmany $DIR/$tdir/$tfile-%d 100 100
1666 local no_ctxt=$(dmesg | grep "No ctxt")
1667 [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
1669 run_test 60 "test llog post recovery init vs llog unlink"
1671 #test race llog recovery thread vs llog cleanup
1672 test_61a() { # was test_61
1673 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1675 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1676 createmany -o $DIR/$tdir/$tfile-%d 800 ||
1677 error "createmany create files failed"
1679 unlinkmany $DIR/$tdir/$tfile-%d 800
1680 # OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
1681 set_nodes_failloc "$(osts_nodes)" 0x80000221
1686 set_nodes_failloc "$(osts_nodes)" 0x0
1688 $CHECKSTAT -t file $DIR/$tdir/$tfile-* &&
1689 error "$CHECKSTAT $DIR/$tdir/$tfile attribute check should fail"
1692 run_test 61a "test race llog recovery vs llog cleanup"
1694 #test race mds llog sync vs llog cleanup
1696 # OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
1697 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013a"
1698 facet_failover $SINGLEMDS
1701 do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 ||
1704 run_test 61b "test race mds llog sync vs llog cleanup"
1706 #test race cancel cookie cb vs llog cleanup
1708 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1710 # OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
1711 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
1712 set_nodes_failloc "$(osts_nodes)" 0x80000222
1716 set_nodes_failloc "$(osts_nodes)" 0x0
1718 run_test 61c "test race cancel cookie cb vs llog cleanup"
1720 test_61d() { # bug 16002 # bug 17466 # bug 22137
1721 # OBD_FAIL_OBD_LLOG_SETUP 0x605
1723 do_facet mgs "lctl set_param fail_loc=0x80000605"
1724 start mgs $(mgsdevname) $MGS_MOUNT_OPTS &&
1725 error "mgs start should have failed"
1726 do_facet mgs "lctl set_param fail_loc=0"
1727 start mgs $(mgsdevname) $MGS_MOUNT_OPTS || error "cannot restart mgs"
1729 run_test 61d "error in llog_setup should cleanup the llog context correctly"
1731 test_62() { # Bug 15756 - don't mis-drop resent replay
1732 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1733 replay_barrier $SINGLEMDS
1734 createmany -o $DIR/$tdir/$tfile- 25 ||
1735 error "createmany create files failed"
1736 #define OBD_FAIL_TGT_REPLAY_DROP 0x707
1737 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
1739 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1740 unlinkmany $DIR/$tdir/$tfile- 25 ||
1741 error "unlinkmany $DIR/$tdir/$tfile failed"
1744 run_test 62 "don't mis-drop resent replay"
1746 #Adaptive Timeouts (bug 3055)
1754 echo "Cleaning up AT ..."
1755 if [ -n "$ATOLDBASE" ]; then
1756 local at_history=$($LCTL get_param -n at_history)
1757 do_facet $SINGLEMDS "lctl set_param at_history=$at_history" || true
1758 do_facet ost1 "lctl set_param at_history=$at_history" || true
1761 if [ $AT_MAX_SET -ne 0 ]; then
1762 for facet in mds client ost; do
1763 var=AT_MAX_SAVE_${facet}
1764 echo restore AT on $facet to saved value ${!var}
1765 at_max_set ${!var} $facet
1766 at_new=$(at_max_get $facet)
1767 echo Restored AT value on $facet $at_new
1768 [ $at_new -eq ${!var} ] ||
1769 error "AT value not restored SAVED ${!var} NEW $at_new"
1776 local at_max_new=600
1778 # Save at_max original values
1780 if [ $AT_MAX_SET -eq 0 ]; then
1781 # Assume that all OSTs have the same at_max
1782 for facet in mds client ost; do
1783 eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
1787 for facet in mds client ost; do
1788 at_max=$(at_max_get $facet)
1789 if [ $at_max -ne $at_max_new ]; then
1790 echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new"
1791 at_max_set $at_max_new $facet
1796 if [ -z "$ATOLDBASE" ]; then
1797 ATOLDBASE=$(do_facet $SINGLEMDS "lctl get_param -n at_history")
1798 # speed up the timebase so we can check decreasing AT
1799 do_facet $SINGLEMDS "lctl set_param at_history=8" || true
1800 do_facet ost1 "lctl set_param at_history=8" || true
1802 # sleep for a while to cool down; this should be > 8s and also allow
1803 # at least one ping to be sent. Simply use TIMEOUT to be safe.
1808 test_65a() #bug 3055
1810 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1812 at_start || return 0
1813 $LCTL dk > /dev/null
1815 $LCTL set_param debug="other"
1816 # Slow down a request to the current service time; this is critical
1817 # because previous tests may have caused this value to increase.
1818 REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
1819 awk '/portal 12/ {print $5}'`
1820 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
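# pad the measured service time by 25% plus 5 seconds so the paused request
# is sure to exceed the server's current estimate and trigger an early reply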
1822 do_facet $SINGLEMDS lctl set_param fail_val=$((${REQ_DELAY} * 1000))
1823 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1824 do_facet $SINGLEMDS $LCTL set_param fail_loc=0x8000050a
1825 createmany -o $DIR/$tfile 10 > /dev/null
1826 unlinkmany $DIR/$tfile 10 > /dev/null
1827 # check for log message
1828 $LCTL dk | grep "Early reply #" || error "No early reply"
1830 # client should show REQ_DELAY estimates
1831 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1833 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1835 run_test 65a "AT: verify early replies"
1837 test_65b() #bug 3055
1839 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1841 at_start || return 0
1844 $LCTL set_param debug="other trace"
1845 $LCTL dk > /dev/null
1846 # Slow down a request to the current service time; this is critical
1847 # because previous tests may have caused this value to increase.
1848 $SETSTRIPE --stripe-index=0 --stripe-count=1 $DIR/$tfile ||
1849 error "$SETSTRIPE failed for $DIR/$tfile"
1851 multiop $DIR/$tfile Ow1yc
1852 REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
1853 awk '/portal 6/ {print $5}'`
1854 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
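# same 25% + 5s padding as in test_65a, applied to the OST-side estimate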
1856 do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
1857 #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
1858 do_facet ost1 $LCTL set_param fail_loc=0x224
1861 $SETSTRIPE --stripe-index=0 --stripe-count=1 $DIR/$tfile ||
1862 error "$SETSTRIPE failed"
1863 # force some real bulk transfer
1864 multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
1866 do_facet ost1 $LCTL set_param fail_loc=0
1867 # check for log message
1868 $LCTL dk | grep "Early reply #" || error "No early reply"
1870 # client should show REQ_DELAY estimates
1871 lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
1873 run_test 65b "AT: verify early replies on packed reply / bulk"
1875 test_66a() #bug 3055
1877 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1879 at_start || return 0
1880 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1881 # adjust 5s at a time so no early reply is sent (within deadline)
1882 do_facet $SINGLEMDS "$LCTL set_param fail_val=5000"
1883 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1884 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000050a"
1885 createmany -o $DIR/$tfile 20 > /dev/null
1886 unlinkmany $DIR/$tfile 20 > /dev/null
1887 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1888 do_facet $SINGLEMDS "$LCTL set_param fail_val=10000"
1889 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000050a"
1890 createmany -o $DIR/$tfile 20 > /dev/null
1891 unlinkmany $DIR/$tfile 20 > /dev/null
1892 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1893 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0"
1895 createmany -o $DIR/$tfile 20 > /dev/null
1896 unlinkmany $DIR/$tfile 20 > /dev/null
1897 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
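# with the fail_loc cleared and at_history shortened to 8s by at_start, the
# current estimate should have started decaying back below the recorded worst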
1898 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
1899 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
1900 echo "Current MDT timeout $CUR, worst $WORST"
1901 [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
1903 run_test 66a "AT: verify MDT service time adjusts with no early replies"
1905 test_66b() #bug 3055
1907 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1909 at_start || return 0
1910 ORIG=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
1911 awk '/network/ {print $4}')
1912 $LCTL set_param fail_val=$(($ORIG + 5))
1913 #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
1914 $LCTL set_param fail_loc=0x50c
1915 ls $DIR/$tfile > /dev/null 2>&1
1916 $LCTL set_param fail_loc=0
1917 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
1918 awk '/network/ {print $4}')
1919 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
1920 awk '/network/ {print $6}')
1921 echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
1922 [ $WORST -gt $ORIG ] ||
1923 error "Worst $WORST should be worse than orig $ORIG"
1925 run_test 66b "AT: verify net latency adjusts"
1927 test_67a() #bug 3055
1929 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1931 at_start || return 0
1932 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
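# record the connect count before injecting a gradual server-side delay;
# adaptive timeouts should stretch the deadline so no reconnects are needed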
1933 # sleeping threads may drive values above this
1934 do_facet ost1 "$LCTL set_param fail_val=400"
1935 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1936 do_facet ost1 "$LCTL set_param fail_loc=0x50a"
1937 createmany -o $DIR/$tfile 20 > /dev/null
1938 unlinkmany $DIR/$tfile 20 > /dev/null
1939 do_facet ost1 "$LCTL set_param fail_loc=0"
1940 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1941 ATTEMPTS=$(($CONN2 - $CONN1))
1942 echo "$ATTEMPTS osc reconnect attempts on gradual slow"
1943 [ $ATTEMPTS -gt 0 ] &&
1944 error_ignore bz13721 "AT should have prevented reconnect"
1947 run_test 67a "AT: verify slow request processing doesn't induce reconnects"
1949 test_67b() #bug 3055
1951 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1953 at_start || return 0
1954 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1956 # exhaust precreations on ost1
1957 local OST=$(ostname_from_index 0)
1958 local mdtosc=$(get_mdtosc_proc_path mds $OST)
1959 local last_id=$(do_facet $SINGLEMDS lctl get_param -n \
1960 osc.$mdtosc.prealloc_last_id)
1961 local next_id=$(do_facet $SINGLEMDS lctl get_param -n \
1962 osc.$mdtosc.prealloc_next_id)
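# creating from next_id up past last_id consumes all precreated objects, so
# subsequent creates must wait for new OST object creation while the OST
# create handler is paused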
1964 mkdir -p $DIR/$tdir/${OST} || error "mkdir $DIR/$tdir/${OST} failed"
1965 $SETSTRIPE -i 0 -c 1 $DIR/$tdir/${OST} || error "$SETSTRIPE failed"
1966 echo "Creating to objid $last_id on ost $OST..."
1967 #define OBD_FAIL_OST_PAUSE_CREATE 0x223
1968 do_facet ost1 "$LCTL set_param fail_val=20000"
1969 do_facet ost1 "$LCTL set_param fail_loc=0x80000223"
1970 createmany -o $DIR/$tdir/${OST}/f $next_id $((last_id - next_id + 2))
1973 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1975 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1976 ATTEMPTS=$(($CONN2 - $CONN1))
1977 echo "$ATTEMPTS osc reconnect attempts on instant slow"
1978 # do it again; should not timeout
1979 do_facet ost1 "$LCTL set_param fail_loc=0x80000223"
1980 cp /etc/profile $DIR/$tfile || error "cp failed"
1981 do_facet ost1 "$LCTL set_param fail_loc=0"
1983 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1984 CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1985 ATTEMPTS=$(($CONN3 - $CONN2))
1986 echo "$ATTEMPTS osc reconnect attempts on 2nd slow"
1987 [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
1990 run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
1992 test_68 () #bug 13813
1994 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1996 at_start || return 0
1997 local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
1998 [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
1999 local ldlm_enqueue_min_r=$(do_facet ost1 "find /sys -name ldlm_enqueue_min")
2000 [ -z "$ldlm_enqueue_min_r" ] && skip "missing /sys/.../ldlm_enqueue_min on ost1" && return 0
2001 local ENQ_MIN=$(cat $ldlm_enqueue_min)
2002 local ENQ_MIN_R=$(do_facet ost1 "cat $ldlm_enqueue_min_r")
2003 echo $TIMEOUT >> $ldlm_enqueue_min
2004 do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r"
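# with ldlm_enqueue_min raised to $TIMEOUT on both the client and ost1, the
# delayed lock cancels below (just under and just over $TIMEOUT) should
# still allow both copies to succeed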
2006 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2007 $SETSTRIPE --stripe-index=0 -c 1 $DIR/$tdir ||
2008 error "$SETSTRIPE failed for $DIR/$tdir"
2009 #define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
2010 $LCTL set_param fail_val=$(($TIMEOUT - 1))
2011 $LCTL set_param fail_loc=0x80000312
2012 cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
2013 $LCTL set_param fail_val=$((TIMEOUT * 5 / 4))
2014 $LCTL set_param fail_loc=0x80000312
2015 cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
2016 $LCTL set_param fail_loc=0
2018 echo $ENQ_MIN >> $ldlm_enqueue_min
2019 do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
2023 run_test 68 "AT: verify slowing locks"
2026 # end of AT tests (includes the tests above)
2028 # start multi-client tests
2030 [ -z "$CLIENTS" ] &&
2031 { skip "Need two or more clients." && return; }
2032 [ $CLIENTCOUNT -lt 2 ] &&
2033 { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
2035 echo "mount clients $CLIENTS ..."
2036 zconf_mount_clients $CLIENTS $MOUNT
2038 local clients=${CLIENTS//,/ }
2039 echo "Write/read files on $DIR ; clients $CLIENTS ... "
2040 for CLIENT in $clients; do
2041 do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
2042 of=$DIR/${tfile}_${CLIENT} 2>/dev/null ||
2043 error "dd failed on $CLIENT"
2046 local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
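# read every file back from the last client in the list to verify that data
# written by each client is visible from another node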
2047 for C in ${CLIENTS//,/ }; do
2048 do_node $prev_client dd if=$DIR/${tfile}_${C} \
2049 of=/dev/null 2>/dev/null ||
2050 error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
2056 run_test 70a "check multi client t-f"
2058 check_for_process () {
2063 killall_process $clients "$prog" -0
2067 local clients=${CLIENTS:-$HOSTNAME}
2069 zconf_mount_clients $clients $MOUNT
2072 [ "$SLOW" = "no" ] && duration=120
2073 # set duration to 900 because it takes some time to boot the node
2074 [ "$FAILURE_MODE" = HARD ] && duration=900
2077 local start_ts=$(date +%s)
2078 local cmd="rundbench 1 -t $duration"
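# dbench runs in the background on every client while each MDS below is
# failed over in turn until the duration expires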
2080 if [ $MDSCOUNT -ge 2 ]; then
2081 test_mkdir -p -c$MDSCOUNT $DIR/$tdir
2082 $LFS setdirstripe -D -c$MDSCOUNT $DIR/$tdir
2084 do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
2085 PATH=\$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
2086 DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
2087 MOUNT=$MOUNT DIR=$DIR/$tdir/\\\$(hostname) LCTL=$LCTL $cmd" &
2090 # LU-1897: wait for all dbench copies to start
2091 while ! check_for_process $clients dbench; do
2092 elapsed=$(($(date +%s) - start_ts))
2093 if [ $elapsed -gt $duration ]; then
2094 killall_process $clients dbench
2095 error "dbench failed to start on $clients!"
2100 log "Started rundbench load pid=$pid ..."
2102 elapsed=$(($(date +%s) - start_ts))
2103 local num_failovers=0
2105 while [ $elapsed -lt $duration ]; do
2106 if ! check_for_process $clients dbench; then
2107 error_noexit "dbench stopped on some of $clients!"
2108 killall_process $clients dbench
2112 replay_barrier mds$fail_index
2113 sleep 1 # give clients time to perform operations
2114 # Increment the number of failovers
2115 num_failovers=$((num_failovers+1))
2116 log "$TESTNAME fail mds$fail_index $num_failovers times"
2118 elapsed=$(($(date +%s) - start_ts))
2119 if [ $fail_index -ge $MDSCOUNT ]; then
2122 fail_index=$((fail_index+1))
2126 wait $pid || error "rundbench load on $clients failed!"
2128 run_test 70b "dbench ${MDSCOUNT}mdts recovery; $CLIENTCOUNT clients"
2129 # end multi-client tests
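# random_fail_mdt <max_index> <duration> <monitor_pid>: repeatedly fail a
# randomly chosen MDT while the monitored workload is still running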
2134 local monitor_pid=$3
2136 local start_ts=$(date +%s)
2137 local num_failovers=0
2140 elapsed=$(($(date +%s) - start_ts))
2141 while [ $elapsed -lt $duration ]; do
2142 fail_index=$((RANDOM%max_index+1))
2143 kill -0 $monitor_pid ||
2144 error "$monitor_pid stopped"
2146 replay_barrier mds$fail_index
2148 # Increment the number of failovers
2149 num_failovers=$((num_failovers+1))
2150 log "$TESTNAME fail mds$fail_index $num_failovers times"
2152 elapsed=$(($(date +%s) - start_ts))
2158 rm -f $DIR/replay-single.70c.lck
2163 local clients=${CLIENTS:-$HOSTNAME}
2166 zconf_mount_clients $clients $MOUNT
2169 [ "$SLOW" = "no" ] && duration=180
2170 # set duration to 600 because it takes some time to boot the node
2171 [ "$FAILURE_MODE" = HARD ] && duration=600
2174 local start_ts=$(date +%s)
2176 trap cleanup_70c EXIT
2178 while [ ! -e $DIR/replay-single.70c.lck ]; do
2179 test_mkdir -p -c$MDSCOUNT $DIR/$tdir || break
2180 if [ $MDSCOUNT -ge 2 ]; then
2181 $LFS setdirstripe -D -c$MDSCOUNT $DIR/$tdir ||
2182 error "set default dirstripe failed"
2184 cd $DIR/$tdir || break
2185 tar cf - /etc | tar xf - || error "tar failed in loop"
2189 echo "Started tar $tar_70c_pid"
2191 random_fail_mdt $MDSCOUNT $duration $tar_70c_pid
2192 kill -0 $tar_70c_pid || error "tar $tar_70c_pid stopped"
2194 touch $DIR/replay-single.70c.lck
2195 wait $tar_70c_pid || error "$?: tar failed"
2200 run_test 70c "tar ${MDSCOUNT}mdts recovery"
2204 kill -9 $mkdir_70d_pid
2208 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2209 local clients=${CLIENTS:-$HOSTNAME}
2212 zconf_mount_clients $clients $MOUNT
2215 [ "$SLOW" = "no" ] && duration=180
2216 # set duration to 900 because it takes some time to boot the node
2217 [ "$FAILURE_MODE" = HARD ] && duration=900
2222 local start_ts=$(date +%s)
2224 trap cleanup_70d EXIT
2227 $LFS mkdir -i0 -c2 $DIR/$tdir/test || {
2231 $LFS mkdir -i1 -c2 $DIR/$tdir/test1 || {
2236 touch $DIR/$tdir/test/a || {
2240 mkdir $DIR/$tdir/test/b || {
2244 rm -rf $DIR/$tdir/test || {
2249 touch $DIR/$tdir/test1/a || {
2253 mkdir $DIR/$tdir/test1/b || {
2258 rm -rf $DIR/$tdir/test1 || {
2265 echo "Started $mkdir_70d_pid"
2267 random_fail_mdt $MDSCOUNT $duration $mkdir_70d_pid
2268 kill -0 $mkdir_70d_pid || error "mkdir/rmdir $mkdir_70d_pid stopped"
2273 run_test 70d "mkdir/rmdir striped dir ${MDSCOUNT}mdts recovery"
2277 kill -9 $rename_70e_pid
2281 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2282 local clients=${CLIENTS:-$HOSTNAME}
2285 echo ha > /proc/sys/lnet/debug
2286 zconf_mount_clients $clients $MOUNT
2289 [ "$SLOW" = "no" ] && duration=180
2290 # set duration to 900 because it takes some time to boot the node
2291 [ "$FAILURE_MODE" = HARD ] && duration=900
2294 $LFS mkdir -i0 $DIR/$tdir/test_0
2295 $LFS mkdir -i0 $DIR/$tdir/test_1
2296 touch $DIR/$tdir/test_0/a
2297 touch $DIR/$tdir/test_1/b
2298 trap cleanup_70e EXIT
2301 mrename $DIR/$tdir/test_0/a $DIR/$tdir/test_1/b > \
2307 checkstat $DIR/$tdir/test_0/a && {
2308 echo "a still exists"
2312 checkstat $DIR/$tdir/test_1/b || {
2313 echo "b is missing"
2317 touch $DIR/$tdir/test_0/a || {
2318 echo "touch a failed"
2322 mrename $DIR/$tdir/test_1/b $DIR/$tdir/test_0/a > \
2330 echo "Started $rename_70e_pid"
2332 random_fail_mdt 2 $duration $rename_70e_pid
2333 kill -0 $rename_70e_pid || error "rename $rename_70e_pid stopped"
2338 run_test 70e "rename cross-MDT with random fails"
2340 test_70f_write_and_read(){
2345 echo "Write/read files in: '$DIR/$tdir', clients: '$CLIENTS' ..."
2346 for client in ${CLIENTS//,/ }; do
2347 [ -f $stopflag ] || return
2349 local tgtfile=$DIR/$tdir/$tfile.$client
2350 do_node $client dd $DD_OPTS bs=1M count=10 if=$srcfile \
2351 of=$tgtfile 2>/dev/null ||
2352 error "dd $DD_OPTS bs=1M count=10 if=$srcfile " \
2353 "of=$tgtfile failed on $client, rc=$?"
2356 local prev_client=$(echo ${CLIENTS//,/ } | awk '{ print $NF }')
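# read each client's target file from another node ($prev_client) and
# compare its md5 against the source checksum recorded before the failovers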
2359 for client in ${CLIENTS//,/ }; do
2360 [ -f $stopflag ] || return
2362 # flush client cache in case test is running on only one client
2363 # do_node $client cancel_lru_locks osc
2364 do_node $client $LCTL set_param ldlm.namespaces.*.lru_size=clear
2366 tgtfile=$DIR/$tdir/$tfile.$client
2367 local md5=$(do_node $prev_client "md5sum $tgtfile")
2368 [ ${checksum[$index]// */} = ${md5// */} ] ||
2369 error "$tgtfile: checksum doesn't match on $prev_client"
2370 index=$((index + 1))
2380 mkdir -p $DIR/$tdir || error "cannot create $DIR/$tdir directory"
2381 $SETSTRIPE -c -1 $DIR/$tdir || error "cannot $SETSTRIPE $DIR/$tdir"
2384 while [ -f $stopflag ]; do
2385 test_70f_write_and_read $srcfile $stopflag
2386 # alternate between direct IO and buffered IO on each loop iteration
2387 [ -n "$DD_OPTS" ] && DD_OPTS="" || DD_OPTS="oflag=direct"
2391 test_70f_cleanup() {
2393 rm -f $TMP/$tfile.stop
2394 do_nodes $CLIENTS rm -f $TMP/$tfile
2395 rm -f $DIR/$tdir/$tfile.*
2399 # [ x$ost1failover_HOST = x$ost_HOST ] &&
2400 # { skip "Failover host not defined" && return; }
2401 # [ -z "$CLIENTS" ] &&
2402 # { skip "CLIENTS are not specified." && return; }
2403 # [ $CLIENTCOUNT -lt 2 ] &&
2404 # { skip "Need 2 or more clients, have $CLIENTCOUNT" && return; }
2406 [[ $(lustre_version_code ost1) -lt $(version_code 2.9.53) ]] &&
2407 skip "Need server version at least 2.9.53" && return
2409 echo "mount clients $CLIENTS ..."
2410 zconf_mount_clients $CLIENTS $MOUNT
2412 local srcfile=$TMP/$tfile
2416 trap test_70f_cleanup EXIT
2417 # create a different source file local to each client node so we can
2418 # detect if the file wasn't written out properly after failover
2419 do_nodes $CLIENTS dd bs=1M count=10 if=/dev/urandom of=$srcfile \
2420 2>/dev/null || error "can't create $srcfile on $CLIENTS"
2421 for client in ${CLIENTS//,/ }; do
2422 checksum[$index]=$(do_node $client "md5sum $srcfile")
2423 index=$((index + 1))
2427 [ "$SLOW" = "no" ] && duration=60
2428 # set duration to 900 because it takes some time to boot the node
2429 [ "$FAILURE_MODE" = HARD ] && duration=900
2431 local stopflag=$TMP/$tfile.stop
2432 test_70f_loop $srcfile $stopflag &
2436 local num_failovers=0
2437 local start_ts=$SECONDS
2438 while [ $elapsed -lt $duration ]; do
2442 num_failovers=$((num_failovers + 1))
2443 log "$TESTNAME failing OST $num_failovers times"
2446 elapsed=$((SECONDS - start_ts))
2453 run_test 70f "OSS O_DIRECT recovery with $CLIENTCOUNT clients"
2457 kill -9 $mkdir_71a_pid
2460 random_double_fail_mdt() {
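# like random_fail_mdt, but fails a randomly chosen MDT together with the
# next one (args: max_index, duration, monitor_pid)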
2463 local monitor_pid=$3
2465 local start_ts=$(date +%s)
2466 local num_failovers=0
2470 elapsed=$(($(date +%s) - start_ts))
2471 while [ $elapsed -lt $duration ]; do
2472 fail_index=$((RANDOM%max_index + 1))
2473 if [ $fail_index -eq $max_index ]; then
2476 second_index=$((fail_index + 1))
2478 kill -0 $monitor_pid ||
2479 error "$monitor_pid stopped"
2481 replay_barrier mds$fail_index
2482 replay_barrier mds$second_index
2484 # Increment the number of failovers
2485 num_failovers=$((num_failovers+1))
2486 log "fail mds$fail_index mds$second_index $num_failovers times"
2487 fail mds${fail_index},mds${second_index}
2488 elapsed=$(($(date +%s) - start_ts))
2493 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2494 local clients=${CLIENTS:-$HOSTNAME}
2497 zconf_mount_clients $clients $MOUNT
2500 [ "$SLOW" = "no" ] && duration=180
2501 # set duration to 900 because it takes some time to boot the node
2502 [ "$FAILURE_MODE" = HARD ] && duration=900
2507 local start_ts=$(date +%s)
2509 trap cleanup_71a EXIT
2512 $LFS mkdir -i0 -c2 $DIR/$tdir/test
2513 rmdir $DIR/$tdir/test
2517 echo "Started $mkdir_71a_pid"
2519 random_double_fail_mdt 2 $duration $mkdir_71a_pid
2520 kill -0 $mkdir_71a_pid || error "mkdir/rmdir $mkdir_71a_pid stopped"
2525 run_test 71a "mkdir/rmdir striped dir with 2 mdts recovery"
2528 multiop_bg_pause $DIR/$tfile O_tSc ||
2529 error "multiop_bg_pause $DIR/$tfile failed"
2533 replay_barrier $SINGLEMDS
2534 #define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
2535 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
2538 wait $pid || error "multiop pid failed"
2539 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
2542 run_test 73a "open(O_CREAT), unlink, replay, reconnect before open replay, close"
2545 multiop_bg_pause $DIR/$tfile O_tSc ||
2546 error "multiop_bg_pause $DIR/$tfile failed"
2550 replay_barrier $SINGLEMDS
2551 #define OBD_FAIL_LDLM_REPLY 0x30c
2552 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
2555 wait $pid || error "multiop pid failed"
2556 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
2559 run_test 73b "open(O_CREAT), unlink, replay, reconnect at open_replay reply, close"
2563 local clients=${CLIENTS:-$HOSTNAME}
2565 zconf_umount_clients $clients $MOUNT
2567 facet_failover $SINGLEMDS
2568 zconf_mount_clients $clients $MOUNT
2570 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
2571 rm $DIR/$tfile || error "rm $DIR/$tfile failed"
2572 clients_up || error "client evicted: $?"
2575 run_test 74 "Ensure applications don't fail waiting for OST recovery"
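# remote_dir_check_80: verify the remote dir was created on MDT $MDTIDX and
# that files created inside it report the same MDT index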
2577 remote_dir_check_80() {
2582 diridx=$($GETSTRIPE -M $remote_dir) ||
2583 error "$GETSTRIPE -M $remote_dir failed"
2584 [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
2586 createmany -o $remote_dir/f-%d 20 || error "creation failed"
2587 fileidx=$($GETSTRIPE -M $remote_dir/f-1) ||
2588 error "$GETSTRIPE -M $remote_dir/f-1 failed"
2589 [ $fileidx -eq $MDTIDX ] || error "$fileidx != $MDTIDX"
2595 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2596 ([ $FAILURE_MODE == "HARD" ] &&
2597 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2598 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2602 local remote_dir=$DIR/$tdir/remote_dir
2604 mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2605 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2606 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2607 $LFS mkdir -i $MDTIDX $remote_dir &
2613 wait $CLIENT_PID || error "remote creation failed"
2615 remote_dir_check_80 || error "remote dir check failed"
2616 rm -rf $DIR/$tdir || error "rmdir failed"
2620 run_test 80a "DNE: create remote dir, drop update rep from MDT0, fail MDT0"
2623 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2624 ([ $FAILURE_MODE == "HARD" ] &&
2625 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2626 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2630 local remote_dir=$DIR/$tdir/remote_dir
2632 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2633 #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
2634 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2635 $LFS mkdir -i $MDTIDX $remote_dir &
2640 fail mds$((MDTIDX + 1))
2642 wait $CLIENT_PID || error "remote creation failed"
2644 remote_dir_check_80 || error "remote dir check failed"
2645 rm -rf $DIR/$tdir || error "rmdir failed"
2649 run_test 80b "DNE: create remote dir, drop update rep from MDT0, fail MDT1"
2652 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2653 ([ $FAILURE_MODE == "HARD" ] &&
2654 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2655 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2659 local remote_dir=$DIR/$tdir/remote_dir
2661 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2662 #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
2663 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2664 $LFS mkdir -i $MDTIDX $remote_dir &
2670 fail mds$((MDTIDX + 1))
2672 wait $CLIENT_PID || error "remote creation failed"
2674 remote_dir_check_80 || error "remote dir check failed"
2675 rm -rf $DIR/$tdir || error "rmdir failed"
2679 run_test 80c "DNE: create remote dir, drop update rep from MDT1, fail MDT[0,1]"
2682 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2684 local remote_dir=$DIR/$tdir/remote_dir
2686 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2687 #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
2688 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2689 $LFS mkdir -i $MDTIDX $remote_dir &
2692 # sleep 3 seconds to make sure MDTs are failed after
2693 # lfs mkdir -i has finished on all MDTs.
2698 fail mds${MDTIDX},mds$((MDTIDX + 1))
2700 wait $CLIENT_PID || error "remote creation failed"
2702 remote_dir_check_80 || error "remote dir check failed"
2703 rm -rf $DIR/$tdir || error "rmdir failed"
2707 run_test 80d "DNE: create remote dir, drop update rep from MDT1, fail 2 MDTs"
2710 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2711 ([ $FAILURE_MODE == "HARD" ] &&
2712 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2713 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2717 local remote_dir=$DIR/$tdir/remote_dir
2719 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2720 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2721 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2722 $LFS mkdir -i $MDTIDX $remote_dir &
2725 # sleep 3 seconds to make sure MDTs are failed after
2726 # lfs mkdir -i has finished on all MDTs.
2732 wait $CLIENT_PID || error "remote creation failed"
2734 remote_dir_check_80 || error "remote dir check failed"
2735 rm -rf $DIR/$tdir || error "rmdir failed"
2739 run_test 80e "DNE: create remote dir, drop MDT1 rep, fail MDT0"
2742 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2743 ([ $FAILURE_MODE == "HARD" ] &&
2744 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2745 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2748 local remote_dir=$DIR/$tdir/remote_dir
2750 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2751 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2752 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2753 $LFS mkdir -i $MDTIDX $remote_dir &
2757 fail mds$((MDTIDX + 1))
2759 wait $CLIENT_PID || error "remote creation failed"
2761 remote_dir_check_80 || error "remote dir check failed"
2762 rm -rf $DIR/$tdir || error "rmdir failed"
2766 run_test 80f "DNE: create remote dir, drop MDT1 rep, fail MDT1"
2769 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2770 ([ $FAILURE_MODE == "HARD" ] &&
2771 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2772 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2776 local remote_dir=$DIR/$tdir/remote_dir
2778 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2779 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2780 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2781 $LFS mkdir -i $MDTIDX $remote_dir &
2784 # sleep 3 seconds to make sure MDTs are failed after
2785 # lfs mkdir -i has finished on all MDTs.
2791 fail mds$((MDTIDX + 1))
2793 wait $CLIENT_PID || error "remote creation failed"
2795 remote_dir_check_80 || error "remote dir check failed"
2796 rm -rf $DIR/$tdir || error "rmdir failed"
2800 run_test 80g "DNE: create remote dir, drop MDT1 rep, fail MDT0, then MDT1"
2803 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2805 local remote_dir=$DIR/$tdir/remote_dir
2807 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2808 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2809 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2810 $LFS mkdir -i $MDTIDX $remote_dir &
2813 # sleep 3 seconds to make sure MDTs are failed after
2814 # lfs mkdir -i has finished on all MDTs.
2819 fail mds${MDTIDX},mds$((MDTIDX + 1))
2821 wait $CLIENT_PID || error "remote dir creation failed"
2823 remote_dir_check_80 || error "remote dir check failed"
2824 rm -rf $DIR/$tdir || error "rmdir failed"
2828 run_test 80h "DNE: create remote dir, drop MDT1 rep, fail 2 MDTs"
2831 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2832 ([ $FAILURE_MODE == "HARD" ] &&
2833 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2834 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2838 local remote_dir=$DIR/$tdir/remote_dir
2840 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2841 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2843 touch $remote_dir || error "touch $remote_dir failed"
2844 # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
2845 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2850 fail mds$((MDTIDX + 1))
2852 wait $CLIENT_PID || error "rm remote dir failed"
2854 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
2856 rm -rf $DIR/$tdir || error "rmdir failed"
2860 run_test 81a "DNE: unlink remote dir, drop MDT0 update rep, fail MDT1"
2863 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2864 ([ $FAILURE_MODE == "HARD" ] &&
2865 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2866 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2869 local remote_dir=$DIR/$tdir/remote_dir
2871 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2872 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2874 # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
2875 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2882 wait $CLIENT_PID || error "rm remote dir failed"
2884 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
2886 rm -rf $DIR/$tdir || error "rmdir failed"
2890 run_test 81b "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0"
2893 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2894 ([ $FAILURE_MODE == "HARD" ] &&
2895 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2896 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2900 local remote_dir=$DIR/$tdir/remote_dir
2902 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2903 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2905 # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
2906 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2913 fail mds$((MDTIDX + 1))
2915 wait $CLIENT_PID || error "rm remote dir failed"
2917 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
2919 rm -rf $DIR/$tdir || error "rmdir failed"
2923 run_test 81c "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0,MDT1"
2926 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2928 local remote_dir=$DIR/$tdir/remote_dir
2930 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2931 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2933 # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
2934 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2940 fail mds${MDTIDX},mds$((MDTIDX + 1))
2942 wait $CLIENT_PID || error "rm remote dir failed"
2944 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
2946 rm -rf $DIR/$tdir || error "rmdir failed"
2950 run_test 81d "DNE: unlink remote dir, drop MDT0 update reply, fail 2 MDTs"
2953 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2954 ([ $FAILURE_MODE == "HARD" ] &&
2955 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2956 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2960 local remote_dir=$DIR/$tdir/remote_dir
2962 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2963 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2965 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2966 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2969 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
2974 wait $CLIENT_PID || error "rm remote dir failed"
2976 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
2978 rm -rf $DIR/$tdir || error "rmdir failed"
2982 run_test 81e "DNE: unlink remote dir, drop MDT1 req reply, fail MDT0"
2985 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2986 ([ $FAILURE_MODE == "HARD" ] &&
2987 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2988 skip "MDTs need to be on diff hosts for HARD fail mode" &&
2992 local remote_dir=$DIR/$tdir/remote_dir
2994 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2995 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2997 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2998 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
3003 fail mds$((MDTIDX + 1))
3005 wait $CLIENT_PID || error "rm remote dir failed"
3007 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
3009 rm -rf $DIR/$tdir || error "rmdir failed"
3013 run_test 81f "DNE: unlink remote dir, drop MDT1 req reply, fail MDT1"
3016 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3017 ([ $FAILURE_MODE == "HARD" ] &&
3018 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3019 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3023 local remote_dir=$DIR/$tdir/remote_dir
3025 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3026 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
3028 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3029 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
3036 fail mds$((MDTIDX + 1))
3038 wait $CLIENT_PID || error "rm remote dir failed"
3040 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
3042 rm -rf $DIR/$tdir || error "rmdir failed"
3046 run_test 81g "DNE: unlink remote dir, drop req reply, fail M0, then M1"
3049 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3051 local remote_dir=$DIR/$tdir/remote_dir
3053 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3054 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
3056 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3057 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
3063 fail mds${MDTIDX},mds$((MDTIDX + 1))
3065 wait $CLIENT_PID || error "rm remote dir failed"
3067 stat $remote_dir &>/dev/null && error "$remote_dir still exists!"
3069 rm -rf $DIR/$tdir || error "rmdir failed"
3073 run_test 81h "DNE: unlink remote dir, drop request reply, fail 2 MDTs"
3076 #define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144
3077 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000144"
3078 createmany -o $DIR/$tfile- 1 &
3082 client_up || client_up || true # reconnect
3084 run_test 84a "stale open during export disconnect"
3086 test_85a() { #bug 16774
3087 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
3089 for i in $(seq 100); do
3090 echo "tag-$i" > $DIR/$tfile-$i
3091 grep -q "tag-$i" $DIR/$tfile-$i || error "tag-$i not found in $DIR/$tfile-$i"
3094 lov_id=$(lctl dl | grep "clilov")
3095 addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $NF}')
3096 count=$(lctl get_param -n \
3097 ldlm.namespaces.*MDT0000*$addr.lock_unused_count)
3098 echo "before recovery: unused locks count = $count"
3102 count2=$(lctl get_param -n \
3103 ldlm.namespaces.*MDT0000*$addr.lock_unused_count)
3104 echo "after recovery: unused locks count = $count2"
3106 if [ $count2 -ge $count ]; then
3107 error "unused locks are not canceled"
3110 run_test 85a "check the cancellation of unused locks during recovery(IBITS)"
3112 test_85b() { #bug 16774
3113 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
3115 if ! combined_mgs_mds ; then
3119 create_pool $FSNAME.$TESTNAME ||
3120 error "unable to create pool $TESTNAME"
3121 do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $FSNAME-OST0000 ||
3122 error "unable to add pool $TESTNAME"
3124 $SETSTRIPE -c 1 -p $FSNAME.$TESTNAME $DIR
3126 for i in $(seq 100); do
3127 dd if=/dev/urandom of=$DIR/$tfile-$i bs=4096 \
3128 count=32 >/dev/null 2>&1
3131 cancel_lru_locks osc
3133 for i in $(seq 100); do
3134 dd if=$DIR/$tfile-$i of=/dev/null bs=4096 \
3135 count=32 >/dev/null 2>&1
3138 lov_id=$(lctl dl | grep "clilov")
3139 addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $NF}')
3140 count=$(lctl get_param -n \
3141 ldlm.namespaces.*OST0000*$addr.lock_unused_count)
3142 echo "before recovery: unused locks count = $count"
3143 [ $count -ne 0 ] || error "unused locks ($count) should not be zero"
3147 count2=$(lctl get_param \
3148 -n ldlm.namespaces.*OST0000*$addr.lock_unused_count)
3149 echo "after recovery: unused locks count = $count2"
3151 do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $FSNAME-OST0000 ||
3152 error "unable to remove pool $TESTNAME"
3153 do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
3154 error "unable to destroy the pool $TESTNAME"
3156 if ! combined_mgs_mds ; then
3160 if [ $count2 -ge $count ]; then
3161 error "unused locks are not canceled"
3164 run_test 85b "check the cancellation of unused locks during recovery(EXTENT)"
3167 local clients=${CLIENTS:-$HOSTNAME}
3169 zconf_umount_clients $clients $MOUNT
3170 do_facet $SINGLEMDS lctl set_param mdt.${FSNAME}-MDT*.exports.clear=0
3171 remount_facet $SINGLEMDS
3172 zconf_mount_clients $clients $MOUNT
3174 run_test 86 "umount server after clear nid_stats should not hit LBUG"
3177 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
3180 $SETSTRIPE -i 0 -c 1 $DIR/$tfile
3181 dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 ||
3182 error "dd to $DIR/$tfile failed"
3183 cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
3184 cancel_lru_locks osc
3186 dd if=$DIR/$tfile of=/dev/null bs=1024k count=8 || error "Cannot read"
3187 cksum2=$(md5sum $DIR/$tfile | awk '{print $1}')
3188 if [ $cksum != $cksum2 ] ; then
3189 error "New checksum $cksum2 does not match original $cksum"
3192 run_test 87a "write replay"
3195 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
3198 $SETSTRIPE -i 0 -c 1 $DIR/$tfile
3199 dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 ||
3200 error "dd to $DIR/$tfile failed"
3201 sleep 1 # Give it a chance to flush dirty data
3202 echo TESTTEST | dd of=$DIR/$tfile bs=1 count=8 seek=64
3203 cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
3204 cancel_lru_locks osc
3206 dd if=$DIR/$tfile of=/dev/null bs=1024k count=8 || error "Cannot read"
3207 cksum2=$(md5sum $DIR/$tfile | awk '{print $1}')
3208 if [ $cksum != $cksum2 ] ; then
3209 error "New checksum $cksum2 does not match original $cksum"
3212 run_test 87b "write replay with changed data (checksum resend)"
3214 test_88() { #bug 17485
3215 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3216 mkdir -p $TMP/$tdir || error "mkdir $TMP/$tdir failed"
3218 $SETSTRIPE -i 0 -c 1 $DIR/$tdir || error "$SETSTRIPE"
3221 replay_barrier $SINGLEMDS
3223 # exhaust precreations on ost1
3224 local OST=$(ostname_from_index 0)
3225 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $OST)
3226 local last_id=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
3227 local next_id=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
3228 echo "before test: last_id = $last_id, next_id = $next_id"
3230 echo "Creating to objid $last_id on ost $OST..."
3231 createmany -o $DIR/$tdir/f-%d $next_id $((last_id - next_id + 2)) ||
3232 error "createmany create files to last_id failed"
3234 # create some files to use some uncommitted objids
3235 last_id=$(($last_id + 1))
3236 createmany -o $DIR/$tdir/f-%d $last_id 8 ||
3237 error "createmany create files with uncommitted objids failed"
3239 last_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
3240 next_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
3241 echo "before recovery: last_id = $last_id2, next_id = $next_id2"
3243 # if a test uses shutdown_facet && reboot_facet instead of facet_failover(),
3244 # it has to take care of the affected facets, see bug 20407
3245 local affected_mds1=$(affected_facets mds1)
3246 local affected_ost1=$(affected_facets ost1)
3248 shutdown_facet $SINGLEMDS
3251 reboot_facet $SINGLEMDS
3252 change_active $affected_mds1
3253 wait_for_facet $affected_mds1
3254 mount_facets $affected_mds1 || error "Restart of mds failed"
3257 change_active $affected_ost1
3258 wait_for_facet $affected_ost1
3259 mount_facets $affected_ost1 || error "Restart of ost1 failed"
3263 last_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
3264 next_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
3265 echo "after recovery: last_id = $last_id2, next_id = $next_id2"
3267 # create new files, which should use new objids, and ensure the orphan
3268 # cleanup phase for ost1 is completed at the same time
3269 for i in $(seq 8); do
3270 file_id=$(($last_id + 10 + $i))
3271 dd if=/dev/urandom of=$DIR/$tdir/f-$file_id bs=4096 count=128
3274 # if the objids were not recreated, then "ls" will fail with -ENOENT
3275 ls -l $DIR/$tdir/* || error "can't get the status of precreated files"
3278 # write into previously created files
3279 for i in $(seq 8); do
3280 file_id=$(($last_id + $i))
3281 dd if=/dev/urandom of=$DIR/$tdir/f-$file_id bs=4096 count=128
3282 cp -f $DIR/$tdir/f-$file_id $TMP/$tdir/
3285 # compare the content
3286 for i in $(seq 8); do
3287 file_id=$(($last_id + $i))
3288 cmp $TMP/$tdir/f-$file_id $DIR/$tdir/f-$file_id ||
3289 error "the content of the file was modified!"
3294 run_test 88 "MDS should not assign same objid to different files"
3297 cancel_lru_locks osc
3298 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3299 rm -f $DIR/$tdir/$tfile
3301 wait_delete_completed
3302 BLOCKS1=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
3303 $SETSTRIPE -i 0 -c 1 $DIR/$tdir/$tfile
3304 dd if=/dev/zero bs=1M count=10 of=$DIR/$tdir/$tfile
3307 facet_failover $SINGLEMDS
3308 rm $DIR/$tdir/$tfile
3311 zconf_mount $(hostname) $MOUNT || error "mount fails"
3312 client_up || error "client_up failed"
3314 wait_delete_completed
3315 BLOCKS2=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
3316 [ $((BLOCKS2 - BLOCKS1)) -le 4 ] ||
3317 error "$((BLOCKS2 - BLOCKS1)) blocks leaked"
3319 run_test 89 "no disk space leak on late ost connection"
3326 change_active $facet
3327 wait_for_facet $facet
3328 mount_facet $facet || error "Restart of $facet failed"
3332 test_90() { # bug 19494
3333 local dir=$DIR/$tdir
3334 local ostfail=$(get_random_entry $(get_facets OST))
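# pick one OST facet at random to shut down; lfs find / lfs getstripe should
# then report exactly the files striped on that OST as affected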
3336 if [[ $FAILURE_MODE = HARD ]]; then
3337 local affected=$(affected_facets $ostfail);
3338 if [[ "$affected" != $ostfail ]]; then
3339 skip "not functional with FAILURE_MODE=$FAILURE_MODE, affected: $affected"
3343 # ensure all OSTs are active to allow allocations
3346 mkdir $dir || error "mkdir $dir failed"
3348 echo "Create the files"
3350 # file "f${index}" striped over 1 OST
3351 # file "all" striped over all OSTs
3353 $SETSTRIPE -c $OSTCOUNT $dir/all ||
3354 error "setstripe failed to create $dir/all"
3356 for (( i=0; i<$OSTCOUNT; i++ )); do
3358 $SETSTRIPE -i $i -c 1 $f || error "$SETSTRIPE failed to create $f"
3360 # confirm setstripe actually created the stripe on the requested OST
3361 local uuid=$(ostuuid_from_index $i)
3362 for file in f$i all; do
3363 if [[ $dir/$file != $($LFS find --obd $uuid --name $file $dir) ]]; then
3364 $GETSTRIPE $dir/$file
3365 error "wrong stripe: $file, uuid: $uuid"
3370 # Before failing an OST, get its obd name and index
3371 local varsvc=${ostfail}_svc
3372 local obd=$(do_facet $ostfail lctl get_param \
3373 -n obdfilter.${!varsvc}.uuid)
3374 local index=$(($(facet_number $ostfail) - 1))
3376 echo "Fail $ostfail $obd, display the list of affected files"
3377 shutdown_facet $ostfail || error "shutdown_facet $ostfail failed"
3379 trap "cleanup_90 $ostfail" EXIT INT
3380 echo "General Query: lfs find $dir"
3381 local list=$($LFS find $dir)
3383 for (( i=0; i<$OSTCOUNT; i++ )); do
3384 list_member "$list" $dir/f$i ||
3385 error_noexit "lfs find $dir: no file f$i"
3387 list_member "$list" $dir/all ||
3388 error_noexit "lfs find $dir: no file all"
3390 # focus on the missing OST,
3391 # we expect to see only two files affected: "f$index" and "all"
3393 echo "Querying files on shutdown $ostfail: lfs find --obd $obd"
3394 list=$($LFS find --obd $obd $dir)
3396 for file in all f$index; do
3397 list_member "$list" $dir/$file ||
3398 error_noexit "lfs find does not report the affected $obd for $file"
3401 [[ $(echo $list | wc -w) -eq 2 ]] ||
3402 error_noexit "lfs find reports the wrong list of affected files ${#list[@]}"
3404 echo "Check getstripe: $GETSTRIPE -r --obd $obd"
3405 list=$($GETSTRIPE -r --obd $obd $dir)
3407 for file in all f$index; do
3408 echo "$list" | grep $dir/$file ||
3409 error_noexit "lfs getstripe does not report the affected $obd for $file"
3414 run_test 90 "lfs find identifies the missing striped file segments"
3417 local server_version=$(lustre_version_code $SINGLEMDS)
3418 [[ $server_version -ge $(version_code 2.6.90) ]] ||
3419 [[ $server_version -ge $(version_code 2.5.4) &&
3420 $server_version -lt $(version_code 2.5.50) ]] ||
3421 { skip "Need MDS version 2.5.4+ or 2.6.90+"; return; }
3423 cancel_lru_locks osc
3425 $SETSTRIPE -i 0 -c 1 $DIR/$tfile ||
3426 error "$SETSTRIPE $DIR/$tfile failed"
3427 dd if=/dev/zero of=$DIR/$tfile bs=1024 count=1 ||
3428 error "dd to $DIR/$tfile failed"
3429 #define OBD_FAIL_TGT_REPLAY_RECONNECT 0x715
3430 # We need to emulate a state where the OST is still waiting for other
3431 # clients to complete recovery. The final ping is queued, but its reply
3432 # is only sent once recovery completes; this is emulated by sleeping
3433 # before processing the final pings.
3434 do_facet ost1 "$LCTL set_param fail_val=40"
3435 do_facet ost1 "$LCTL set_param fail_loc=0x715"
3438 run_test 93a "replay + reconnect"
3441 local server_version=$(lustre_version_code $SINGLEMDS)
3442 [[ $server_version -ge $(version_code 2.7.90) ]] ||
3443 { skip "Need MDS version 2.7.90+"; return; }
3445 cancel_lru_locks mdc
3447 createmany -o $DIR/$tfile 20 ||
3448 error "createmany -o $DIR/$tfile failed"
3450 #define OBD_FAIL_TGT_REPLAY_RECONNECT 0x715
3451 # We need to emulate a state where the MDT is still waiting for other
3452 # clients to complete recovery. The final ping is queued, but its reply
3453 # is only sent once recovery completes; this is emulated by sleeping
3454 # before processing the final pings.
3455 do_facet mds1 "$LCTL set_param fail_val=80"
3456 do_facet mds1 "$LCTL set_param fail_loc=0x715"
3459 run_test 93b "replay + reconnect on mds"
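# striped_dir_check_100: the striped dir must survive recovery with a stripe
# count of 2 and still allow file creation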
3461 striped_dir_check_100() {
3462 local striped_dir=$DIR/$tdir/striped_dir
3463 local stripe_count=$($LFS getdirstripe -c $striped_dir)
3465 $LFS getdirstripe $striped_dir
3466 [ $stripe_count -eq 2 ] || error "$stripe_count != 2"
3468 createmany -o $striped_dir/f-%d 20 ||
3469 error "creation failed under striped dir"
3473 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3474 ([ $FAILURE_MODE == "HARD" ] &&
3475 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3476 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3479 local striped_dir=$DIR/$tdir/striped_dir
3482 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3484 # To make sure MDT1 and MDT0 are connected;
3485 # otherwise it may create a single-stripe dir here
3486 $LFS setdirstripe -i1 $DIR/$tdir/remote_dir
3488 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
3489 do_facet mds$((MDTIDX+1)) lctl set_param fail_loc=0x1701
3490 $LFS setdirstripe -i0 -c2 $striped_dir &
3493 fail mds$((MDTIDX + 1))
3495 wait $CLIENT_PID || error "striped dir creation failed"
3497 striped_dir_check_100 || error "striped dir check failed"
3498 rm -rf $DIR/$tdir || error "rmdir failed"
3500 run_test 100a "DNE: create striped dir, drop update rep from MDT1, fail MDT1"
3503 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3504 ([ $FAILURE_MODE == "HARD" ] &&
3505 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3506 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3509 local striped_dir=$DIR/$tdir/striped_dir
3512 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3514 # To make sure MDT1 and MDT0 are connected;
3515 # otherwise it may create a single-stripe dir here
3516 $LFS setdirstripe -i1 $DIR/$tdir/remote_dir
3518 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3519 do_facet mds$MDTIDX lctl set_param fail_loc=0x119
3520 $LFS mkdir -i0 -c2 $striped_dir &
3525 wait $CLIENT_PID || error "striped dir creation failed"
3527 striped_dir_check_100 || error "striped dir check failed"
3528 rm -rf $DIR/$tdir || error "rmdir failed"
3530 run_test 100b "DNE: create striped dir, fail MDT0"
3532 test_101() { #LU-5648
3533 mkdir -p $DIR/$tdir/d1
3534 mkdir -p $DIR/$tdir/d2
3535 touch $DIR/$tdir/file0
3538 replay_barrier $SINGLEMDS
3539 for i in $(seq $num) ; do
3540 echo test$i > $DIR/$tdir/d1/file$i
3543 fail_abort $SINGLEMDS
3544 for i in $(seq $num) ; do
3545 touch $DIR/$tdir/d2/file$i
3546 test -s $DIR/$tdir/d2/file$i &&
3547 ls -al $DIR/$tdir/d2/file$i && error "file$i's size > 0"
3552 run_test 101 "Shouldn't reassign precreated objs to other files after recovery"
3561 [[ $(lctl get_param mdc.*.import |
3562 grep "connect_flags:.*multi_mod_rpc") ]] ||
3563 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3565 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3566 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
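# map the directory's MDT index (hex) to the corresponding mds facet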
3567 facet="mds$((0x$idx + 1))"
3569 # get current value of max_mod_rpcs_in_flight
3570 num=$($LCTL get_param -n \
3571 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3572 # set default value if client does not support multi mod RPCs
3573 [ -z "$num" ] && num=1
3575 echo "creating $num files ..."
3577 for i in $(seq $num); do
3578 touch $DIR/$tdir/file-$i
3581 # drop request on MDT to force resend
3582 #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
3583 do_facet $facet "$LCTL set_param fail_loc=0x159"
3584 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3585 for i in $(seq $num); do
3586 chmod 0600 $DIR/$tdir/file-$i &
3590 do_facet $facet "$LCTL set_param fail_loc=0"
3591 for pid in $pids; do
3592 wait $pid || error "chmod failed"
3594 echo "done ($(date +%H:%M:%S))"
3596 # check chmod succeed
3597 for i in $(seq $num); do
3598 checkstat -vp 0600 $DIR/$tdir/file-$i
3603 run_test 102a "check resend (request lost) with multiple modify RPCs in flight"
3612 [[ $(lctl get_param mdc.*.import |
3613 grep "connect_flags:.*multi_mod_rpc") ]] ||
3614 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3616 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3617 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3618 facet="mds$((0x$idx + 1))"
3620 # get current value of max_mod_rpcs_in_flight
3621 num=$($LCTL get_param -n \
3622 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3623 # set default value if client does not support multi mod RPCs
3624 [ -z "$num" ] && num=1
3626 echo "creating $num files ..."
3628 for i in $(seq $num); do
3629 touch $DIR/$tdir/file-$i
3632 # drop reply on MDT to force reconstruction
3633 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
3634 do_facet $facet "$LCTL set_param fail_loc=0x15a"
3635 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3636 for i in $(seq $num); do
3637 chmod 0600 $DIR/$tdir/file-$i &
3641 do_facet $facet "$LCTL set_param fail_loc=0"
3642 for pid in $pids; do
3643 wait $pid || error "chmod failed"
3645 echo "done ($(date +%H:%M:%S))"
3647 # check chmod succeed
3648 for i in $(seq $num); do
3649 checkstat -vp 0600 $DIR/$tdir/file-$i
3654 run_test 102b "check resend (reply lost) with multiple modify RPCs in flight"
3663 [[ $(lctl get_param mdc.*.import |
3664 grep "connect_flags:.*multi_mod_rpc") ]] ||
3665 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3667 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3668 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3669 facet="mds$((0x$idx + 1))"
3671 # get current value of max_mod_rpcs_in_flight
3672 num=$($LCTL get_param -n \
3673 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3674 # set default value if client does not support multi mod RPCs
3675 [ -z "$num" ] && num=1
3677 echo "creating $num files ..."
3679 for i in $(seq $num); do
3680 touch $DIR/$tdir/file-$i
3683 replay_barrier $facet
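# the replay barrier keeps the chmod transactions uncommitted on the MDT, so
# after the failover they are replayed by the client rather than
# reconstructed from saved replies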
3686 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
3687 do_facet $facet "$LCTL set_param fail_loc=0x15a"
3688 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3689 for i in $(seq $num); do
3690 chmod 0600 $DIR/$tdir/file-$i &
3694 do_facet $facet "$LCTL set_param fail_loc=0"
3699 for pid in $pids; do
3700 wait $pid || error "chmod failed"
3702 echo "done ($(date +%H:%M:%S))"
3704 # check chmod succeed
3705 for i in $(seq $num); do
3706 checkstat -vp 0600 $DIR/$tdir/file-$i
3711 run_test 102c "check replay w/o reconstruction with multiple mod RPCs in flight"
3720 [[ $(lctl get_param mdc.*.import |
3721 grep "connect_flags:.*multi_mod_rpc") ]] ||
3722 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3724 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3725 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3726 facet="mds$((0x$idx + 1))"
3728 # get current value of max_mod_rpcs_in_flight
3729 num=$($LCTL get_param -n \
3730 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3731 # set default value if client does not support multi mod RPCs
3732 [ -z "$num" ] && num=1
3734 echo "creating $num files ..."
3736 for i in $(seq $num); do
3737 touch $DIR/$tdir/file-$i
3741 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
3742 do_facet $facet "$LCTL set_param fail_loc=0x15a"
3743 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3744 for i in $(seq $num); do
3745 chmod 0600 $DIR/$tdir/file-$i &
3750 # write MDT transactions to disk
3751 do_facet $facet "sync; sync; sync"
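# with the transactions committed to disk, recovery of the dropped replies
# relies on reply reconstruction rather than client replay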
3753 do_facet $facet "$LCTL set_param fail_loc=0"
3758 for pid in $pids; do
3759 wait $pid || error "chmod failed"
3761 echo "done ($(date +%H:%M:%S))"
3763 # check chmod succeed
3764 for i in $(seq $num); do
3765 checkstat -vp 0600 $DIR/$tdir/file-$i
3770 run_test 102d "check replay & reconstruction with multiple mod RPCs in flight"
3773 remote_mds_nodsh && skip "remote MDS with nodsh" && return
3774 local mds_version=$(lustre_version_code $SINGLEMDS)
3775 [[ $mds_version -gt $(version_code 2.8.54) ]] ||
3776 { skip "Need MDS version 2.8.54+"; return; }
3778 #define OBD_FAIL_MDS_TRACK_OVERFLOW 0x162
3779 do_facet mds1 $LCTL set_param fail_loc=0x80000162
3782 createmany -o $DIR/$tdir/t- 30 ||
3783 error "create files on remote directory failed"
3785 rm -rf $DIR/$tdir/t-*
3787 # MDS should crash with tr->otr_next_id overflow
3790 run_test 103 "Check otr_next_id overflow"
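# check_striped_dir_110: after recovery the striped dir must exist and report
# a stripe count equal to MDSCOUNT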
3793 check_striped_dir_110()
3795 $CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
3796 error "create striped dir failed"
3797 local stripe_count=$($LFS getdirstripe -c $DIR/$tdir/striped_dir)
3798 [ $stripe_count -eq $MDSCOUNT ] ||
3799 error "stripe count $stripe_count != $MDSCOUNT after recovery"
3803 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3804 ([ $FAILURE_MODE == "HARD" ] &&
3805 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3806 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3811 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3814 check_striped_dir_110 || error "check striped_dir failed"
3815 rm -rf $DIR/$tdir || error "rmdir failed"
3819 run_test 110a "DNE: create striped dir, fail MDT1"
3822 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3823 ([ $FAILURE_MODE == "HARD" ] &&
3824 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3825 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3830 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3833 zconf_mount $(hostname) $MOUNT
3834 client_up || return 1
3836 check_striped_dir_110 || error "check striped_dir failed"
3838 rm -rf $DIR/$tdir || error "rmdir failed"
3842 run_test 110b "DNE: create striped dir, fail MDT1 and client"
3845 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3846 ([ $FAILURE_MODE == "HARD" ] &&
3847 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3848 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3853 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3856 check_striped_dir_110 || error "check striped_dir failed"
3858 rm -rf $DIR/$tdir || error "rmdir failed"
3862 run_test 110c "DNE: create striped dir, fail MDT2"
3865 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3866 ([ $FAILURE_MODE == "HARD" ] &&
3867 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3868 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3873 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3876 zconf_mount $(hostname) $MOUNT
3877 client_up || return 1
3879 check_striped_dir_110 || error "check striped_dir failed"
3881 rm -rf $DIR/$tdir || error "rmdir failed"
3885 run_test 110d "DNE: create striped dir, fail MDT2 and client"
3888 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3889 ([ $FAILURE_MODE == "HARD" ] &&
3890 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3891 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3896 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3900 zconf_mount $(hostname) $MOUNT
3901 client_up || return 1
3903 check_striped_dir_110 || error "check striped_dir failed"
3905 rm -rf $DIR/$tdir || error "rmdir failed"
3909 run_test 110e "DNE: create striped dir, uncommit on MDT2, fail client/MDT1/MDT2"
3912 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3913 ([ $FAILURE_MODE == "HARD" ] &&
3914 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3915 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3921 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3924 check_striped_dir_110 || error "check striped_dir failed"
3926 rm -rf $DIR/$tdir || error "rmdir failed"
3930 run_test 110f "DNE: create striped dir, fail MDT1/MDT2"
3933 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3934 ([ $FAILURE_MODE == "HARD" ] &&
3935 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3936 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3941 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3945 zconf_mount $(hostname) $MOUNT
3946 client_up || return 1
3948 check_striped_dir_110 || error "check striped_dir failed"
3950 rm -rf $DIR/$tdir || error "rmdir failed"
3954 run_test 110g "DNE: create striped dir, uncommit on MDT1, fail client/MDT1/MDT2"
3957 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3958 ([ $FAILURE_MODE == "HARD" ] &&
3959 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3960 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3964 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
3966 rm -rf $DIR/$tdir/striped_dir
3969 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
3970 error "striped dir still exists"
3973 run_test 111a "DNE: unlink striped dir, fail MDT1"
3976 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3977 ([ $FAILURE_MODE == "HARD" ] &&
3978 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3979 skip "MDTs need to be on diff hosts for HARD fail mode" &&
3983 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
3985 rm -rf $DIR/$tdir/striped_dir
3988 zconf_mount $(hostname) $MOUNT
3989 client_up || return 1
3991 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
3992 error "striped dir still exists"
3995 run_test 111b "DNE: unlink striped dir, fail MDT2"
3998 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3999 ([ $FAILURE_MODE == "HARD" ] &&
4000 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4001 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4005 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4007 rm -rf $DIR/$tdir/striped_dir
4011 zconf_mount $(hostname) $MOUNT
4012 client_up || return 1
4013 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4014 error "striped dir still exists"
4017 run_test 111c "DNE: unlink striped dir, uncommit on MDT1, fail client/MDT1/MDT2"
4020 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4021 ([ $FAILURE_MODE == "HARD" ] &&
4022 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4023 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4027 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4029 rm -rf $DIR/$tdir/striped_dir
4033 zconf_mount $(hostname) $MOUNT
4034 client_up || return 1
4035 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4036 error "striped dir still exists"
4040 run_test 111d "DNE: unlink striped dir, uncommit on MDT2, fail client/MDT1/MDT2"
4043 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4044 ([ $FAILURE_MODE == "HARD" ] &&
4045 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4046 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4050 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4052 rm -rf $DIR/$tdir/striped_dir
4055 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4056 error "striped dir still exists"
4059 run_test 111e "DNE: unlink striped dir, uncommit on MDT2, fail MDT1/MDT2"
4062 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4063 ([ $FAILURE_MODE == "HARD" ] &&
4064 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4065 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4069 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4071 rm -rf $DIR/$tdir/striped_dir
4074 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4075 error "striped dir still exists"
4078 run_test 111f "DNE: unlink striped dir, uncommit on MDT1, fail MDT1/MDT2"
4081 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4082 ([ $FAILURE_MODE == "HARD" ] &&
4083 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4084 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4088 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4091 rm -rf $DIR/$tdir/striped_dir
4093 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4094 error "striped dir still exists"
4097 run_test 111g "DNE: unlink striped dir, fail MDT1/MDT2"
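# test_112_rename_prepare: build a source tree on MDT1 and a target tree
# spanning MDT2/MDT3 so that the rename in the tests below crosses MDTs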
4099 test_112_rename_prepare() {
4100 mkdir -p $DIR/$tdir/src_dir
4101 $LFS mkdir -i 1 $DIR/$tdir/src_dir/src_child ||
4102 error "create remote source failed"
4104 touch $DIR/$tdir/src_dir/src_child/a
4106 $LFS mkdir -i 2 $DIR/$tdir/tgt_dir ||
4107 error "create remote target dir failed"
4109 $LFS mkdir -i 3 $DIR/$tdir/tgt_dir/tgt_child ||
4110 error "create remote target child failed"
4115 $CHECKSTAT -t dir $DIR/$tdir/src_dir/src_child &&
4116 error "src_child still exists after rename"
4118 $CHECKSTAT -t file $DIR/$tdir/tgt_dir/tgt_child/a ||
4119 error "missing file(a) after rename"
4123 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4124 ([ $FAILURE_MODE == "HARD" ] &&
4125 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4126 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4129 test_112_rename_prepare
4132 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4133 error "rename dir cross MDT failed!"
4137 rm -rf $DIR/$tdir || error "rmdir failed"
4139 run_test 112a "DNE: cross MDT rename, fail MDT1"
4142 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4143 ([ $FAILURE_MODE == "HARD" ] &&
4144 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4145 skip "MDTs need to be on diff hosts for HARD fail mode" &&
4148 test_112_rename_prepare
4151 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4152 error "rename dir cross MDT failed!"
4157 rm -rf $DIR/$tdir || error "rmdir failed"
4159 run_test 112b "DNE: cross MDT rename, fail MDT2"
test_112c() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds3
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds3
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112c "DNE: cross MDT rename, fail MDT3"
test_112d() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112d "DNE: cross MDT rename, fail MDT4"
test_112e() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds1
	replay_barrier mds2
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds1,mds2
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112e "DNE: cross MDT rename, fail MDT1 and MDT2"
test_112f() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds1
	replay_barrier mds3
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds1,mds3
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112f "DNE: cross MDT rename, fail MDT1 and MDT3"
test_112g() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds1
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds1,mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112g "DNE: cross MDT rename, fail MDT1 and MDT4"
test_112h() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds2
	replay_barrier mds3
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds2,mds3
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112h "DNE: cross MDT rename, fail MDT2 and MDT3"
test_112i() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds2
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds2,mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112i "DNE: cross MDT rename, fail MDT2 and MDT4"
test_112j() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds3
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds3,mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112j "DNE: cross MDT rename, fail MDT3 and MDT4"
test_112k() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds1
	replay_barrier mds2
	replay_barrier mds3
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds1,mds2,mds3
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112k "DNE: cross MDT rename, fail MDT1,MDT2,MDT3"
test_112l() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds1
	replay_barrier mds2
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds1,mds2,mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112l "DNE: cross MDT rename, fail MDT1,MDT2,MDT4"
test_112m() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds1
	replay_barrier mds3
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds1,mds3,mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112m "DNE: cross MDT rename, fail MDT1,MDT3,MDT4"
test_112n() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	test_112_rename_prepare
	replay_barrier mds2
	replay_barrier mds3
	replay_barrier mds4
	mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
		error "rename dir cross MDT failed!"
	fail mds2,mds3,mds4
	test_112_check
	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 112n "DNE: cross MDT rename, fail MDT2,MDT3,MDT4"
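# Test 115 loops over all MDTs: each pass picks the next MDS in
# round-robin order (fail_index % MDSCOUNT), sets a replay barrier on
# it, creates a batch of striped directories, fails that MDS, and then
# checks that the directories survived replay before removing them.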
test_115() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0
	local fail_index=0
	local index

	mkdir -p $DIR/$tdir
	for ((j=0;j<$((MDSCOUNT));j++)); do
		fail_index=$((fail_index+1))
		index=$((fail_index % MDSCOUNT))
		replay_barrier mds$((index + 1))
		for ((i=0;i<5;i++)); do
			test_mkdir -i$index -c$MDSCOUNT $DIR/$tdir/test_$i ||
				error "create striped dir $DIR/$tdir/test_$i"
		done

		fail mds$((index + 1))
		for ((i=0;i<5;i++)); do
			checkstat -t dir $DIR/$tdir/test_$i ||
				error "$DIR/$tdir/test_$i does not exist!"
		done
		rm -rf $DIR/$tdir/test_* ||
			error "rmdir fails"
	done
}
run_test 115 "failover for create/unlink striped directory"
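# Tests 116a/116b use fail_loc 0x1702 (OBD_FAIL_SPLIT_UPDATE_REC), which,
# as the name suggests, forces the update records for the striped-dir
# creation to be split, so recovery has to replay a large, multi-record
# update log on the master (116a) or slave (116b) MDT.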
test_116a() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.7.55) ] &&
		skip "Do not support large update log before 2.7.55" &&
		return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	mkdir -p $DIR/$tdir
	replay_barrier mds1
	# OBD_FAIL_SPLIT_UPDATE_REC 0x1702
	do_facet mds1 "lctl set_param fail_loc=0x80001702"
	$LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir
	fail mds1

	$CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
		error "striped_dir does not exist"
}
run_test 116a "large update log master MDT recovery"
test_116b() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.7.55) ] &&
		skip "Do not support large update log before 2.7.55" &&
		return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0

	mkdir -p $DIR/$tdir
	replay_barrier mds2
	# OBD_FAIL_SPLIT_UPDATE_REC 0x1702
	do_facet mds2 "lctl set_param fail_loc=0x80001702"
	$LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir
	fail mds2

	$CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
		error "striped_dir does not exist"
}
run_test 116b "large update log slave MDT recovery"
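# Test 117 sets a replay barrier on every MDT and then fails them all
# together (fail accepts the comma-separated facet list built below), so
# the client must replay its cross-MDT unlinks on all MDTs at once.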
test_117() {
	[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
	([ $FAILURE_MODE == "HARD" ] &&
		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
		return 0
	local index
	local mds_indexs

	mkdir -p $DIR/$tdir
	$LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/remote_dir
	$LFS setdirstripe -i1 -c$MDSCOUNT $DIR/$tdir/remote_dir_1

	# Set a replay barrier (rdonly) on all MDTs, so the client will
	# send replay requests to all MDTs and replay them at the same
	# time. This verifies that recovery does not deadlock in this
	# case, LU-7531.
	for ((index = 0; index < $((MDSCOUNT)); index++)); do
		replay_barrier mds$((index + 1))
		if [ -z "$mds_indexs" ]; then
			mds_indexs="${mds_indexs}mds$((index+1))"
		else
			mds_indexs="${mds_indexs},mds$((index+1))"
		fi
	done

	rm -rf $DIR/$tdir/remote_dir
	rm -rf $DIR/$tdir/remote_dir_1

	fail $mds_indexs

	rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 117 "DNE: cross MDT unlink, fail MDT1 and MDT2"
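# Test 118 injects fail_loc 0x1705 (OBD_FAIL_INVALIDATE_UPDATE) on MDT1
# while striped directories are being created, then clears it and
# creates the same directories again; per the test summary this checks
# that an invalidated OSP update does not corrupt the update log.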
test_118() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.7.64) ] &&
		skip "Do not support large update log before 2.7.64" &&
		return 0

	mkdir -p $DIR/$tdir

	$LFS setdirstripe -c2 $DIR/$tdir/striped_dir ||
		error "setdirstripe fails"
	$LFS setdirstripe -c2 $DIR/$tdir/striped_dir1 ||
		error "setdirstripe fails 1"
	rm -rf $DIR/$tdir/striped_dir* || error "rmdir fails"

	# OBD_FAIL_INVALIDATE_UPDATE 0x1705
	do_facet mds1 "lctl set_param fail_loc=0x1705"
	$LFS setdirstripe -c2 $DIR/$tdir/striped_dir
	$LFS setdirstripe -c2 $DIR/$tdir/striped_dir1
	do_facet mds1 "lctl set_param fail_loc=0x0"

	$LFS setdirstripe -c2 $DIR/$tdir/striped_dir
	$LFS setdirstripe -c2 $DIR/$tdir/striped_dir1

	rm -rf $DIR/$tdir/striped_dir* || error "rmdir fails"
}
run_test 118 "invalidate osp update will not cause update log corruption"
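# Test 119 shrinks recovery_time_hard to recovery_time_min and delays
# target replay with fail_loc 0x714 (OBD_FAIL_TGT_REPLAY_DELAY) for a
# bit longer than that window, so the MDS evicts the client exports; the
# striped-directory creations must still be replayed through the DNE
# update logs, which the stripe_count checks at the end verify.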
test_119() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.7.64) ] &&
		skip "Do not support large update log before 2.7.64" &&
		return 0
	local stripe_count
	local hard_timeout=$(do_facet mds1 \
		"lctl get_param -n mdt.$FSNAME-MDT0000.recovery_time_hard")

	local clients=${CLIENTS:-$HOSTNAME}
	local time_min=$(recovery_time_min)

	mkdir -p $DIR/$tdir
	mkdir $DIR/$tdir/tmp
	rmdir $DIR/$tdir/tmp

	replay_barrier mds1
	mkdir $DIR/$tdir/dir_1
	for ((i = 0; i < 20; i++)); do
		$LFS setdirstripe -i0 -c2 $DIR/$tdir/stripe_dir-$i
	done

	stop mds1 --force || error "stop mds1 failed"

	#define OBD_FAIL_TGT_REPLAY_DELAY 0x714
	do_facet mds1 $LCTL set_param fail_loc=0x80000714
	# sleep (timeout + 5), so the MDS will evict the client exports,
	# but DNE update recovery will keep going.
	do_facet mds1 $LCTL set_param fail_val=$((time_min + 5))

	mount_facet mds1 "-o recovery_time_hard=$time_min"

	wait_clients_import_state "$clients" mds1 FULL

	clients_up || clients_up || error "failover df: $?"

	# restore the original hard timeout
	do_facet mds1 $LCTL set_param \
		mdt.$FSNAME-MDT0000.recovery_time_hard=$hard_timeout

	for ((i = 0; i < 20; i++)); do
		stripe_count=$($LFS getdirstripe -c $DIR/$tdir/stripe_dir-$i)
		[ $stripe_count == 2 ] || {
			error "stripe_dir-$i creation replay fails"
			break
		}
	done
}
run_test 119 "timeout of normal replay does not cause DNE replay to fail"
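# Test 120 sets a replay barrier (without sync) on MDT1, creates a mix
# of plain and striped directories behind it, then aborts recovery; the
# checks afterwards expect none of them to exist, i.e. an aborted
# recovery must discard both normal replay and DNE update replay.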
test_120() {
	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.7.64) ] &&
		skip "Do not support large update log before 2.7.64" &&
		return 0

	mkdir -p $DIR/$tdir
	replay_barrier_nosync mds1
	for ((i = 0; i < 20; i++)); do
		mkdir $DIR/$tdir/dir-$i || {
			error "create dir-$i fails"
			break
		}
		$LFS setdirstripe -i0 -c2 $DIR/$tdir/stripe_dir-$i || {
			error "create stripe_dir-$i fails"
			break
		}
	done

	fail_abort mds1

	for ((i = 0; i < 20; i++)); do
		[ ! -e "$DIR/$tdir/dir-$i" ] || {
			error "dir-$i still exists"
			break
		}
		[ ! -e "$DIR/$tdir/stripe_dir-$i" ] || {
			error "stripe_dir-$i still exists"
			break
		}
	done
}
run_test 120 "DNE fail abort should stop both normal and DNE replay"
complete $SECONDS
check_and_cleanup_lustre
exit_status