# Root of the Lustre source tree; defaults to the parent of this script's
# directory (the script is expected to live in lustre/tests/).
5 LUSTRE=${LUSTRE:-$(dirname $0)/..}
# Source the shared Lustre test framework helpers used throughout this suite.
6 . $LUSTRE/tests/test-framework.sh
10 ALWAYS_EXCEPT="$REPLAY_SINGLE_EXCEPT "
11 # bug number for skipped test: LU-13614
14 if [ "$mds1_FSTYPE" = zfs ]; then
19 # bug number for skipped tests: LU-9795
20 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 121"
22 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
# Grant accounting checks run after each test unless explicitly disabled.
26 CHECK_GRANT=${CHECK_GRANT:-"yes"}
27 GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
# This suite drives MDS failover; it cannot run without dsh access to the MDS.
29 require_dsh_mds || exit 0
30 check_and_setup_lustre
# Remove leftover files from earlier runs of this suite.
35 rm -rf $DIR/[df][0-9]* $DIR/f.$TESTSUITE.*
37 # LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
38 if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
40 do_facet $SINGLEMDS sync
43 test_0a() { # was test_0
44 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
45 replay_barrier $SINGLEMDS
49 run_test 0a "empty replay"
52 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
54 # this test attempts to trigger a race in the precreation code,
55 # and must run before any other objects are created on the filesystem
57 createmany -o $DIR/$tfile 20 || error "createmany -o $DIR/$tfile failed"
58 unlinkmany $DIR/$tfile 20 || error "unlinkmany $DIR/$tfile failed"
60 run_test 0b "ensure object created after recover exists. (3284)"
63 replay_barrier $SINGLEMDS
66 facet_failover $SINGLEMDS
67 zconf_mount $(hostname) $MOUNT || error "mount fails"
68 client_up || error "post-failover df failed"
69 # file shouldn't exist if replay-barrier works as expected
70 rm $DIR/$tfile && error "File exists and it shouldn't"
73 run_test 0c "check replay-barrier"
76 replay_barrier $SINGLEMDS
78 facet_failover $SINGLEMDS
79 zconf_mount $(hostname) $MOUNT || error "mount fails"
80 client_up || error "post-failover df failed"
82 run_test 0d "expired recovery with no clients"
85 replay_barrier $SINGLEMDS
88 $CHECKSTAT -t file $DIR/$tfile ||
89 error "$CHECKSTAT $DIR/$tfile attribute check failed"
92 run_test 1 "simple create"
95 replay_barrier $SINGLEMDS
98 $CHECKSTAT -t file $DIR/$tfile ||
99 error "$CHECKSTAT $DIR/$tfile attribute check failed"
105 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
106 replay_barrier $SINGLEMDS
109 $CHECKSTAT -t file $DIR/$tfile ||
110 error "$CHECKSTAT $DIR/$tfile attribute check failed"
116 replay_barrier $SINGLEMDS
117 $LFS setstripe -c $OSTCOUNT $DIR/$tfile
119 $CHECKSTAT -t file $DIR/$tfile ||
120 error "$CHECKSTAT $DIR/$tfile check failed"
122 run_test 2c "setstripe replay"
125 [[ "$mds1_FSTYPE" = zfs ]] &&
126 [[ "$MDS1_VERSION" -lt $(version_code 2.12.51) ]] &&
127 skip "requires LU-10143 fix on MDS"
128 replay_barrier $SINGLEMDS
129 $LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir
131 $CHECKSTAT -t dir $DIR/$tdir ||
132 error "$CHECKSTAT $DIR/$tdir check failed"
134 run_test 2d "setdirstripe replay"
137 testid=$(echo $TESTNAME | tr '_' ' ')
138 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
139 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000013b"
140 openfile -f O_CREAT:O_EXCL $DIR/$tfile &
142 replay_barrier $SINGLEMDS
145 $CHECKSTAT -t file $DIR/$tfile ||
146 error "$CHECKSTAT $DIR/$tfile attribute check failed"
147 dmesg | tac | sed "/$testid/,$ d" | \
148 grep "Open request replay failed with -17" &&
149 error "open replay failed" || true
151 run_test 2e "O_CREAT|O_EXCL create replay"
154 local file=$DIR/$tfile
155 replay_barrier $SINGLEMDS
157 openfile -f O_DIRECTORY $file
159 $CHECKSTAT -t file $file ||
160 error "$CHECKSTAT $file attribute check failed"
163 run_test 3a "replay failed open(O_DIRECTORY)"
166 replay_barrier $SINGLEMDS
167 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
168 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
170 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
172 $CHECKSTAT -t file $DIR/$tfile &&
173 error "$CHECKSTAT $DIR/$tfile attribute check should fail"
176 run_test 3b "replay failed open -ENOMEM"
179 replay_barrier $SINGLEMDS
180 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
181 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
183 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
186 $CHECKSTAT -t file $DIR/$tfile &&
187 error "$CHECKSTAT $DIR/$tfile attribute check should fail"
190 run_test 3c "replay failed open -ENOMEM"
192 test_4a() { # was test_4
193 replay_barrier $SINGLEMDS
194 for i in $(seq 10); do
195 echo "tag-$i" > $DIR/$tfile-$i
198 for i in $(seq 10); do
199 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
202 run_test 4a "|x| 10 open(O_CREAT)s"
205 for i in $(seq 10); do
206 echo "tag-$i" > $DIR/$tfile-$i
208 replay_barrier $SINGLEMDS
211 $CHECKSTAT -t file $DIR/$tfile-* &&
212 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
215 run_test 4b "|x| rm 10 files"
217 # The idea is to get past the first block of precreated files on both
218 # osts, and then replay.
220 replay_barrier $SINGLEMDS
221 for i in $(seq 220); do
222 echo "tag-$i" > $DIR/$tfile-$i
225 for i in $(seq 220); do
226 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
230 # waiting for commitment of removal
232 run_test 5 "|x| 220 open(O_CREAT)"
234 test_6a() { # was test_6
235 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
236 replay_barrier $SINGLEMDS
237 mcreate $DIR/$tdir/$tfile
239 $CHECKSTAT -t dir $DIR/$tdir ||
240 error "$CHECKSTAT $DIR/$tdir attribute check failed"
241 $CHECKSTAT -t file $DIR/$tdir/$tfile ||
242 error "$CHECKSTAT $DIR/$tdir/$tfile attribute check failed"
244 # waiting for log process thread
246 run_test 6a "mkdir + contained create"
249 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
250 replay_barrier $SINGLEMDS
253 $CHECKSTAT -t dir $DIR/$tdir &&
254 error "$CHECKSTAT $DIR/$tdir attribute check should fail" ||
257 run_test 6b "|X| rmdir"
260 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
261 replay_barrier $SINGLEMDS
262 mcreate $DIR/$tdir/$tfile
264 $CHECKSTAT -t dir $DIR/$tdir ||
265 error "$CHECKSTAT $DIR/$tdir attribute check failed"
266 $CHECKSTAT -t file $DIR/$tdir/$tfile ||
267 error "$CHECKSTAT $DIR/$tdir/$tfile attribute check failed"
270 run_test 7 "mkdir |X| contained create"
273 replay_barrier $SINGLEMDS
274 multiop_bg_pause $DIR/$tfile mo_c ||
275 error "multiop mknod $DIR/$tfile failed"
279 $CHECKSTAT -t file $DIR/$tfile ||
280 error "$CHECKSTAT $DIR/$tfile attribute check failed"
281 kill -USR1 $MULTIPID || error "multiop mknod $MULTIPID not running"
282 wait $MULTIPID || error "multiop mknod $MULTIPID failed"
285 run_test 8 "creat open |X| close"
288 replay_barrier $SINGLEMDS
290 local old_inum=$(ls -i $DIR/$tfile | awk '{print $1}')
292 local new_inum=$(ls -i $DIR/$tfile | awk '{print $1}')
294 echo " old_inum == $old_inum, new_inum == $new_inum"
295 if [ $old_inum -eq $new_inum ] ;
297 echo "old_inum and new_inum match"
299 echo " old_inum and new_inum do not match"
300 error "old index($old_inum) does not match new index($new_inum)"
304 run_test 9 "|X| create (same inum/gen)"
307 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
308 replay_barrier $SINGLEMDS
309 mv $DIR/$tfile $DIR/$tfile-2
312 $CHECKSTAT $DIR/$tfile &&
313 error "$CHECKSTAT $DIR/$tfile attribute check should fail"
314 $CHECKSTAT $DIR/$tfile-2 ||
315 error "$CHECKSTAT $DIR/$tfile-2 attribute check failed"
319 run_test 10 "create |X| rename unlink"
322 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
323 echo "old" > $DIR/$tfile
324 mv $DIR/$tfile $DIR/$tfile-2
325 replay_barrier $SINGLEMDS
326 echo "new" > $DIR/$tfile
328 grep old $DIR/$tfile-2
330 grep new $DIR/$tfile || error "grep $DIR/$tfile failed"
331 grep old $DIR/$tfile-2 || error "grep $DIR/$tfile-2 failed"
333 run_test 11 "create open write rename |X| create-old-name read"
336 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
337 multiop_bg_pause $DIR/$tfile o_tSc ||
338 error "multiop_bg_pause $DIR/$tfile failed"
341 replay_barrier $SINGLEMDS
342 kill -USR1 $pid || error "multiop $pid not running"
343 wait $pid || error "multiop $pid failed"
346 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
349 run_test 12 "open, unlink |X| close"
351 # 1777 - replay open after committed chmod that would make
352 # a regular open a failure
354 mcreate $DIR/$tfile || error "mcreate $DIR/$tfile failed"
355 multiop_bg_pause $DIR/$tfile O_wc ||
356 error "multiop_bg_pause $DIR/$tfile failed"
359 $CHECKSTAT -p 0 $DIR/$tfile ||
360 error "$CHECKSTAT $DIR/$tfile attribute check failed"
361 replay_barrier $SINGLEMDS
363 kill -USR1 $pid || error "multiop $pid not running"
364 wait $pid || error "multiop $pid failed"
366 $CHECKSTAT -s 1 -p 0 $DIR/$tfile ||
367 error "second $CHECKSTAT $DIR/$tfile attribute check failed"
368 rm $DIR/$tfile || error "rm $DIR/$tfile failed"
371 run_test 13 "open chmod 0 |x| write close"
374 multiop_bg_pause $DIR/$tfile O_tSc ||
375 error "multiop_bg_pause $DIR/$tfile failed"
378 replay_barrier $SINGLEMDS
379 kill -USR1 $pid || error "multiop $pid not running"
380 wait $pid || error "multiop $pid failed"
383 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
386 run_test 14 "open(O_CREAT), unlink |X| close"
389 multiop_bg_pause $DIR/$tfile O_tSc ||
390 error "multiop_bg_pause $DIR/$tfile failed"
393 replay_barrier $SINGLEMDS
394 touch $DIR/$tfile-1 || error "touch $DIR/$tfile-1 failed"
395 kill -USR1 $pid || error "multiop $pid not running"
396 wait $pid || error "multiop $pid failed"
399 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
400 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
403 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
406 replay_barrier $SINGLEMDS
409 mcreate $DIR/$tfile-2
411 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
412 [ -e $DIR/$tfile-2 ] || error "file $DIR/$tfile-2 does not exist"
413 munlink $DIR/$tfile-2 || error "munlink $DIR/$tfile-2 failed"
415 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
418 replay_barrier $SINGLEMDS
419 multiop_bg_pause $DIR/$tfile O_c ||
420 error "multiop_bg_pause $DIR/$tfile failed"
423 kill -USR1 $pid || error "multiop $pid not running"
424 wait $pid || error "multiop $pid failed"
425 $CHECKSTAT -t file $DIR/$tfile ||
426 error "$CHECKSTAT $DIR/$tfile attribute check failed"
429 run_test 17 "|X| open(O_CREAT), |replay| close"
432 replay_barrier $SINGLEMDS
433 multiop_bg_pause $DIR/$tfile O_tSc ||
434 error "multiop_bg_pause $DIR/$tfile failed"
437 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
438 echo "pid: $pid will close"
439 kill -USR1 $pid || error "multiop $pid not running"
440 wait $pid || error "multiop $pid failed"
443 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
444 [ -e $DIR/$tfile-2 ] || error "file $DIR/$tfile-2 does not exist"
445 # this touch frequently fails
446 touch $DIR/$tfile-3 || error "touch $DIR/$tfile-3 failed"
447 munlink $DIR/$tfile-2 || error "munlink $DIR/$tfile-2 failed"
448 munlink $DIR/$tfile-3 || error "munlink $DIR/$tfile-3 failed"
451 run_test 18 "open(O_CREAT), unlink, touch new, close, touch, unlink"
453 # bug 1855 (a simpler form of test_11 above)
455 replay_barrier $SINGLEMDS
457 echo "old" > $DIR/$tfile
458 mv $DIR/$tfile $DIR/$tfile-2
459 grep old $DIR/$tfile-2
461 grep old $DIR/$tfile-2 || error "grep $DIR/$tfile-2 failed"
463 run_test 19 "mcreate, open, write, rename "
465 test_20a() { # was test_20
466 replay_barrier $SINGLEMDS
467 multiop_bg_pause $DIR/$tfile O_tSc ||
468 error "multiop_bg_pause $DIR/$tfile failed"
473 kill -USR1 $pid || error "multiop $pid not running"
474 wait $pid || error "multiop $pid failed"
475 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
478 run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
480 test_20b() { # bug 10480
481 local wait_timeout=$((TIMEOUT * 4))
482 local extra=$(fs_log_size)
486 save_layout_restore_at_exit $MOUNT
487 $LFS setstripe -i 0 -c 1 $DIR
489 local beforeused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
491 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
492 while [ ! -e $DIR/$tfile ] ; do
493 sleep 0.01 # give dd a chance to start
496 $LFS getstripe $DIR/$tfile || error "$LFS getstripe $DIR/$tfile failed"
498 rm -f $DIR/$tfile || error "rm -f $DIR/$tfile failed"
500 client_up || client_up || true # reconnect
502 do_facet $SINGLEMDS "lctl set_param -n osd*.*MDT*.force_sync=1"
504 fail $SINGLEMDS # start orphan recovery
505 wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
506 wait_delete_completed $wait_timeout || error "delete did not finish"
510 local afterused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
511 log "before $beforeused, after $afterused"
513 (( $beforeused + $extra >= $afterused )) && break
514 n_attempts=$((n_attempts + 1))
515 [ $n_attempts -gt 3 ] &&
516 error "after $afterused > before $beforeused + $extra"
518 wait_zfs_commit $SINGLEMDS 5
523 run_test 20b "write, unlink, eviction, replay (test mds_cleanup_orphans)"
525 test_20c() { # bug 10480
526 multiop_bg_pause $DIR/$tfile Ow_c ||
527 error "multiop_bg_pause $DIR/$tfile failed"
533 client_up || client_up || true # reconnect
535 kill -USR1 $pid || error "multiop $pid not running"
536 wait $pid || error "multiop $pid failed"
537 [ -s $DIR/$tfile ] || error "File was truncated"
541 run_test 20c "check that client eviction does not affect file content"
544 replay_barrier $SINGLEMDS
545 multiop_bg_pause $DIR/$tfile O_tSc ||
546 error "multiop_bg_pause $DIR/$tfile failed"
549 touch $DIR/$tfile-1 || error "touch $DIR/$tfile-1 failed"
552 kill -USR1 $pid || error "multiop $pid not running"
553 wait $pid || error "multiop $pid failed"
554 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
555 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
558 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
561 multiop_bg_pause $DIR/$tfile O_tSc ||
562 error "multiop_bg_pause $DIR/$tfile failed"
565 replay_barrier $SINGLEMDS
569 kill -USR1 $pid || error "multiop $pid not running"
570 wait $pid || error "multiop $pid failed"
571 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
574 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
577 multiop_bg_pause $DIR/$tfile O_tSc ||
578 error "multiop_bg_pause $DIR/$tfile failed"
581 replay_barrier $SINGLEMDS
583 touch $DIR/$tfile-1 || error "touch $DIR/$tfile-1 failed"
586 kill -USR1 $pid || error "multiop $pid not running"
587 wait $pid || error "multiop $pid failed"
588 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
589 touch $DIR/$tfile-2 || error "touch $DIR/$tfile-2 failed"
592 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
595 multiop_bg_pause $DIR/$tfile O_tSc ||
596 error "multiop_bg_pause $DIR/$tfile failed"
599 replay_barrier $SINGLEMDS
602 kill -USR1 $pid || error "multiop $pid not running"
603 wait $pid || error "multiop $pid failed"
604 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
607 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
610 multiop_bg_pause $DIR/$tfile O_tSc ||
611 error "multiop_bg_pause $DIR/$tfile failed"
615 replay_barrier $SINGLEMDS
617 kill -USR1 $pid || error "multiop $pid not running"
618 wait $pid || error "multiop $pid failed"
619 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
622 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
625 replay_barrier $SINGLEMDS
626 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
627 error "multiop_bg_pause $DIR/$tfile-1 failed"
629 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
630 error "multiop_bg_pause $DIR/$tfile-2 failed"
634 kill -USR1 $pid2 || error "second multiop $pid2 not running"
635 wait $pid2 || error "second multiop $pid2 failed"
638 kill -USR1 $pid1 || error "multiop $pid1 not running"
639 wait $pid1 || error "multiop $pid1 failed"
640 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
641 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
644 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
647 replay_barrier $SINGLEMDS
648 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
649 error "multiop_bg_pause $DIR/$tfile-1 failed"
651 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
652 error "multiop_bg_pause $DIR/$tfile-2 failed"
658 kill -USR1 $pid1 || error "multiop $pid1 not running"
659 wait $pid1 || error "multiop $pid1 failed"
660 kill -USR1 $pid2 || error "second multiop $pid2 not running"
661 wait $pid2 || error "second multiop $pid2 failed"
662 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
663 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
666 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
669 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
670 error "multiop_bg_pause $DIR/$tfile-1 failed"
672 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
673 error "multiop_bg_pause $DIR/$tfile-2 failed"
675 replay_barrier $SINGLEMDS
678 kill -USR1 $pid2 || error "second multiop $pid2 not running"
679 wait $pid2 || error "second multiop $pid2 failed"
682 kill -USR1 $pid1 || error "multiop $pid1 not running"
683 wait $pid1 || error "multiop $pid1 failed"
684 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
685 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
688 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
691 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
692 error "multiop_bg_pause $DIR/$tfile-1 failed"
694 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
695 error "multiop_bg_pause $DIR/$tfile-2 failed"
697 replay_barrier $SINGLEMDS
702 kill -USR1 $pid1 || error "multiop $pid1 not running"
703 wait $pid1 || error "multiop $pid1 failed"
704 kill -USR1 $pid2 || error "second multiop $pid2 not running"
705 wait $pid2 || error "second multiop $pid2 failed"
706 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
707 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
710 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
713 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
714 error "multiop_bg_pause $DIR/$tfile-1 failed"
716 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
717 error "multiop_bg_pause $DIR/$tfile-2 failed"
722 replay_barrier $SINGLEMDS
724 kill -USR1 $pid1 || error "multiop $pid1 not running"
725 wait $pid1 || error "multiop $pid1 failed"
726 kill -USR1 $pid2 || error "second multiop $pid2 not running"
727 wait $pid2 || error "second multiop $pid2 failed"
728 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
729 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
732 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
735 multiop_bg_pause $DIR/$tfile-1 O_tSc ||
736 error "multiop_bg_pause $DIR/$tfile-1 failed"
738 multiop_bg_pause $DIR/$tfile-2 O_tSc ||
739 error "multiop_bg_pause $DIR/$tfile-2 failed"
743 replay_barrier $SINGLEMDS
746 kill -USR1 $pid1 || error "multiop $pid1 not running"
747 wait $pid1 || error "multiop $pid1 failed"
748 kill -USR1 $pid2 || error "second multiop $pid2 not running"
749 wait $pid2 || error "second multiop $pid2 failed"
750 [ -e $DIR/$tfile-1 ] && error "file $DIR/$tfile-1 should not exist"
751 [ -e $DIR/$tfile-2 ] && error "file $DIR/$tfile-2 should not exist"
754 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
756 # tests for bug 2104; completion without crashing is success. The close is
757 # stale, but we always return 0 for close, so the app never sees it.
759 multiop_bg_pause $DIR/$tfile O_c ||
760 error "multiop_bg_pause $DIR/$tfile failed"
762 multiop_bg_pause $DIR/$tfile O_c ||
763 error "second multiop_bg_pause $DIR/$tfile failed"
766 client_up || client_up || error "client_up failed"
767 kill -USR1 $pid1 || error "multiop $pid1 not running"
768 kill -USR1 $pid2 || error "second multiop $pid2 not running"
769 wait $pid1 || error "multiop $pid1 failed"
770 wait $pid2 || error "second multiop $pid2 failed"
773 run_test 32 "close() notices client eviction; close() after client eviction"
776 createmany -o $DIR/$tfile-%d 10 ||
777 error "createmany create $DIR/$tfile failed"
778 replay_barrier_nosync $SINGLEMDS
779 fail_abort $SINGLEMDS
780 # recreate shouldn't fail
781 createmany -o $DIR/$tfile--%d 10 ||
782 error "createmany recreate $DIR/$tfile failed"
786 run_test 33a "fid seq shouldn't be reused after abort recovery"
789 #define OBD_FAIL_SEQ_ALLOC 0x1311
790 do_facet $SINGLEMDS "lctl set_param fail_loc=0x1311"
792 createmany -o $DIR/$tfile-%d 10
793 replay_barrier_nosync $SINGLEMDS
794 fail_abort $SINGLEMDS
795 # recreate shouldn't fail
796 createmany -o $DIR/$tfile--%d 10 ||
797 error "createmany recreate $DIR/$tfile failed"
801 run_test 33b "test fid seq allocation"
804 multiop_bg_pause $DIR/$tfile O_c ||
805 error "multiop_bg_pause $DIR/$tfile failed"
809 replay_barrier $SINGLEMDS
810 fail_abort $SINGLEMDS
811 kill -USR1 $pid || error "multiop $pid not running"
812 wait $pid || error "multiop $pid failed"
813 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
817 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
819 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
821 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
823 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
824 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
829 # give a chance to remove from MDS
830 fail_abort $SINGLEMDS
831 $CHECKSTAT -t file $DIR/$tfile &&
832 error "$CHECKSTAT $DIR/$tfile attribute check should fail" ||
835 run_test 35 "test recovery from llog for unlink op"
837 # b=2432 resent cancel after replay uses wrong cookie,
838 # so don't resend cancels
840 replay_barrier $SINGLEMDS
842 checkstat $DIR/$tfile
843 facet_failover $SINGLEMDS
845 if $LCTL dk | grep "stale lock .*cookie"; then
846 error "cancel after replay failed"
849 run_test 36 "don't resend cancel"
852 # directory orphans can't be unlinked from PENDING directory
854 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $tdir failed"
855 rmdir $DIR/$tdir/$tfile 2>/dev/null
856 multiop_bg_pause $DIR/$tdir/$tfile dD_c ||
857 error "multiop_bg_pause $tfile failed"
859 rmdir $DIR/$tdir/$tfile
861 replay_barrier $SINGLEMDS
862 # clear the dmesg buffer so we only see errors from this recovery
863 do_facet $SINGLEMDS dmesg -c >/dev/null
864 fail_abort $SINGLEMDS
865 kill -USR1 $pid || error "multiop $pid not running"
866 do_facet $SINGLEMDS dmesg | grep "error unlinking orphan" &&
867 error "error unlinking files"
868 wait $pid || error "multiop $pid failed"
872 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
875 createmany -o $DIR/$tfile-%d 800 ||
876 error "createmany -o $DIR/$tfile failed"
877 unlinkmany $DIR/$tfile-%d 0 400 || error "unlinkmany $DIR/$tfile failed"
878 replay_barrier $SINGLEMDS
880 unlinkmany $DIR/$tfile-%d 400 400 ||
881 error "unlinkmany $DIR/$tfile 400 failed"
883 $CHECKSTAT -t file $DIR/$tfile-* &&
884 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
887 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
889 test_39() { # bug 4176
890 createmany -o $DIR/$tfile-%d 800 ||
891 error "createmany -o $DIR/$tfile failed"
892 replay_barrier $SINGLEMDS
893 unlinkmany $DIR/$tfile-%d 0 400
895 unlinkmany $DIR/$tfile-%d 400 400 ||
896 error "unlinkmany $DIR/$tfile 400 failed"
898 $CHECKSTAT -t file $DIR/$tfile-* &&
899 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
902 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
905 lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
910 # always need connection to MDS to verify layout during IO. LU-2628.
911 lctl get_param mdc.*.connect_flags | grep -q layout_lock &&
912 skip "layout_lock needs MDS connection for IO" && return 0
914 $LCTL mark multiop $MOUNT/$tfile OS_c
915 multiop $MOUNT/$tfile OS_c &
917 writeme -s $MOUNT/${tfile}-2 &
920 facet_failover $SINGLEMDS
921 #define OBD_FAIL_MDS_CONNECT_NET 0x117
922 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
924 stat1=$(count_ost_writes)
926 stat2=$(count_ost_writes)
927 echo "$stat1, $stat2"
928 if [ $stat1 -lt $stat2 ]; then
929 echo "writes continuing during recovery"
932 echo "writes not continuing during recovery, bug 2477"
935 echo "waiting for writeme $WRITE_PID"
939 echo "waiting for multiop $PID"
940 wait $PID || error "multiop $PID failed"
941 do_facet client munlink $MOUNT/$tfile ||
942 error "munlink $MOUNT/$tfile failed"
943 do_facet client munlink $MOUNT/${tfile}-2 ||
944 error "munlink $MOUNT/$tfile-2 failed"
947 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
950 # make sure that a read to one osc doesn't try to double-unlock its page just
951 # because another osc is invalid. trigger_group_io used to mistakenly return
952 # an error if any oscs were invalid even after having successfully put rpcs
953 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
954 # the page, guaranteeing that the unlock from the RPC completion would
955 # assert on trying to unlock the unlocked page.
957 [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return
959 local f=$MOUNT/$tfile
960 # make sure the start of the file is ost1
961 $LFS setstripe -S $((128 * 1024)) -i 0 $f
962 do_facet client dd if=/dev/zero of=$f bs=4k count=1 ||
963 error "dd on client failed"
965 # fail ost2 and read from ost1
966 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost2_svc)
967 local osc2dev=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
968 grep $mdtosc | awk '{print $1}')
969 [ -z "$osc2dev" ] && echo "OST: $ost2_svc" &&
970 lctl get_param -n devices &&
971 error "OST 2 $osc2dev does not exist"
972 do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate ||
973 error "deactive device on $SINGLEMDS failed"
974 do_facet client dd if=$f of=/dev/null bs=4k count=1 ||
975 error "second dd on client failed"
976 do_facet $SINGLEMDS $LCTL --device $osc2dev activate ||
977 error "active device on $SINGLEMDS failed"
980 run_test 41 "read from a valid osc while other oscs are invalid"
982 # test MDS recovery after ost failure
984 blocks=$(df -P $MOUNT | tail -n 1 | awk '{ print $2 }')
985 createmany -o $DIR/$tfile-%d 800 ||
986 error "createmany -o $DIR/$tfile failed"
988 unlinkmany $DIR/$tfile-%d 0 400
990 lctl set_param debug=-1
993 # osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
994 #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
995 #[ $blocks_after -lt $blocks ] || return 1
996 echo "wait for MDS to timeout and recover"
997 sleep $((TIMEOUT * 2))
999 unlinkmany $DIR/$tfile-%d 400 400 ||
1000 error "unlinkmany $DIR/$tfile 400 failed"
1001 $CHECKSTAT -t file $DIR/$tfile-* &&
1002 error "$CHECKSTAT $DIR/$tfile-* attribute check should fail" ||
1005 run_test 42 "recovery after ost failure"
1007 # timeout in MDS/OST recovery RPC will LBUG MDS
1008 test_43() { # bug 2530
1009 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1011 replay_barrier $SINGLEMDS
1013 # OBD_FAIL_OST_CREATE_NET 0x204
1014 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1020 run_test 43 "mds osc import failure during recovery; don't LBUG"
1022 test_44a() { # was test_44
1023 local at_max_saved=0
1025 local mdcdev=$($LCTL dl |
1026 awk "/${FSNAME}-MDT0000-mdc-/ {if (\$2 == \"UP\") {print \$1}}")
1027 [ "$mdcdev" ] || error "${FSNAME}-MDT0000-mdc- not UP"
1028 [ $(echo $mdcdev | wc -w) -eq 1 ] ||
1029 { $LCTL dl; error "looking for mdcdev=$mdcdev"; }
1031 # adaptive timeouts slow this way down
1032 if at_is_enabled; then
1033 at_max_saved=$(at_max_get mds)
1037 for i in $(seq 1 10); do
1038 echo "$i of 10 ($(date +%s))"
1039 do_facet $SINGLEMDS \
1040 "lctl get_param -n md[ts].*.mdt.timeouts | grep service"
1041 #define OBD_FAIL_TGT_CONN_RACE 0x701
1042 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
1043 # lctl below may fail, it is valid case
1044 $LCTL --device $mdcdev recover
1047 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1048 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
1051 run_test 44a "race in target handle connect"
1054 local mdcdev=$($LCTL dl |
1055 awk "/${FSNAME}-MDT0000-mdc-/ {if (\$2 == \"UP\") {print \$1}}")
1056 [ "$mdcdev" ] || error "${FSNAME}-MDT0000-mdc not up"
1057 [ $(echo $mdcdev | wc -w) -eq 1 ] ||
1058 { echo mdcdev=$mdcdev; $LCTL dl;
1059 error "more than one ${FSNAME}-MDT0000-mdc"; }
1061 for i in $(seq 1 10); do
1062 echo "$i of 10 ($(date +%s))"
1063 do_facet $SINGLEMDS \
1064 "lctl get_param -n md[ts].*.mdt.timeouts | grep service"
1065 #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
1066 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
1067 # lctl below may fail, it is valid case
1068 $LCTL --device $mdcdev recover
1073 run_test 44b "race in target handle connect"
1076 replay_barrier $SINGLEMDS
1077 createmany -m $DIR/$tfile-%d 100 || error "failed to create directories"
1078 #define OBD_FAIL_TGT_RCVG_FLAG 0x712
1079 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
1080 fail_abort $SINGLEMDS
1081 unlinkmany $DIR/$tfile-%d 100 && error "unliked after fail abort"
1083 unlinkmany $DIR/$tfile-%d 100 && error "unliked after fail"
1086 run_test 44c "race in target handle connect"
1088 # Handle failed close
1090 local mdcdev=$($LCTL get_param -n devices |
1091 awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
1092 [ "$mdcdev" ] || error "${FSNAME}-MDT0000-mdc not up"
1093 [ $(echo $mdcdev | wc -w) -eq 1 ] ||
1094 { echo mdcdev=$mdcdev; $LCTL dl;
1095 error "more than one ${FSNAME}-MDT0000-mdc"; }
1097 $LCTL --device $mdcdev recover ||
1098 error "$LCTL --device $mdcdev recover failed"
1100 multiop_bg_pause $DIR/$tfile O_c ||
1101 error "multiop_bg_pause $DIR/$tfile failed"
1104 # This will cause the CLOSE to fail before even
1105 # allocating a reply buffer
1106 $LCTL --device $mdcdev deactivate ||
1107 error "$LCTL --device $mdcdev deactivate failed"
1110 kill -USR1 $pid || error "multiop $pid not running"
1111 wait $pid || error "multiop $pid failed"
1113 $LCTL --device $mdcdev activate ||
1114 error "$LCTL --device $mdcdev activate failed"
1117 $CHECKSTAT -t file $DIR/$tfile ||
1118 error "$CHECKSTAT $DIR/$tfile attribute check failed"
1121 run_test 45 "Handle failed close"
1124 drop_reply "touch $DIR/$tfile"
1126 # ironically, the previous test, 45, will cause a real forced close,
1127 # so just look for one for this test
1128 local FID=$($LFS path2fid $tfile)
1129 $LCTL dk | grep -i "force closing file handle $FID" &&
1130 error "found force closing in dmesg"
1133 run_test 46 "Don't leak file handle after open resend (3325)"
1135 test_47() { # bug 2824
1136 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1138 # create some files to make sure precreate has been done on all
1139 # OSTs. (just in case this test is run independently)
1140 createmany -o $DIR/$tfile 20 ||
1141 error "createmany create $DIR/$tfile failed"
1143 # OBD_FAIL_OST_CREATE_NET 0x204
1145 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1146 client_up || error "client_up failed"
1148 # let the MDS discover the OST failure, attempt to recover, fail
1149 # and recover again.
1150 sleep $((3 * TIMEOUT))
1152 # Without 2824, this createmany would hang
1153 createmany -o $DIR/$tfile 20 ||
1154 error "createmany recraete $DIR/$tfile failed"
1155 unlinkmany $DIR/$tfile 20 || error "unlinkmany $DIR/$tfile failed"
1159 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
1162 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1163 [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return
1165 replay_barrier $SINGLEMDS
1166 createmany -o $DIR/$tfile 20 ||
1167 error "createmany -o $DIR/$tfile failed"
1168 # OBD_FAIL_OST_EROFS 0x216
1169 facet_failover $SINGLEMDS
1170 do_facet ost1 "lctl set_param fail_loc=0x80000216"
1171 client_up || error "client_up failed"
1173 # let the MDS discover the OST failure, attempt to recover, fail
1174 # and recover again.
1175 sleep $((3 * TIMEOUT))
1177 createmany -o $DIR/$tfile 20 20 ||
1178 error "createmany recraete $DIR/$tfile failed"
1179 unlinkmany $DIR/$tfile 40 || error "unlinkmany $DIR/$tfile failed"
1182 run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
1185 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost1_svc)
1186 local oscdev=$(do_facet $SINGLEMDS "lctl get_param -n devices" |
1187 grep $mdtosc | awk '{print $1}')
1188 [ "$oscdev" ] || error "could not find OSC device on MDS"
1189 do_facet $SINGLEMDS $LCTL --device $oscdev recover ||
1190 error "OSC device $oscdev recovery failed"
1191 do_facet $SINGLEMDS $LCTL --device $oscdev recover ||
1192 error "second OSC device $oscdev recovery failed"
1193 # give the mds_lov_sync threads a chance to run
1196 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1198 # b3764 timed out lock replay
1200 [ "$MDS1_VERSION" -lt $(version_code 2.6.90) ] &&
1201 skip "MDS prior to 2.6.90 handle LDLM_REPLY_NET incorrectly"
1203 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
1204 cancel_lru_locks mdc
1206 multiop_bg_pause $DIR/$tfile s_s || error "multiop $DIR/$tfile failed"
1209 #define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
1210 lctl set_param -n ldlm.cancel_unused_locks_before_replay "0"
1211 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000157"
1213 fail $SINGLEMDS || error "fail $SINGLEMDS failed"
1215 wait $mpid || error "multiop_bg_pause pid failed"
1217 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1218 lctl set_param fail_loc=0x0
1219 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
1222 run_test 52 "time out lock replay (3764)"
1224 # bug 3462 - simultaneous MDC requests
1226 [[ $(lctl get_param mdc.*.import |
1227 grep "connect_flags:.*multi_mod_rpc") ]] ||
1228 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
1230 cancel_lru_locks mdc # cleanup locks from former test cases
1231 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1232 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1233 multiop $DIR/${tdir}-1/f O_c &
1235 # give multiop a chance to open
1238 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1239 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1240 kill -USR1 $close_pid
1241 cancel_lru_locks mdc # force the close
1242 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1244 mcreate $DIR/${tdir}-2/f || error "mcreate $DIR/${tdir}-2/f failed"
1246 # close should still be here
1247 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1249 replay_barrier_nodf $SINGLEMDS
1251 wait $close_pid || error "close_pid $close_pid failed"
1253 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1254 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1255 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1256 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1257 rm -rf $DIR/${tdir}-*
1259 run_test 53a "|X| close request while two MDC requests in flight"
1262 cancel_lru_locks mdc # cleanup locks from former test cases
1264 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1265 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1266 multiop_bg_pause $DIR/${tdir}-1/f O_c ||
1267 error "multiop_bg_pause $DIR/${tdir}-1/f failed"
1270 #define OBD_FAIL_MDS_REINT_NET 0x107
1271 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1272 mcreate $DIR/${tdir}-2/f &
1276 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1277 kill -USR1 $close_pid
1278 cancel_lru_locks mdc # force the close
1279 wait $close_pid || error "close_pid $close_pid failed"
1280 # open should still be here
1281 [ -d /proc/$open_pid ] || error "open_pid doesn't exist"
1283 replay_barrier_nodf $SINGLEMDS
1285 wait $open_pid || error "open_pid failed"
1287 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1288 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1289 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1290 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1291 rm -rf $DIR/${tdir}-*
1293 run_test 53b "|X| open request while two MDC requests in flight"
1296 cancel_lru_locks mdc # cleanup locks from former test cases
1298 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1299 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1300 multiop $DIR/${tdir}-1/f O_c &
1303 #define OBD_FAIL_MDS_REINT_NET 0x107
1304 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1305 mcreate $DIR/${tdir}-2/f &
1309 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1310 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1311 kill -USR1 $close_pid
1312 cancel_lru_locks mdc # force the close
1314 #bz20647: make sure all pids exist before failover
1315 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1316 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1317 replay_barrier_nodf $SINGLEMDS
1318 fail_nodf $SINGLEMDS
1319 wait $open_pid || error "open_pid failed"
1321 # close should be gone
1322 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1323 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1325 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1326 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1327 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1328 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1329 rm -rf $DIR/${tdir}-*
1331 run_test 53c "|X| open request and close request while two MDC requests in flight"
1334 [[ $(lctl get_param mdc.*.import |
1335 grep "connect_flags:.*multi_mod_rpc") ]] ||
1336 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
1338 cancel_lru_locks mdc # cleanup locks from former test cases
1340 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1341 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1342 multiop $DIR/${tdir}-1/f O_c &
1344 # give multiop a chance to open
1347 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1348 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1349 kill -USR1 $close_pid
1350 cancel_lru_locks mdc # force the close
1351 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1352 mcreate $DIR/${tdir}-2/f || error "mcreate $DIR/${tdir}-2/f failed"
1354 # close should still be here
1355 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1357 wait $close_pid || error "close_pid failed"
1359 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1360 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1361 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1362 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1363 rm -rf $DIR/${tdir}-*
1365 run_test 53d "close reply while two MDC requests in flight"
1368 cancel_lru_locks mdc # cleanup locks from former test cases
1370 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1371 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1372 multiop $DIR/${tdir}-1/f O_c &
1375 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1376 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1377 mcreate $DIR/${tdir}-2/f &
1381 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1382 kill -USR1 $close_pid
1383 cancel_lru_locks mdc # force the close
1384 wait $close_pid || error "close_pid failed"
1385 # open should still be here
1386 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1388 replay_barrier_nodf $SINGLEMDS
1390 wait $open_pid || error "open_pid failed"
1392 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1393 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1394 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1395 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1396 rm -rf $DIR/${tdir}-*
1398 run_test 53e "|X| open reply while two MDC requests in flight"
1401 cancel_lru_locks mdc # cleanup locks from former test cases
1403 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1404 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1405 multiop $DIR/${tdir}-1/f O_c &
1408 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1409 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1410 mcreate $DIR/${tdir}-2/f &
1414 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1415 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1416 kill -USR1 $close_pid
1417 cancel_lru_locks mdc # force the close
1419 #bz20647: make sure all pids exist before failover
1420 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1421 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1422 replay_barrier_nodf $SINGLEMDS
1423 fail_nodf $SINGLEMDS
1424 wait $open_pid || error "open_pid failed"
1426 # close should be gone
1427 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1428 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1430 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1431 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1432 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1433 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1434 rm -rf $DIR/${tdir}-*
1436 run_test 53f "|X| open reply and close reply while two MDC requests in flight"
1439 cancel_lru_locks mdc # cleanup locks from former test cases
1441 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1442 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1443 multiop $DIR/${tdir}-1/f O_c &
1446 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1447 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1448 mcreate $DIR/${tdir}-2/f &
1452 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1453 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1454 kill -USR1 $close_pid
1455 cancel_lru_locks mdc # force the close
1456 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1458 #bz20647: make sure all pids exist before failover
1459 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1460 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1461 replay_barrier_nodf $SINGLEMDS
1462 fail_nodf $SINGLEMDS
1463 wait $open_pid || error "open_pid failed"
1465 # close should be gone
1466 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1468 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1469 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1470 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1471 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1472 rm -rf $DIR/${tdir}-*
1474 run_test 53g "|X| drop open reply and close request while close and open are both in flight"
1477 cancel_lru_locks mdc # cleanup locks from former test cases
1479 mkdir_on_mdt0 $DIR/${tdir}-1 || error "mkdir $DIR/${tdir}-1 failed"
1480 mkdir_on_mdt0 $DIR/${tdir}-2 || error "mkdir $DIR/${tdir}-2 failed"
1481 multiop $DIR/${tdir}-1/f O_c &
1484 #define OBD_FAIL_MDS_REINT_NET 0x107
1485 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1486 mcreate $DIR/${tdir}-2/f &
1490 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1491 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1492 kill -USR1 $close_pid
1493 cancel_lru_locks mdc # force the close
1496 #bz20647: make sure all pids exist before failover
1497 [ -d /proc/$close_pid ] || error "close_pid doesn't exist"
1498 [ -d /proc/$open_pid ] || error "open_pid doesn't exists"
1499 replay_barrier_nodf $SINGLEMDS
1500 fail_nodf $SINGLEMDS
1501 wait $open_pid || error "open_pid failed"
1503 # close should be gone
1504 [ -d /proc/$close_pid ] && error "close_pid should not exist"
1505 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1507 $CHECKSTAT -t file $DIR/${tdir}-1/f ||
1508 error "$CHECKSTAT $DIR/${tdir}-1/f attribute check failed"
1509 $CHECKSTAT -t file $DIR/${tdir}-2/f ||
1510 error "$CHECKSTAT $DIR/${tdir}-2/f attribute check failed"
1511 rm -rf $DIR/${tdir}-*
1513 run_test 53h "open request and close reply while two MDC requests in flight"
1515 #b3761 ASSERTION(hash != 0) failed
1517 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1518 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
1520 # give touch a chance to run
1522 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1526 run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
1528 #b3440 ASSERTION(rec->ur_fid2->id) failed
1530 ln -s foo $DIR/$tfile
1531 replay_barrier $SINGLEMDS
1532 #drop_reply "cat $DIR/$tfile"
1536 run_test 56 "don't replay a symlink open request (3440)"
1538 #recovery one mds-ost setattr from llog
1540 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1541 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1542 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
1543 replay_barrier $SINGLEMDS
1545 wait_recovery_complete $SINGLEMDS || error "MDS recovery is not done"
1546 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
1547 $CHECKSTAT -t file $DIR/$tfile ||
1548 error "$CHECKSTAT $DIR/$tfile attribute check failed"
1549 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1552 run_test 57 "test recovery from llog for setattr op"
1555 zconf_umount $(hostname) $MOUNT2
1559 #recovery many mds-ost setattr from llog
1561 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1562 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1563 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1564 createmany -o $DIR/$tdir/$tfile-%d 2500
1565 replay_barrier $SINGLEMDS
1568 $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null ||
1569 error "$CHECKSTAT $DIR/$tfile-* attribute check failed"
1570 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1571 unlinkmany $DIR/$tdir/$tfile-%d 2500 ||
1572 error "unlinkmany $DIR/$tfile failed"
1575 run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
1581 trap cleanup_58 EXIT
1583 large_xattr_enabled &&
1584 orig="$(generate_string $(max_xattr_size))" || orig="bar"
1585 # Original extended attribute can be long. Print a small version of
1586 # attribute if an error occurs
1587 local sm_msg=$(printf "%.9s" $orig)
1589 mount_client $MOUNT2 || error "mount_client on $MOUNT2 failed"
1590 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1591 touch $DIR/$tdir/$tfile || error "touch $DIR/$tdir/$tfile failed"
1592 replay_barrier $SINGLEMDS
1593 setfattr -n trusted.foo -v $orig $DIR/$tdir/$tfile
1595 new=$(get_xattr_value trusted.foo $MOUNT2/$tdir/$tfile)
1596 [[ "$new" = "$orig" ]] ||
1597 error "xattr set ($sm_msg...) differs from xattr get ($new)"
1598 rm -f $DIR/$tdir/$tfile
1601 wait_clients_import_state ${CLIENTS:-$HOSTNAME} "mgs" FULL
1603 run_test 58b "test replay of setxattr op"
1605 test_58c() { # bug 16570
1610 trap cleanup_58 EXIT
1612 if large_xattr_enabled; then
1613 local xattr_size=$(max_xattr_size)
1614 orig="$(generate_string $((xattr_size / 2)))"
1615 orig1="$(generate_string $xattr_size)"
1621 # PING_INTERVAL max(obd_timeout / 4, 1U)
1622 sleep $((TIMEOUT / 4))
1624 # Original extended attribute can be long. Print a small version of
1625 # attribute if an error occurs
1626 local sm_msg=$(printf "%.9s" $orig)
1627 local sm_msg1=$(printf "%.9s" $orig1)
1629 mount_client $MOUNT2 || error "mount_client on $MOUNT2 failed"
1630 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1631 touch $DIR/$tdir/$tfile || error "touch $DIR/$tdir/$tfile failed"
1632 drop_request "setfattr -n trusted.foo -v $orig $DIR/$tdir/$tfile" ||
1633 error "drop_request for setfattr failed"
1634 new=$(get_xattr_value trusted.foo $MOUNT2/$tdir/$tfile)
1635 [[ "$new" = "$orig" ]] ||
1636 error "xattr set ($sm_msg...) differs from xattr get ($new)"
1637 drop_reint_reply "setfattr -n trusted.foo1 \
1638 -v $orig1 $DIR/$tdir/$tfile" ||
1639 error "drop_reint_reply for setfattr failed"
1640 new=$(get_xattr_value trusted.foo1 $MOUNT2/$tdir/$tfile)
1641 [[ "$new" = "$orig1" ]] ||
1642 error "second xattr set ($sm_msg1...) differs xattr get ($new)"
1643 rm -f $DIR/$tdir/$tfile
1647 run_test 58c "resend/reconstruct setxattr op"
1649 # log_commit_thread vs filter_destroy race used to lead to import use after free
1652 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1654 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1655 createmany -o $DIR/$tdir/$tfile-%d 200 ||
1656 error "createmany create files failed"
1658 unlinkmany $DIR/$tdir/$tfile-%d 200 ||
1659 error "unlinkmany $DIR/$tdir/$tfile failed"
1660 #define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
1661 do_facet ost1 "lctl set_param fail_loc=0x507"
1664 do_facet ost1 "lctl set_param fail_loc=0x0"
1668 run_test 59 "test log_commit_thread vs filter_destroy race"
1670 # race between add unlink llog vs cat log init in post_recovery (only for b1_6)
1671 # bug 12086: should no oops and No ctxt error for this test
1673 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1674 createmany -o $DIR/$tdir/$tfile-%d 200 ||
1675 error "createmany create files failed"
1676 replay_barrier $SINGLEMDS
1677 unlinkmany $DIR/$tdir/$tfile-%d 0 100
1679 unlinkmany $DIR/$tdir/$tfile-%d 100 100
1680 local no_ctxt=$(dmesg | grep "No ctxt")
1681 [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
1683 run_test 60 "test llog post recovery init vs llog unlink"
1685 #test race llog recovery thread vs llog cleanup
1686 test_61a() { # was test_61
1687 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1689 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1690 createmany -o $DIR/$tdir/$tfile-%d 800 ||
1691 error "createmany create files failed"
1693 unlinkmany $DIR/$tdir/$tfile-%d 800
1694 # OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
1695 set_nodes_failloc "$(osts_nodes)" 0x80000221
1700 set_nodes_failloc "$(osts_nodes)" 0x0
1702 $CHECKSTAT -t file $DIR/$tdir/$tfile-* &&
1703 error "$CHECKSTAT $DIR/$tdir/$tfile attribute check should fail"
1706 run_test 61a "test race llog recovery vs llog cleanup"
1708 #test race mds llog sync vs llog cleanup
1710 # OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
1711 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013a"
1712 facet_failover $SINGLEMDS
1715 do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 ||
1718 run_test 61b "test race mds llog sync vs llog cleanup"
1720 #test race cancel cookie cb vs llog cleanup
1722 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1724 # OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
1725 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
1726 set_nodes_failloc "$(osts_nodes)" 0x80000222
1730 set_nodes_failloc "$(osts_nodes)" 0x0
1732 run_test 61c "test race mds llog sync vs llog cleanup"
1734 test_61d() { # bug 16002 # bug 17466 # bug 22137
1735 # OBD_FAIL_OBD_LLOG_SETUP 0x605
1737 do_facet mgs "lctl set_param fail_loc=0x80000605"
1738 start mgs $(mgsdevname) $MGS_MOUNT_OPTS &&
1739 error "mgs start should have failed"
1740 do_facet mgs "lctl set_param fail_loc=0"
1741 start mgs $(mgsdevname) $MGS_MOUNT_OPTS || error "cannot restart mgs"
1743 run_test 61d "error in llog_setup should cleanup the llog context correctly"
1745 test_62() { # Bug 15756 - don't mis-drop resent replay
1746 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
1747 replay_barrier $SINGLEMDS
1748 createmany -o $DIR/$tdir/$tfile- 25 ||
1749 error "createmany create files failed"
1750 #define OBD_FAIL_TGT_REPLAY_DROP 0x707
1751 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
1753 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1754 unlinkmany $DIR/$tdir/$tfile- 25 ||
1755 error "unlinkmany $DIR/$tdir/$tfile failed"
1758 run_test 62 "don't mis-drop resent replay"
1760 #Adaptive Timeouts (bug 3055)
1768 echo "Cleaning up AT ..."
1769 if [ -n "$ATOLDBASE" ]; then
1770 local at_history=$($LCTL get_param -n at_history)
1771 do_facet $SINGLEMDS "lctl set_param at_history=$at_history" || true
1772 do_facet ost1 "lctl set_param at_history=$at_history" || true
1775 if [ $AT_MAX_SET -ne 0 ]; then
1776 for facet in mds client ost; do
1777 var=AT_MAX_SAVE_${facet}
1778 echo restore AT on $facet to saved value ${!var}
1779 at_max_set ${!var} $facet
1780 at_new=$(at_max_get $facet)
1781 echo Restored AT value on $facet $at_new
1782 [ $at_new -eq ${!var} ] ||
1783 error "AT value not restored SAVED ${!var} NEW $at_new"
1790 local at_max_new=600
1792 # Save at_max original values
1794 if [ $AT_MAX_SET -eq 0 ]; then
1795 # Suppose that all osts have the same at_max
1796 for facet in mds client ost; do
1797 eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
1801 for facet in mds client ost; do
1802 at_max=$(at_max_get $facet)
1803 if [ $at_max -ne $at_max_new ]; then
1804 echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new"
1805 at_max_set $at_max_new $facet
1810 if [ -z "$ATOLDBASE" ]; then
1811 ATOLDBASE=$(do_facet $SINGLEMDS "lctl get_param -n at_history")
1812 # speed up the timebase so we can check decreasing AT
1813 do_facet $SINGLEMDS "lctl set_param at_history=8" || true
1814 do_facet ost1 "lctl set_param at_history=8" || true
1816 # sleep for a while to cool down, should be > 8s and also allow
1817 # at least one ping to be sent. simply use TIMEOUT to be safe.
1822 test_65a() #bug 3055
1824 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1826 at_start || return 0
1827 $LCTL dk > /dev/null
1829 $LCTL set_param debug="other"
1830 # Slow down a request to the current service time, this is critical
1831 # because previous tests may have caused this value to increase.
1832 REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
1833 awk '/portal 12/ {print $5}'`
1834 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
1836 do_facet $SINGLEMDS lctl set_param fail_val=$((${REQ_DELAY} * 1000))
1837 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1838 do_facet $SINGLEMDS $LCTL set_param fail_loc=0x8000050a
1839 createmany -o $DIR/$tfile 10 > /dev/null
1840 unlinkmany $DIR/$tfile 10 > /dev/null
1841 # check for log message
1842 $LCTL dk | grep -i "Early reply #" || error "No early reply"
1844 # client should show REQ_DELAY estimates
1845 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1847 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1849 run_test 65a "AT: verify early replies"
1851 test_65b() #bug 3055
1853 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1855 at_start || return 0
1858 $LCTL set_param debug="other trace"
1859 $LCTL dk > /dev/null
1860 # Slow down a request to the current service time, this is critical
1861 # because previous tests may have caused this value to increase.
1862 $LFS setstripe --stripe-index=0 --stripe-count=1 $DIR/$tfile ||
1863 error "$LFS setstripe failed for $DIR/$tfile"
1865 multiop $DIR/$tfile Ow1yc
1866 REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
1867 awk '/portal 6/ {print $5}'`
1868 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
1870 do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
1871 #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
1872 do_facet ost1 $LCTL set_param fail_loc=0x224
1875 $LFS setstripe --stripe-index=0 --stripe-count=1 $DIR/$tfile ||
1876 error "$LFS setstripe failed"
1877 # force some real bulk transfer
1878 multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
1880 do_facet ost1 $LCTL set_param fail_loc=0
1881 # check for log message
1882 $LCTL dk | grep -i "Early reply #" || error "No early reply"
1884 # client should show REQ_DELAY estimates
1885 lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
1887 run_test 65b "AT: verify early replies on packed reply / bulk"
1889 test_66a() #bug 3055
1891 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1893 at_start || return 0
1894 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1895 # adjust 5s at a time so no early reply is sent (within deadline)
1896 do_facet $SINGLEMDS "$LCTL set_param fail_val=5000"
1897 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1898 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000050a"
1899 createmany -o $DIR/$tfile 20 > /dev/null
1900 unlinkmany $DIR/$tfile 20 > /dev/null
1901 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1902 do_facet $SINGLEMDS "$LCTL set_param fail_val=10000"
1903 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000050a"
1904 createmany -o $DIR/$tfile 20 > /dev/null
1905 unlinkmany $DIR/$tfile 20 > /dev/null
1906 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1907 do_facet $SINGLEMDS "$LCTL set_param fail_loc=0"
1909 createmany -o $DIR/$tfile 20 > /dev/null
1910 unlinkmany $DIR/$tfile 20 > /dev/null
1911 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1912 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
1913 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
1914 echo "Current MDT timeout $CUR, worst $WORST"
1915 [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
1917 run_test 66a "AT: verify MDT service time adjusts with no early replies"
1919 test_66b() #bug 3055
1921 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1923 at_start || return 0
1924 ORIG=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
1925 awk '/network/ {print $4}')
1926 $LCTL set_param fail_val=$(($ORIG + 5))
1927 #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
1928 $LCTL set_param fail_loc=0x50c
1929 touch $DIR/$tfile > /dev/null 2>&1
1930 $LCTL set_param fail_loc=0
1931 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
1932 awk '/network/ {print $4}')
1933 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
1934 awk '/network/ {print $6}')
1935 echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
1936 [ $WORST -gt $ORIG ] ||
1937 error "Worst $WORST should be worse than orig $ORIG"
1939 run_test 66b "AT: verify net latency adjusts"
1941 test_67a() #bug 3055
1943 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1945 at_start || return 0
1946 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1947 # sleeping threads may drive values above this
1948 do_facet ost1 "$LCTL set_param fail_val=400"
1949 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1950 do_facet ost1 "$LCTL set_param fail_loc=0x50a"
1951 createmany -o $DIR/$tfile 20 > /dev/null
1952 unlinkmany $DIR/$tfile 20 > /dev/null
1953 do_facet ost1 "$LCTL set_param fail_loc=0"
1954 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1955 ATTEMPTS=$(($CONN2 - $CONN1))
1956 echo "$ATTEMPTS osc reconnect attempts on gradual slow"
1957 [ $ATTEMPTS -gt 0 ] &&
1958 error_ignore bz13721 "AT should have prevented reconnect"
1961 run_test 67a "AT: verify slow request processing doesn't induce reconnects"
1963 test_67b() #bug 3055
1965 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1967 at_start || return 0
1968 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1970 # exhaust precreations on ost1
1971 local OST=$(ostname_from_index 0)
1972 local mdtosc=$(get_mdtosc_proc_path mds $OST)
1973 local last_id=$(do_facet $SINGLEMDS lctl get_param -n \
1974 osp.$mdtosc.prealloc_last_id)
1975 local next_id=$(do_facet $SINGLEMDS lctl get_param -n \
1976 osp.$mdtosc.prealloc_next_id)
1978 mkdir -p $DIR/$tdir/${OST} || error "mkdir $DIR/$tdir/${OST} failed"
1979 $LFS setstripe -i 0 -c 1 $DIR/$tdir/${OST} ||
1980 error "$LFS setstripe failed"
1981 echo "Creating to objid $last_id on ost $OST..."
1982 #define OBD_FAIL_OST_PAUSE_CREATE 0x223
1983 do_facet ost1 "$LCTL set_param fail_val=20000"
1984 do_facet ost1 "$LCTL set_param fail_loc=0x80000223"
1985 createmany -o $DIR/$tdir/${OST}/f $next_id $((last_id - next_id + 2))
1988 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1990 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1991 ATTEMPTS=$(($CONN2 - $CONN1))
1992 echo "$ATTEMPTS osc reconnect attempts on instant slow"
1993 # do it again; should not timeout
1994 do_facet ost1 "$LCTL set_param fail_loc=0x80000223"
1995 cp /etc/profile $DIR/$tfile || error "cp failed"
1996 do_facet ost1 "$LCTL set_param fail_loc=0"
1998 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1999 CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
2000 ATTEMPTS=$(($CONN3 - $CONN2))
2001 echo "$ATTEMPTS osc reconnect attempts on 2nd slow"
2002 [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
2005 run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
2007 test_68 () #bug 13813
2009 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
2011 at_start || return 0
2012 local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
2013 [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
2014 local ldlm_enqueue_min_r=$(do_facet ost1 "find /sys -name ldlm_enqueue_min")
2015 [ -z "$ldlm_enqueue_min_r" ] && skip "missing /sys/.../ldlm_enqueue_min in the ost1" && return 0
2016 local ENQ_MIN=$(cat $ldlm_enqueue_min)
2017 local ENQ_MIN_R=$(do_facet ost1 "cat $ldlm_enqueue_min_r")
2018 echo $TIMEOUT >> $ldlm_enqueue_min
2019 do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r"
2021 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2022 $LFS setstripe --stripe-index=0 -c 1 $DIR/$tdir ||
2023 error "$LFS setstripe failed for $DIR/$tdir"
2024 #define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
2025 $LCTL set_param fail_val=$(($TIMEOUT - 1))
2026 $LCTL set_param fail_loc=0x80000312
2027 cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
2028 $LCTL set_param fail_val=$((TIMEOUT * 5 / 4))
2029 $LCTL set_param fail_loc=0x80000312
2030 cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
2031 $LCTL set_param fail_loc=0
2033 echo $ENQ_MIN >> $ldlm_enqueue_min
2034 do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
2038 run_test 68 "AT: verify slowing locks"
2041 # end of AT tests includes above lines
2043 # start multi-client tests
2045 [ $CLIENTCOUNT -lt 2 ] &&
2046 { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
2048 echo "mount clients $CLIENTS ..."
2049 zconf_mount_clients $CLIENTS $MOUNT
2051 local clients=${CLIENTS//,/ }
2052 echo "Write/read files on $DIR ; clients $CLIENTS ... "
2053 for CLIENT in $clients; do
2054 do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
2055 of=$DIR/${tfile}_${CLIENT} 2>/dev/null ||
2056 error "dd failed on $CLIENT"
2059 local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
2060 for C in ${CLIENTS//,/ }; do
2061 do_node $prev_client dd if=$DIR/${tfile}_${C} \
2062 of=/dev/null 2>/dev/null ||
2063 error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
2069 run_test 70a "check multi client t-f"
# check_for_process: probe (signal 0) for a running program on a client list.
# NOTE(review): listing is elided (original line numbers jump); body/brace of
# this helper and parts of test_70b below are not visible here.
2071 check_for_process () {
2076 killall_process $clients "$prog" -0
# test_70b: run dbench on all clients while repeatedly failing over each MDT
# in turn; the dbench load must survive MDS recovery.
2080 local clients=${CLIENTS:-$HOSTNAME}
2082 zconf_mount_clients $clients $MOUNT
2085 [ "$SLOW" = "no" ] && duration=120
2086 # set duration to 900 because it takes some time to boot node
2087 [ "$FAILURE_MODE" = HARD ] && duration=900
2090 local start_ts=$(date +%s)
2091 local cmd="rundbench 1 -t $duration"
# For DNE setups, spread the working dir across all MDTs.
2093 if [ $MDSCOUNT -ge 2 ]; then
2094 test_mkdir -p -c$MDSCOUNT $DIR/$tdir
2095 $LFS setdirstripe -D -c$MDSCOUNT $DIR/$tdir
# Launch rundbench in the background on every client; env vars are escaped so
# they expand on the remote node, not here.
2097 do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
2098 PATH=\$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
2099 DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
2100 MOUNT=$MOUNT DIR=$DIR/$tdir/\\\$(hostname) LCTL=$LCTL $cmd" &
2103 #LU-1897 wait for all dbench copies to start
2104 while ! check_for_process $clients dbench; do
2105 elapsed=$(($(date +%s) - start_ts))
2106 if [ $elapsed -gt $duration ]; then
2107 killall_process $clients dbench
2108 error "dbench failed to start on $clients!"
2113 log "Started rundbench load pid=$pid ..."
2115 elapsed=$(($(date +%s) - start_ts))
2116 local num_failovers=0
# Main loop: keep failing MDTs round-robin while dbench runs, until the
# configured duration elapses.
2118 while [ $elapsed -lt $duration ]; do
2119 if ! check_for_process $clients dbench; then
2120 error_noexit "dbench stopped on some of $clients!"
2121 killall_process $clients dbench
2125 replay_barrier mds$fail_index
2126 sleep 1 # give clients a time to do operations
2127 # Increment the number of failovers
2128 num_failovers=$((num_failovers+1))
2129 log "$TESTNAME fail mds$fail_index $num_failovers times"
2131 elapsed=$(($(date +%s) - start_ts))
# Wrap fail_index back to 1 after the last MDT (wrap branch elided here).
2132 if [ $fail_index -ge $MDSCOUNT ]; then
2135 fail_index=$((fail_index+1))
2139 wait $pid || error "rundbench load on $clients failed!"
2141 run_test 70b "dbench ${MDSCOUNT}mdts recovery; $CLIENTCOUNT clients"
2142 # end multi-client tests
# random_fail_mdt (fragment; header elided): repeatedly fail a randomly chosen
# MDT while a monitor process (pid $3) must stay alive.
2147 local monitor_pid=$3
2149 local start_ts=$(date +%s)
2150 local num_failovers=0
2153 elapsed=$(($(date +%s) - start_ts))
2154 while [ $elapsed -lt $duration ]; do
# Pick a victim MDT uniformly in 1..max_index.
2155 fail_index=$((RANDOM%max_index+1))
# Abort early if the workload being monitored died.
2156 kill -0 $monitor_pid ||
2157 error "$monitor_pid stopped"
2159 replay_barrier mds$fail_index
2161 # Increment the number of failovers
2162 num_failovers=$((num_failovers+1))
2163 log "$TESTNAME fail mds$fail_index $num_failovers times"
2165 elapsed=$(($(date +%s) - start_ts))
# cleanup_70c (fragment): remove the stop-flag lock file on exit.
2171 rm -f $DIR/replay-single.70c.lck
# test_70c: run a tar(1) copy loop while randomly failing MDTs; tar must
# survive recovery. Loop runs until the .lck stop file appears.
2176 local clients=${CLIENTS:-$HOSTNAME}
2179 zconf_mount_clients $clients $MOUNT
2182 [ "$SLOW" = "no" ] && duration=180
2183 # set duration to 900 because it takes some time to boot node
2184 [ "$FAILURE_MODE" = HARD ] && duration=600
2187 local start_ts=$(date +%s)
2189 trap cleanup_70c EXIT
2191 while [ ! -e $DIR/replay-single.70c.lck ]; do
2192 test_mkdir -p -c$MDSCOUNT $DIR/$tdir || break
2193 if [ $MDSCOUNT -ge 2 ]; then
2194 $LFS setdirstripe -D -c$MDSCOUNT $DIR/$tdir ||
2195 error "set default dirstripe failed"
2197 cd $DIR/$tdir || break
# Copy /etc through a pipe as the metadata-heavy workload.
2198 tar cf - /etc | tar xf - || error "tar failed in loop"
2202 echo "Started tar $tar_70c_pid"
2204 random_fail_mdt $MDSCOUNT $duration $tar_70c_pid
2205 kill -0 $tar_70c_pid || error "tar $tar_70c_pid stopped"
# Signal the background loop to stop, then reap it.
2207 touch $DIR/replay-single.70c.lck
2208 wait $tar_70c_pid || error "$?: tar failed"
2213 run_test 70c "tar ${MDSCOUNT}mdts recovery"
# cleanup_70d (fragment): kill the background mkdir/rmdir loop on exit.
2217 kill -9 $mkdir_70d_pid
# test_70d: mkdir/rmdir on striped directories under random MDT failover.
# Requires DNE (>= 2 MDTs). Loop body creates/removes entries in two striped
# test dirs; several error branches are elided from this listing.
2221 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2222 local clients=${CLIENTS:-$HOSTNAME}
2225 zconf_mount_clients $clients $MOUNT
2228 [ "$SLOW" = "no" ] && duration=180
2229 # set duration to 900 because it takes some time to boot node
2230 [ "$FAILURE_MODE" = HARD ] && duration=900
2235 local start_ts=$(date +%s)
2237 trap cleanup_70d EXIT
# Striped dirs on MDT0 and MDT1, each striped across 2 MDTs.
2240 $LFS mkdir -i0 -c2 $DIR/$tdir/test || {
2244 $LFS mkdir -i1 -c2 $DIR/$tdir/test1 || {
2249 touch $DIR/$tdir/test/a || {
2253 mkdir $DIR/$tdir/test/b || {
2257 rm -rf $DIR/$tdir/test || {
2263 touch $DIR/$tdir/test1/a || {
2267 mkdir $DIR/$tdir/test1/b || {
2272 rm -rf $DIR/$tdir/test1 || {
2274 ls -lR $DIR/$tdir/test1
2280 echo "Started $mkdir_70d_pid"
2282 random_fail_mdt $MDSCOUNT $duration $mkdir_70d_pid
2283 kill -0 $mkdir_70d_pid || error "mkdir/rmdir $mkdir_70d_pid stopped"
2288 run_test 70d "mkdir/rmdir striped dir ${MDSCOUNT}mdts recovery"
# test_70e: cross-MDT rename loop (a on MDT0 <-> b on MDT1) while randomly
# failing the two MDTs; rename must survive recovery. Error-branch bodies
# are elided from this listing.
2291 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2292 local clients=${CLIENTS:-$HOSTNAME}
2295 lctl set_param debug=+ha
2296 zconf_mount_clients $clients $MOUNT
2299 [ "$SLOW" = "no" ] && duration=180
2300 # set duration to 900 because it takes some time to boot node
2301 [ "$FAILURE_MODE" = HARD ] && duration=900
# Two dirs both on MDT0; files a/b are the rename endpoints.
2304 $LFS mkdir -i0 $DIR/$tdir/test_0
2305 $LFS mkdir -i0 $DIR/$tdir/test_1
2306 touch $DIR/$tdir/test_0/a
2307 touch $DIR/$tdir/test_1/b
# Rename a -> b, then verify a is gone and b exists, recreate a, rename back.
2310 mrename $DIR/$tdir/test_0/a $DIR/$tdir/test_1/b > /dev/null || {
2315 checkstat $DIR/$tdir/test_0/a && {
2316 echo "a still exists"
2320 checkstat $DIR/$tdir/test_1/b || {
2321 echo "b still exists"
2325 touch $DIR/$tdir/test_0/a || {
2326 echo "touch a fails"
2330 mrename $DIR/$tdir/test_1/b $DIR/$tdir/test_0/a > /dev/null || {
2337 stack_trap "kill -9 $rename_70e_pid" EXIT
2338 echo "Started PID=$rename_70e_pid"
2340 random_fail_mdt 2 $duration $rename_70e_pid
2341 kill -0 $rename_70e_pid || error "rename $rename_70e_pid stopped"
2343 run_test 70e "rename cross-MDT with random fails"
# test_70f_write_and_read: each client writes a 10MB file, then one client
# reads every file back and compares md5 against the per-client checksum[]
# recorded at setup time. Returns early (no error) once the stopflag is gone.
2345 test_70f_write_and_read(){
2350 echo "Write/read files in: '$DIR/$tdir', clients: '$CLIENTS' ..."
2351 for client in ${CLIENTS//,/ }; do
2352 [ -f $stopflag ] || return
2354 local tgtfile=$DIR/$tdir/$tfile.$client
2355 do_node $client dd $DD_OPTS bs=1M count=10 if=$srcfile \
2356 of=$tgtfile 2>/dev/null ||
2357 error "dd $DD_OPTS bs=1M count=10 if=$srcfile " \
2358 "of=$tgtfile failed on $client, rc=$?"
# Read back from the last client in the list to cross-check visibility.
2361 local prev_client=$(echo ${CLIENTS//,/ } | awk '{ print $NF }')
2364 for client in ${CLIENTS//,/ }; do
2365 [ -f $stopflag ] || return
2367 # flush client cache in case test is running on only one client
2368 # do_node $client cancel_lru_locks osc
2369 do_node $client $LCTL set_param ldlm.namespaces.*.lru_size=clear
2371 tgtfile=$DIR/$tdir/$tfile.$client
2372 local md5=$(do_node $prev_client "md5sum $tgtfile")
# ${var// */} keeps only the checksum field of "md5sum" output.
2373 [ ${checksum[$index]// */} = ${md5// */} ] ||
2374 error "$tgtfile: checksum doesn't match on $prev_client"
2375 index=$((index + 1))
# test_70f_loop (fragment; header elided): repeat write/read until stopflag is
# removed, alternating direct I/O and buffered I/O between iterations.
2385 mkdir -p $DIR/$tdir || error "cannot create $DIR/$tdir directory"
2386 $LFS setstripe -c -1 $DIR/$tdir ||
2387 error "cannot $LFS setstripe $DIR/$tdir"
2390 while [ -f $stopflag ]; do
2391 test_70f_write_and_read $srcfile $stopflag
2392 # use direct IO and buffer cache in turns if loop
2393 [ -n "$DD_OPTS" ] && DD_OPTS="" || DD_OPTS="oflag=direct"
# test_70f_cleanup: remove stop flag, per-client source files and test output.
2397 test_70f_cleanup() {
2399 rm -f $TMP/$tfile.stop
2400 do_nodes $CLIENTS rm -f $TMP/$tfile
2401 rm -f $DIR/$tdir/$tfile.*
# test_70f: O_DIRECT write/read load across all clients while repeatedly
# failing over OSTs; loop and failover bookkeeping lines are partly elided.
2405 # [ x$ost1failover_HOST = x$ost_HOST ] &&
2406 # { skip "Failover host not defined" && return; }
2407 # [ $CLIENTCOUNT -lt 2 ] &&
2408 # { skip "Need 2 or more clients, have $CLIENTCOUNT" && return; }
2410 [[ "$OST1_VERSION" -lt $(version_code 2.9.53) ]] &&
2411 skip "Need server version at least 2.9.53"
2413 echo "mount clients $CLIENTS ..."
2414 zconf_mount_clients $CLIENTS $MOUNT
2416 local srcfile=$TMP/$tfile
2420 trap test_70f_cleanup EXIT
2421 # create a different source file local to each client node so we can
2422 # detect if the file wasn't written out properly after failover
2423 do_nodes $CLIENTS dd bs=1M count=10 if=/dev/urandom of=$srcfile \
2424 2>/dev/null || error "can't create $srcfile on $CLIENTS"
# Record each client's source-file checksum for later comparison.
2425 for client in ${CLIENTS//,/ }; do
2426 checksum[$index]=$(do_node $client "md5sum $srcfile")
2427 index=$((index + 1))
2431 [ "$SLOW" = "no" ] && duration=60
2432 # set duration to 900 because it takes some time to boot node
2433 [ "$FAILURE_MODE" = HARD ] && duration=900
2435 local stopflag=$TMP/$tfile.stop
# Background writer loop; the existence of $stopflag keeps it running.
2436 test_70f_loop $srcfile $stopflag &
2440 local num_failovers=0
2441 local start_ts=$SECONDS
2442 while [ $elapsed -lt $duration ]; do
2446 num_failovers=$((num_failovers + 1))
2447 log "$TESTNAME failing OST $num_failovers times"
2450 elapsed=$((SECONDS - start_ts))
2457 run_test 70f "OSS O_DIRECT recovery with $CLIENTCOUNT clients"
# cleanup_71a (fragment): kill the background mkdir/rmdir loop on exit.
2461 kill -9 $mkdir_71a_pid
# random_double_fail_mdt: like random_fail_mdt but fails a PAIR of adjacent
# MDTs (mdsN and mdsN+1) together each iteration.
2464 random_double_fail_mdt() {
2467 local monitor_pid=$3
2469 local start_ts=$(date +%s)
2470 local num_failovers=0
2474 elapsed=$(($(date +%s) - start_ts))
2475 while [ $elapsed -lt $duration ]; do
2476 fail_index=$((RANDOM%max_index + 1))
# When the random pick is the last MDT, the wrap handling (elided here)
# adjusts the pair; otherwise the partner is simply fail_index+1.
2477 if [ $fail_index -eq $max_index ]; then
2480 second_index=$((fail_index + 1))
2482 kill -0 $monitor_pid ||
2483 error "$monitor_pid stopped"
2485 replay_barrier mds$fail_index
2486 replay_barrier mds$second_index
2488 # Increment the number of failovers
2489 num_failovers=$((num_failovers+1))
2490 log "fail mds$fail_index mds$second_index $num_failovers times"
# Fail both MDTs of the pair in one recovery cycle.
2491 fail mds${fail_index},mds${second_index}
2492 elapsed=$(($(date +%s) - start_ts))
# test_71a: striped mkdir/rmdir loop while double-failing MDT pairs.
2497 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2498 local clients=${CLIENTS:-$HOSTNAME}
2501 zconf_mount_clients $clients $MOUNT
2504 [ "$SLOW" = "no" ] && duration=180
2505 # set duration to 900 because it takes some time to boot node
2506 [ "$FAILURE_MODE" = HARD ] && duration=900
2508 mkdir_on_mdt0 $DIR/$tdir
2511 local start_ts=$(date +%s)
2513 trap cleanup_71a EXIT
2516 $LFS mkdir -i0 -c2 $DIR/$tdir/test
2517 rmdir $DIR/$tdir/test
2521 echo "Started $mkdir_71a_pid"
2523 random_double_fail_mdt 2 $duration $mkdir_71a_pid
2524 kill -0 $mkdir_71a_pid || error "mkdir/rmdir $mkdir_71a_pid stopped"
2529 run_test 71a "mkdir/rmdir striped dir with 2 mdts recovery"
# test_73a: open(O_CREAT)+unlink an open file, set a fail_loc that delays the
# LDLM enqueue during replay, fail over the MDS, then close; the unlinked
# file must be gone afterwards. (unlink/fail lines are elided here.)
2532 multiop_bg_pause $DIR/$tfile O_tSc ||
2533 error "multiop_bg_pause $DIR/$tfile failed"
2537 replay_barrier $SINGLEMDS
2538 #define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
# 0x8000#### = one-shot fail_loc: triggers once, then clears itself.
2539 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
2542 wait $pid || error "multiop pid failed"
2543 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
2546 run_test 73a "open(O_CREAT), unlink, replay, reconnect before open replay, close"
# test_73b: same scenario, but drop the LDLM reply at open-replay time.
2549 multiop_bg_pause $DIR/$tfile O_tSc ||
2550 error "multiop_bg_pause $DIR/$tfile failed"
2554 replay_barrier $SINGLEMDS
2555 #define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
2556 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000157"
2559 wait $pid || error "multiop pid failed"
2560 [ -e $DIR/$tfile ] && error "file $DIR/$tfile should not exist"
2563 run_test 73b "open(O_CREAT), unlink, replay, reconnect at open_replay reply, close"
# test_74: MDS fails over with no clients mounted; remounted clients must be
# able to create/remove files without being evicted.
2567 local clients=${CLIENTS:-$HOSTNAME}
2569 zconf_umount_clients $clients $MOUNT
2571 facet_failover $SINGLEMDS
2572 zconf_mount_clients $clients $MOUNT
2574 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
2575 rm $DIR/$tfile || error "rm $DIR/$tfile failed"
2576 clients_up || error "client evicted: $?"
2579 run_test 74 "Ensure applications don't fail waiting for OST recovery"
# remote_dir_check_80: shared verifier for the test_80* family — the remote
# dir and files created in it must live on the expected MDT index ($mdtidx,
# set by the caller; assignment lines are elided from this listing).
2581 remote_dir_check_80() {
2586 diridx=$($LFS getstripe -m $remote_dir) ||
2587 error "$LFS getstripe -m $remote_dir failed"
2588 [ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
2590 createmany -o $remote_dir/f-%d 20 || error "creation failed"
2591 fileidx=$($LFS getstripe -m $remote_dir/f-1) ||
2592 error "$LFS getstripe -m $remote_dir/f-1 failed"
2593 [ $fileidx -eq $mdtidx ] || error "$fileidx != $mdtidx"
# test_80a: create a remote dir while dropping the OUT update reply on MDT0,
# then fail MDT0; creation must be replayed correctly.
2599 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2600 ([ $FAILURE_MODE == "HARD" ] &&
2601 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2602 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2606 local remote_dir=$DIR/$tdir/remote_dir
2608 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2609 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2610 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2611 $LFS mkdir -i $MDTIDX $remote_dir &
2617 wait $CLIENT_PID || error "remote creation failed"
2619 remote_dir_check_80 || error "remote dir check failed"
2620 rm -rf $DIR/$tdir || error "rmdir failed"
2624 run_test 80a "DNE: create remote dir, drop update rep from MDT0, fail MDT0"
# test_80b: same drop on MDT0, but fail MDT1 (the remote-dir target) instead.
2627 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2628 ([ $FAILURE_MODE == "HARD" ] &&
2629 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2630 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2634 local remote_dir=$DIR/$tdir/remote_dir
2636 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2637 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2638 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2639 $LFS mkdir -i $MDTIDX $remote_dir &
2644 fail mds$((MDTIDX + 1))
2646 wait $CLIENT_PID || error "remote creation failed"
2648 remote_dir_check_80 || error "remote dir check failed"
2649 rm -rf $DIR/$tdir || error "rmdir failed"
2653 run_test 80b "DNE: create remote dir, drop update rep from MDT0, fail MDT1"
# test_80c: drop update reply from MDT1, then fail both MDTs in sequence.
2656 [[ "$mds1_FSTYPE" = zfs ]] &&
2657 [[ $MDS1_VERSION -lt $(version_code 2.12.51) ]] &&
2658 skip "requires LU-10143 fix on MDS"
2659 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
2660 ([ $FAILURE_MODE == "HARD" ] &&
2661 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2662 skip "MDTs needs to be on diff hosts for HARD fail mode"
2665 local remote_dir=$DIR/$tdir/remote_dir
2667 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2668 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2669 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2670 $LFS mkdir -i $MDTIDX $remote_dir &
2676 fail mds$((MDTIDX + 1))
2678 wait $CLIENT_PID || error "remote creation failed"
2680 remote_dir_check_80 || error "remote dir check failed"
2681 rm -rf $DIR/$tdir || error "rmdir failed"
2685 run_test 80c "DNE: create remote dir, drop update rep from MDT1, fail MDT[0,1]"
# test_80d: drop update reply from MDT1, then fail the two MDTs together.
2688 [[ "$mds1_FSTYPE" = zfs ]] &&
2689 [[ $MDS1_VERSION -lt $(version_code 2.12.51) ]] &&
2690 skip "requires LU-10143 fix on MDS"
2691 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
2693 local remote_dir=$DIR/$tdir/remote_dir
2695 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2696 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2697 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2698 $LFS mkdir -i $MDTIDX $remote_dir &
2701 # sleep 3 seconds to make sure MDTs are failed after
2702 # lfs mkdir -i has finished on all of MDTs.
2707 fail mds${MDTIDX},mds$((MDTIDX + 1))
2709 wait $CLIENT_PID || error "remote creation failed"
2711 remote_dir_check_80 || error "remote dir check failed"
2712 rm -rf $DIR/$tdir || error "rmdir failed"
2716 run_test 80d "DNE: create remote dir, drop update rep from MDT1, fail 2 MDTs"
# test_80e: create remote dir while dropping MDT1's REINT reply (fail_loc
# 0x119), then fail MDT0; creation must still be replayed.
2719 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2720 ([ $FAILURE_MODE == "HARD" ] &&
2721 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2722 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2726 local remote_dir=$DIR/$tdir/remote_dir
2728 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2729 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2730 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2731 $LFS mkdir -i $MDTIDX $remote_dir &
2734 # sleep 3 seconds to make sure MDTs are failed after
2735 # lfs mkdir -i has finished on all of MDTs.
2741 wait $CLIENT_PID || error "remote creation failed"
2743 remote_dir_check_80 || error "remote dir check failed"
2744 rm -rf $DIR/$tdir || error "rmdir failed"
2748 run_test 80e "DNE: create remote dir, drop MDT1 rep, fail MDT0"
# test_80f: same reply drop, but fail MDT1 itself.
2751 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2752 ([ $FAILURE_MODE == "HARD" ] &&
2753 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2754 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2757 local remote_dir=$DIR/$tdir/remote_dir
2759 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2760 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2761 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2762 $LFS mkdir -i $MDTIDX $remote_dir &
2766 fail mds$((MDTIDX + 1))
2768 wait $CLIENT_PID || error "remote creation failed"
2770 remote_dir_check_80 || error "remote dir check failed"
2771 rm -rf $DIR/$tdir || error "rmdir failed"
2775 run_test 80f "DNE: create remote dir, drop MDT1 rep, fail MDT1"
# test_80g: same reply drop; fail MDT0 first, then MDT1.
2778 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2779 ([ $FAILURE_MODE == "HARD" ] &&
2780 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2781 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2785 local remote_dir=$DIR/$tdir/remote_dir
2787 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2788 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2789 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2790 $LFS mkdir -i $MDTIDX $remote_dir &
2793 # sleep 3 seconds to make sure MDTs are failed after
2794 # lfs mkdir -i has finished on all of MDTs.
2800 fail mds$((MDTIDX + 1))
2802 wait $CLIENT_PID || error "remote creation failed"
2804 remote_dir_check_80 || error "remote dir check failed"
2805 rm -rf $DIR/$tdir || error "rmdir failed"
2809 run_test 80g "DNE: create remote dir, drop MDT1 rep, fail MDT0, then MDT1"
# test_80h: same reply drop; fail both MDTs together.
2812 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2814 local remote_dir=$DIR/$tdir/remote_dir
2816 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2817 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2818 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
2819 $LFS mkdir -i $MDTIDX $remote_dir &
2822 # sleep 3 seconds to make sure MDTs are failed after
2823 # lfs mkdir -i has finished on all of MDTs.
2828 fail mds${MDTIDX},mds$((MDTIDX + 1))
2830 wait $CLIENT_PID || error "remote dir creation failed"
2832 remote_dir_check_80 || error "remote dir check failed"
2833 rm -rf $DIR/$tdir || error "rmdir failed"
2837 run_test 80h "DNE: create remote dir, drop MDT1 rep, fail 2 MDTs"
# test_81a: UNLINK a remote dir while dropping MDT0's OUT update reply
# (fail_loc 0x1701), then fail MDT1; the dir must stay removed after replay.
2840 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2841 ([ $FAILURE_MODE == "HARD" ] &&
2842 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2843 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2847 local remote_dir=$DIR/$tdir/remote_dir
2849 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2850 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2852 touch $remote_dir || error "touch $remote_dir failed"
2853 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2854 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2859 fail mds$((MDTIDX + 1))
2861 wait $CLIENT_PID || error "rm remote dir failed"
2863 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
2865 rm -rf $DIR/$tdir || error "rmdir failed"
2869 run_test 81a "DNE: unlink remote dir, drop MDT0 update rep, fail MDT1"
# test_81b: same drop on MDT0; fail MDT0 itself.
2872 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2873 ([ $FAILURE_MODE == "HARD" ] &&
2874 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2875 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2878 local remote_dir=$DIR/$tdir/remote_dir
2880 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2881 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2883 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2884 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2891 wait $CLIENT_PID || error "rm remote dir failed"
2893 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
2895 rm -rf $DIR/$tdir || error "rmdir failed"
2899 run_test 81b "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0"
# test_81c: same drop; fail MDT0 then MDT1.
2902 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2903 ([ $FAILURE_MODE == "HARD" ] &&
2904 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2905 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2909 local remote_dir=$DIR/$tdir/remote_dir
2911 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2912 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2914 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2915 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2922 fail mds$((MDTIDX + 1))
2924 wait $CLIENT_PID || error "rm remote dir failed"
2926 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
2928 rm -rf $DIR/$tdir || error "rmdir failed"
2932 run_test 81c "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0,MDT1"
# test_81d: same drop; fail both MDTs together.
2935 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2937 local remote_dir=$DIR/$tdir/remote_dir
2939 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2940 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2942 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
2943 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
2949 fail mds${MDTIDX},mds$((MDTIDX + 1))
2951 wait $CLIENT_PID || error "rm remote dir failed"
2953 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
2955 rm -rf $DIR/$tdir || error "rmdir failed"
2959 run_test 81d "DNE: unlink remote dir, drop MDT0 update reply, fail 2 MDTs"
# test_81e: unlink remote dir while dropping MDT1's REINT reply (0x119),
# then fail MDT0; removal must persist across replay.
2962 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2963 ([ $FAILURE_MODE == "HARD" ] &&
2964 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2965 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
2969 local remote_dir=$DIR/$tdir/remote_dir
2971 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
2972 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
2974 # OBD_FAIL_MDS_REINT_NET_REP 0x119
2975 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
# Clear the fail_loc before failing over (rmdir launch lines elided).
2978 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
2983 wait $CLIENT_PID || error "rm remote dir failed"
2985 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
2987 rm -rf $DIR/$tdir || error "rmdir failed"
2991 run_test 81e "DNE: unlink remote dir, drop MDT1 req reply, fail MDT0"
# test_81f: same reply drop; fail MDT1 itself.
2994 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
2995 ([ $FAILURE_MODE == "HARD" ] &&
2996 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
2997 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3001 local remote_dir=$DIR/$tdir/remote_dir
3003 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3004 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
3006 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3007 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
3012 fail mds$((MDTIDX + 1))
3014 wait $CLIENT_PID || error "rm remote dir failed"
3016 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
3018 rm -rf $DIR/$tdir || error "rmdir failed"
3022 run_test 81f "DNE: unlink remote dir, drop MDT1 req reply, fail MDT1"
# test_81g: same reply drop; fail MDT0 first, then MDT1.
3025 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3026 ([ $FAILURE_MODE == "HARD" ] &&
3027 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3028 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3032 local remote_dir=$DIR/$tdir/remote_dir
3034 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3035 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
3037 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3038 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
3045 fail mds$((MDTIDX + 1))
3047 wait $CLIENT_PID || error "rm remote dir failed"
3049 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
3051 rm -rf $DIR/$tdir || error "rmdir failed"
3055 run_test 81g "DNE: unlink remote dir, drop req reply, fail M0, then M1"
# test_81h: same reply drop; fail both MDTs together.
3058 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3060 local remote_dir=$DIR/$tdir/remote_dir
3062 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3063 $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
3065 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3066 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
3072 fail mds${MDTIDX},mds$((MDTIDX + 1))
3074 wait $CLIENT_PID || error "rm remote dir failed"
3076 stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
3078 rm -rf $DIR/$tdir || error "rmdir failed"
3082 run_test 81h "DNE: unlink remote dir, drop request reply, fail 2 MDTs"
# test_84a (fragment): stall an open with OBD_FAIL_MDS_OPEN_WAIT_CREATE and
# verify a stale open during export disconnect does not wedge the client.
3085 #define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144
3086 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000144"
3087 createmany -o $DIR/$tfile- 1 &
3091 client_up || client_up || true # reconnect
3093 run_test 84a "stale open during export disconnect"
# test_85a: create 100 files to accumulate IBITS MDT locks, fail over the
# MDS, and verify the unused-lock count drops (locks canceled before replay).
3095 test_85a() { #bug 16774
3096 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
3098 for i in $(seq 100); do
3099 echo "tag-$i" > $DIR/$tfile-$i
3100 grep -q "tag-$i" $DIR/$tfile-$i || error "f2-$i"
# Derive the per-client namespace suffix from the clilov device name.
3103 lov_id=$(lctl dl | grep "clilov")
3104 addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $NF}')
3105 count=$(lctl get_param -n \
3106 ldlm.namespaces.*MDT0000*$addr.lock_unused_count)
3107 echo "before recovery: unused locks count = $count"
3111 count2=$(lctl get_param -n \
3112 ldlm.namespaces.*MDT0000*$addr.lock_unused_count)
3113 echo "after recovery: unused locks count = $count2"
3115 if [ $count2 -ge $count ]; then
3116 error "unused locks are not canceled"
3119 run_test 85a "check the cancellation of unused locks during recovery(IBITS)"
# test_85b: same as 85a but for EXTENT (OST) locks — write then re-read 100
# files to accumulate read locks on OST0000, fail over, check cancellation.
3121 test_85b() { #bug 16774
3125 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
3127 $LFS setstripe -c 1 -i 0 $DIR/$tdir
3129 for i in $(seq 100); do
3130 dd if=/dev/urandom of=$DIR/$tdir/$tfile-$i bs=4096 \
3131 count=32 >/dev/null 2>&1
# Drop write locks so the subsequent reads take fresh read locks.
3134 cancel_lru_locks osc
3136 for i in $(seq 100); do
3137 dd if=$DIR/$tdir/$tfile-$i of=/dev/null bs=4096 \
3138 count=32 >/dev/null 2>&1
3141 lov_id=$(lctl dl | grep "clilov")
3142 addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $NF}')
3143 count=$(lctl get_param -n \
3144 ldlm.namespaces.*OST0000*$addr.lock_unused_count)
3145 echo "before recovery: unused locks count = $count"
3146 [ $count -ne 0 ] || error "unused locks ($count) should be zero"
3150 count2=$(lctl get_param \
3151 -n ldlm.namespaces.*OST0000*$addr.lock_unused_count)
3152 echo "after recovery: unused locks count = $count2"
3154 if [ $count2 -ge $count ]; then
3155 error "unused locks are not canceled"
3160 run_test 85b "check the cancellation of unused locks during recovery(EXTENT)"
# test_86: clearing per-export nid stats then remounting the MDS must not
# trigger an LBUG (regression check).
3163 local clients=${CLIENTS:-$HOSTNAME}
3165 zconf_umount_clients $clients $MOUNT
3166 do_facet $SINGLEMDS lctl set_param mdt.${FSNAME}-MDT*.exports.clear=0
3167 remount_facet $SINGLEMDS
3168 zconf_mount_clients $clients $MOUNT
3170 run_test 86 "umount server after clear nid_stats should not hit LBUG"
# test_87a: with async journal commit on ost1, write 8MB, fail over the OST
# (failover line elided), and verify replayed data matches the pre-failover
# md5 checksum.
3173 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
3176 $LFS setstripe -i 0 -c 1 $DIR/$tfile
3177 dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 ||
3178 error "dd to $DIR/$tfile failed"
3179 cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
# Drop client locks/cache so the read after failover comes from the OST.
3180 cancel_lru_locks osc
3182 dd if=$DIR/$tfile of=/dev/null bs=1024k count=8 || error "Cannot read"
3183 cksum2=$(md5sum $DIR/$tfile | awk '{print $1}')
3184 if [ $cksum != $cksum2 ] ; then
3185 error "New checksum $cksum2 does not match original $cksum"
3188 run_test 87a "write replay"
# test_87b: same as 87a but modify data mid-file before the replay so the
# resend carries a changed checksum (checksum-resend path).
3191 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
3194 $LFS setstripe -i 0 -c 1 $DIR/$tfile
3195 dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 ||
3196 error "dd to $DIR/$tfile failed"
3197 sleep 1 # Give it a chance to flush dirty data
# Overwrite 8 bytes at offset 64 to dirty a committed page.
3198 echo TESTTEST | dd of=$DIR/$tfile bs=1 count=8 seek=64
3199 cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
3200 cancel_lru_locks osc
3202 dd if=$DIR/$tfile of=/dev/null bs=1024k count=8 || error "Cannot read"
3203 cksum2=$(md5sum $DIR/$tfile | awk '{print $1}')
3204 if [ $cksum != $cksum2 ] ; then
3205 error "New checksum $cksum2 does not match original $cksum"
3208 run_test 87b "write replay with changed data (checksum resend)"
# test_88 (bug 17485): exhaust OST object precreation, create files with
# uncommitted object IDs, crash MDS+OST together, then verify the MDS does
# not hand the same object IDs to different files after recovery.
3210 test_88() { #bug 17485
3211 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3212 mkdir -p $TMP/$tdir || error "mkdir $TMP/$tdir failed"
3214 $LFS setstripe -i 0 -c 1 $DIR/$tdir || error "$LFS setstripe failed"
3217 replay_barrier $SINGLEMDS
3219 # exhaust precreations on ost1
3220 local OST=$(ostname_from_index 0)
3221 local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $OST)
3222 local last_id=$(do_facet $SINGLEMDS lctl get_param -n osp.$mdtosc.prealloc_last_id)
3223 local next_id=$(do_facet $SINGLEMDS lctl get_param -n osp.$mdtosc.prealloc_next_id)
3224 echo "before test: last_id = $last_id, next_id = $next_id"
3226 echo "Creating to objid $last_id on ost $OST..."
# Consume every preallocated object up to last_id (+2 for safety margin).
3227 createmany -o $DIR/$tdir/f-%d $next_id $((last_id - next_id + 2)) ||
3228 error "createmany create files to last_id failed"
3230 #create some files to use some uncommitted objids
3231 last_id=$(($last_id + 1))
3232 createmany -o $DIR/$tdir/f-%d $last_id 8 ||
3233 error "createmany create files with uncommitted objids failed"
3235 last_id2=$(do_facet $SINGLEMDS lctl get_param -n osp.$mdtosc.prealloc_last_id)
3236 next_id2=$(do_facet $SINGLEMDS lctl get_param -n osp.$mdtosc.prealloc_next_id)
3237 echo "before recovery: last_id = $last_id2, next_id = $next_id2"
3239 # if test uses shutdown_facet && reboot_facet instead of facet_failover ()
3240 # it has to take care about the affected facets, bug20407
3241 local affected_mds1=$(affected_facets mds1)
3242 local affected_ost1=$(affected_facets ost1)
# Hard-stop MDS (and ost1 — its shutdown line is elided), then bring both
# back up through the affected-facet bookkeeping.
3244 shutdown_facet $SINGLEMDS
3247 reboot_facet $SINGLEMDS
3248 change_active $affected_mds1
3249 wait_for_facet $affected_mds1
3250 mount_facets $affected_mds1 || error "Restart of mds failed"
3253 change_active $affected_ost1
3254 wait_for_facet $affected_ost1
3255 mount_facets $affected_ost1 || error "Restart of ost1 failed"
3259 last_id2=$(do_facet $SINGLEMDS lctl get_param -n osp.$mdtosc.prealloc_last_id)
3260 next_id2=$(do_facet $SINGLEMDS lctl get_param -n osp.$mdtosc.prealloc_next_id)
3261 echo "after recovery: last_id = $last_id2, next_id = $next_id2"
3263 # create new files, which should use new objids, and ensure the orphan
3264 # cleanup phase for ost1 is completed at the same time
3265 for i in $(seq 8); do
3266 file_id=$(($last_id + 10 + $i))
3267 dd if=/dev/urandom of=$DIR/$tdir/f-$file_id bs=4096 count=128
3270 # if the objids were not recreated, then "ls" will fail with -ENOENT
3271 ls -l $DIR/$tdir/* || error "can't get the status of precreated files"
3274 # write into previously created files
3275 for i in $(seq 8); do
3276 file_id=$(($last_id + $i))
3277 dd if=/dev/urandom of=$DIR/$tdir/f-$file_id bs=4096 count=128
# Keep a local copy to detect object-ID reuse corrupting file contents.
3278 cp -f $DIR/$tdir/f-$file_id $TMP/$tdir/
3281 # compare the content
3282 for i in $(seq 8); do
3283 file_id=$(($last_id + $i))
3284 cmp $TMP/$tdir/f-$file_id $DIR/$tdir/f-$file_id ||
3285 error "the content of file is modified!"
3290 run_test 88 "MDS should not assign same objid to different files "
# calc_osc_kbytes_used: report used KB on the client's OSC view
# (total minus free), via the calc_osc_kbytes helper.
3292 function calc_osc_kbytes_used() {
3293 local kbtotal=$(calc_osc_kbytes kbytestotal)
3294 local kbfree=$(calc_osc_kbytes kbytesfree)
3295 echo $((kbtotal-kbfree))
# test_89: delete a file while the MDS fails over and ost1 is down
# (ost1 stop/start lines elided); after recovery, used blocks must return
# to the baseline — i.e. no space leaked by the late OST connection.
3299 cancel_lru_locks osc
3300 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3301 rm -f $DIR/$tdir/$tfile
3302 wait_mds_ost_sync || error "initial MDS-OST sync timed out"
3303 wait_delete_completed || error "initial wait delete timed out"
3304 local blocks1=$(calc_osc_kbytes_used)
3305 local write_size=$(fs_log_size)
3307 $LFS setstripe -i 0 -c 1 $DIR/$tdir/$tfile
# Write at least 10MB so a leak is measurable above log-size noise.
3308 [ $write_size -lt 1024 ] && write_size=1024
3309 dd if=/dev/zero bs=${write_size}k count=10 of=$DIR/$tdir/$tfile
3312 facet_failover $SINGLEMDS
3313 rm $DIR/$tdir/$tfile
3316 zconf_mount $(hostname) $MOUNT || error "mount fails"
3317 client_up || error "client_up failed"
3319 # wait for the remounted client to connect to ost1
3320 local target=$(get_osc_import_name client ost1)
3321 wait_import_state "FULL" "osc.${target}.ost_server_uuid" \
3322 $(max_recovery_time)
3324 wait_mds_ost_sync || error "MDS-OST sync timed out"
3325 wait_delete_completed || error "wait delete timed out"
3326 local blocks2=$(calc_osc_kbytes_used)
# Allow up to one fs log size of slack between the two measurements.
3328 [ $((blocks2 - blocks1)) -le $(fs_log_size) ] ||
3329 error $((blocks2 - blocks1)) blocks leaked
3331 run_test 89 "no disk space leak on late ost connection"
# cleanup_90 (fragment): bring the failed facet back online on exit.
3338 change_active $facet
3339 wait_for_facet $facet
3340 mount_facet $facet || error "Restart of $facet failed"
# test_90 (bug 19494): shut down one random OST and verify that `lfs find`
# and `lfs getstripe --obd` report exactly the files striped on it
# (one single-stripe file f$index plus the all-OSTs file "all").
3344 test_90() { # bug 19494
3345 local dir=$DIR/$tdir
3346 local ostfail=$(get_random_entry $(get_facets OST))
3348 if [[ $FAILURE_MODE = HARD ]]; then
3349 local affected=$(affected_facets $ostfail);
# HARD mode would take sibling facets down too, which breaks the
# expected one-OST-missing accounting — skip in that case.
3351 [[ "$affected" == $ostfail ]] ||
3352 skip "cannot use FAILURE_MODE=$FAILURE_MODE, affected: $affected"
3354 # ensure all OSTs are active to allow allocations
3357 mkdir $dir || error "mkdir $dir failed"
3359 echo "Create the files"
3361 # file "f${index}" striped over 1 OST
3362 # file "all" striped over all OSTs
3364 $LFS setstripe -c $OSTCOUNT $dir/all ||
3365 error "setstripe failed to create $dir/all"
3367 for ((i = 0; i < $OSTCOUNT; i++)); do
3370 $LFS setstripe -i $i -c 1 $f ||
3371 error "$LFS setstripe failed to create $f"
3373 # confirm setstripe actually created stripe on requested OST
3374 local uuid=$(ostuuid_from_index $i)
3376 for file in f$i all; do
3377 local found=$($LFS find --obd $uuid --name $file $dir)
3379 if [[ $dir/$file != $found ]]; then
3380 $LFS getstripe $dir/$file
3381 error "wrong stripe: $file, uuid: $uuid"
3386 # Before failing an OST, get its obd name and index
3387 local varsvc=${ostfail}_svc
3388 local obd=$(do_facet $ostfail lctl get_param \
3389 -n obdfilter.${!varsvc}.uuid)
3390 local index=$(($(facet_number $ostfail) - 1))
3392 echo "Fail $ostfail $obd, display the list of affected files"
3393 shutdown_facet $ostfail || error "shutdown_facet $ostfail failed"
3395 trap "cleanup_90 $ostfail" EXIT INT
3396 echo "General Query: lfs find $dir"
3397 local list=$($LFS find $dir)
# A plain lfs find must still list every file despite the missing OST.
3399 for (( i=0; i<$OSTCOUNT; i++ )); do
3400 list_member "$list" $dir/f$i ||
3401 error_noexit "lfs find $dir: no file f$i"
3403 list_member "$list" $dir/all ||
3404 error_noexit "lfs find $dir: no file all"
3406 # focus on the missing OST,
3407 # we expect to see only two files affected: "f$(index)" and "all"
3409 echo "Querying files on shutdown $ostfail: lfs find --obd $obd"
3410 list=$($LFS find --obd $obd $dir)
3412 for file in all f$index; do
3413 list_member "$list" $dir/$file ||
3414 error_noexit "lfs find does not report the affected $obd for $file"
3417 [[ $(echo $list | wc -w) -eq 2 ]] ||
3418 error_noexit "lfs find reports the wrong list of affected files ${#list[@]}"
3420 echo "Check getstripe: $LFS getstripe -r --obd $obd"
3421 list=$($LFS getstripe -r --obd $obd $dir)
3423 for file in all f$index; do
3424 echo "$list" | grep $dir/$file ||
3425 error_noexit "lfs getsripe does not report the affected $obd for $file"
3430 run_test 90 "lfs find identifies the missing striped file segments"
# test_93a body: replay + reconnect against an OST.
# NOTE(review): the "test_93a() {" header line appears elided in this chunk.
# Requires MDS 2.5.4..2.5.50 or 2.6.90+ (two alternative version windows).
3433 [[ "$MDS1_VERSION" -ge $(version_code 2.6.90) ]] ||
3434 [[ "$MDS1_VERSION" -ge $(version_code 2.5.4) &&
3435 "$MDS1_VERSION" -lt $(version_code 2.5.50) ]] ||
3436 skip "Need MDS version 2.5.4+ or 2.6.90+"
3438 cancel_lru_locks osc
# create a single-striped file on OST0 and write one KiB so there is
# dirty state to replay after failover
3440 $LFS setstripe -i 0 -c 1 $DIR/$tfile ||
3441 error "$LFS setstripe $DIR/$tfile failed"
3442 dd if=/dev/zero of=$DIR/$tfile bs=1024 count=1 ||
3443 error "dd to $DIR/$tfile failed"
3444 #define OBD_FAIL_TGT_REPLAY_RECONNECT 0x715
3445 # We need to emulate a state that OST is waiting for other clients
3446 # not completing the recovery. Final ping is queued, but reply will be
3447 # sent on the recovery completion. It is done by sleep before
3448 # processing final pings
3449 do_facet ost1 "$LCTL set_param fail_val=40"
3450 do_facet ost1 "$LCTL set_param fail_loc=0x715"
3453 run_test 93a "replay + reconnect"
# test_93b body: same scenario as 93a but the delayed final ping is on the
# MDT side (fail_loc 0x715 set on mds1). Header line elided in this chunk.
3456 [[ "$MDS1_VERSION" -ge $(version_code 2.7.90) ]] ||
3457 skip "Need MDS version 2.7.90+"
3459 cancel_lru_locks mdc
3461 createmany -o $DIR/$tfile 20 ||
3462 error "createmany -o $DIR/$tfile failed"
3464 #define OBD_FAIL_TGT_REPLAY_RECONNECT 0x715
3465 # We need to emulate a state that MDT is waiting for other clients
3466 # not completing the recovery. Final ping is queued, but reply will be
3467 # sent on the recovery completion. It is done by sleep before
3468 # processing final pings
3469 do_facet mds1 "$LCTL set_param fail_val=80"
3470 do_facet mds1 "$LCTL set_param fail_loc=0x715"
3473 run_test 93b "replay + reconnect on mds"
# Helper: verify $DIR/$tdir/striped_dir survived recovery with a stripe
# count of 2 and that new files can still be created inside it.
3475 striped_dir_check_100() {
3476 local striped_dir=$DIR/$tdir/striped_dir
3477 local stripe_count=$($LFS getdirstripe -c $striped_dir)
3479 $LFS getdirstripe $striped_dir
3480 [ $stripe_count -eq 2 ] || error "$stripe_count != 2"
3482 createmany -o $striped_dir/f-%d 20 ||
3483 error "creation failed under striped dir"
# test_100a: create a striped dir while dropping the update reply from
# MDT1 (fail_loc 0x1701), then fail that MDT; the creation must still
# complete correctly after recovery. Header line elided in this chunk.
3487 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3488 ([ $FAILURE_MODE == "HARD" ] &&
3489 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3490 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3493 local striped_dir=$DIR/$tdir/striped_dir
3496 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3498 #To make sure MDT1 and MDT0 are connected
3499 #otherwise it may create single stripe dir here
3500 $LFS setdirstripe -i1 $DIR/$tdir/remote_dir
3502 #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
3503 do_facet mds$((MDTIDX+1)) lctl set_param fail_loc=0x1701
# run the striped-dir creation in the background; it blocks until the
# failed MDT recovers
3504 $LFS setdirstripe -i0 -c2 $striped_dir &
3507 fail mds$((MDTIDX + 1))
3509 wait $CLIENT_PID || error "striped dir creation failed"
3511 striped_dir_check_100 || error "striped dir check failed"
3512 rm -rf $DIR/$tdir || error "rmdir failed"
3514 run_test 100a "DNE: create striped dir, drop update rep from MDT1, fail MDT1"
# test_100b: same creation but drop the reint reply on MDT0
# (fail_loc 0x119) and fail MDT0. Header line elided in this chunk.
3517 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3518 ([ $FAILURE_MODE == "HARD" ] &&
3519 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3520 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3523 local striped_dir=$DIR/$tdir/striped_dir
3526 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3528 #To make sure MDT1 and MDT0 are connected
3529 #otherwise it may create single stripe dir here
3530 $LFS setdirstripe -i1 $DIR/$tdir/remote_dir
3532 # OBD_FAIL_MDS_REINT_NET_REP 0x119
3533 do_facet mds$MDTIDX lctl set_param fail_loc=0x119
3534 $LFS mkdir -i0 -c2 $striped_dir &
3539 wait $CLIENT_PID || error "striped dir creation failed"
3541 striped_dir_check_100 || error "striped dir check failed"
3542 rm -rf $DIR/$tdir || error "rmdir failed"
3544 run_test 100b "DNE: create striped dir, fail MDT0"
# test_100c: create a striped dir on MDT1, then abort MDT recovery on
# mds2 (fail_abort ... abort_recov_mdt); creates under the dir should
# fail until cleanup, but the dir itself must remain consistent.
# NOTE(review): the run_test summary below says "fail MDT0" but the body
# aborts recovery on mds2 — summary looks stale; confirm upstream.
3547 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3548 ([ $FAILURE_MODE == "HARD" ] &&
3549 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3550 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3553 local striped_dir=$DIR/$tdir/striped_dir
3555 mkdir_on_mdt0 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3557 #To make sure MDT1 and MDT0 are connected
3558 #otherwise it may create single stripe dir here
3559 $LFS setdirstripe -i1 $DIR/$tdir/remote_dir
3562 $LFS mkdir -i1 -c2 $striped_dir
3564 fail_abort mds2 abort_recov_mdt
3566 createmany -o $striped_dir/f-%d 20 &&
3567 error "createmany -o $DIR/$tfile should fail"
3570 striped_dir_check_100 || error "striped dir check failed"
3571 rm -rf $DIR/$tdir || error "rmdir failed"
3573 run_test 100c "DNE: create striped dir, fail MDT0"
# test_101 (LU-5648): after an aborted recovery, precreated OST objects
# written via d1/file* must not be reassigned to the freshly touched
# d2/file* files — any non-zero size on d2 files indicates reassignment.
3575 test_101() { #LU-5648
3576 mkdir -p $DIR/$tdir/d1
3577 mkdir -p $DIR/$tdir/d2
3578 touch $DIR/$tdir/file0
3581 replay_barrier $SINGLEMDS
3582 for i in $(seq $num) ; do
3583 echo test$i > $DIR/$tdir/d1/file$i
3586 fail_abort $SINGLEMDS
3587 for i in $(seq $num) ; do
3588 touch $DIR/$tdir/d2/file$i
3589 test -s $DIR/$tdir/d2/file$i &&
3590 ls -al $DIR/$tdir/d2/file$i && error "file$i's size > 0"
3595 run_test 101 "Shouldn't reassign precreated objs to other files after recovery"
# test_102a..102d: exercise multiple modify RPCs in flight against one MDT
# under four failure modes: request lost (resend), reply lost
# (reconstruction), replay without reconstruction, and replay with
# reconstruction. All four share the same setup: create max_mod_rpcs_in_flight
# files, fire that many parallel chmods while a fail_loc is armed, then
# verify every chmod took effect. Function header lines are elided in this
# chunk; the bodies below start inside test_102a.
3604 [[ $(lctl get_param mdc.*.import |
3605 grep "connect_flags:.*multi_mod_rpc") ]] ||
3606 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
# single-stripe test dir; derive the backing MDT facet from its index
3608 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3609 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3610 facet="mds$((0x$idx + 1))"
3612 # get current value of max_mod_rpcs_in_flight
3613 num=$($LCTL get_param -n \
3614 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3615 # set default value if client does not support multi mod RPCs
3616 [ -z "$num" ] && num=1
3618 echo "creating $num files ..."
3620 for i in $(seq $num); do
3621 touch $DIR/$tdir/file-$i
3624 # drop request on MDT to force resend
3625 #define OBD_FAIL_MDS_REINT_MULTI_NET 0x159
3626 do_facet $facet "$LCTL set_param fail_loc=0x159"
3627 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3628 for i in $(seq $num); do
3629 chmod 0600 $DIR/$tdir/file-$i &
3633 do_facet $facet "$LCTL set_param fail_loc=0"
3634 for pid in $pids; do
3635 wait $pid || error "chmod failed"
3637 echo "done ($(date +%H:%M:%S))"
3639 # check chmod succeed
3640 for i in $(seq $num); do
3641 checkstat -vp 0600 $DIR/$tdir/file-$i
3646 run_test 102a "check resend (request lost) with multiple modify RPCs in flight"
# test_102b: as 102a but fail_loc 0x15a drops the MDT *reply*, forcing
# reply reconstruction on resend.
3655 [[ $(lctl get_param mdc.*.import |
3656 grep "connect_flags:.*multi_mod_rpc") ]] ||
3657 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3659 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3660 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3661 facet="mds$((0x$idx + 1))"
3663 # get current value of max_mod_rpcs_in_flight
3664 num=$($LCTL get_param -n \
3665 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3666 # set default value if client does not support multi mod RPCs
3667 [ -z "$num" ] && num=1
3669 echo "creating $num files ..."
3671 for i in $(seq $num); do
3672 touch $DIR/$tdir/file-$i
3675 # drop reply on MDT to force reconstruction
3676 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
3677 do_facet $facet "$LCTL set_param fail_loc=0x15a"
3678 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3679 for i in $(seq $num); do
3680 chmod 0600 $DIR/$tdir/file-$i &
3684 do_facet $facet "$LCTL set_param fail_loc=0"
3685 for pid in $pids; do
3686 wait $pid || error "chmod failed"
3688 echo "done ($(date +%H:%M:%S))"
3690 # check chmod succeed
3691 for i in $(seq $num); do
3692 checkstat -vp 0600 $DIR/$tdir/file-$i
3697 run_test 102b "check resend (reply lost) with multiple modify RPCs in flight"
# test_102c: replay path — a replay_barrier is taken before the chmods so
# the MDT restarts and replays them without reply reconstruction.
3706 [[ $(lctl get_param mdc.*.import |
3707 grep "connect_flags:.*multi_mod_rpc") ]] ||
3708 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3710 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3711 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3712 facet="mds$((0x$idx + 1))"
3714 # get current value of max_mod_rpcs_in_flight
3715 num=$($LCTL get_param -n \
3716 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3717 # set default value if client does not support multi mod RPCs
3718 [ -z "$num" ] && num=1
3720 echo "creating $num files ..."
3722 for i in $(seq $num); do
3723 touch $DIR/$tdir/file-$i
3726 replay_barrier $facet
3729 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
3730 do_facet $facet "$LCTL set_param fail_loc=0x15a"
3731 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3732 for i in $(seq $num); do
3733 chmod 0600 $DIR/$tdir/file-$i &
3737 do_facet $facet "$LCTL set_param fail_loc=0"
3742 for pid in $pids; do
3743 wait $pid || error "chmod failed"
3745 echo "done ($(date +%H:%M:%S))"
3747 # check chmod succeed
3748 for i in $(seq $num); do
3749 checkstat -vp 0600 $DIR/$tdir/file-$i
3754 run_test 102c "check replay w/o reconstruction with multiple mod RPCs in flight"
# test_102d: replay + reconstruction — transactions are synced to disk
# before the MDT failure so replay must reconstruct the dropped replies.
3763 [[ $(lctl get_param mdc.*.import |
3764 grep "connect_flags:.*multi_mod_rpc") ]] ||
3765 { skip "Need MDC with 'multi_mod_rpcs' feature"; return 0; }
3767 $LFS mkdir -c1 $DIR/$tdir || error "mkdir $DIR/$tdir failed"
3768 idx=$(printf "%04x" $($LFS getdirstripe -i $DIR/$tdir))
3769 facet="mds$((0x$idx + 1))"
3771 # get current value of max_mod_rpcs_in_flight
3772 num=$($LCTL get_param -n \
3773 mdc.$FSNAME-MDT$idx-mdc-*.max_mod_rpcs_in_flight)
3774 # set default value if client does not support multi mod RPCs
3775 [ -z "$num" ] && num=1
3777 echo "creating $num files ..."
3779 for i in $(seq $num); do
3780 touch $DIR/$tdir/file-$i
3784 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
3785 do_facet $facet "$LCTL set_param fail_loc=0x15a"
3786 echo "launch $num chmod in parallel ($(date +%H:%M:%S)) ..."
3787 for i in $(seq $num); do
3788 chmod 0600 $DIR/$tdir/file-$i &
3793 # write MDT transactions to disk
3794 do_facet $facet "sync; sync; sync"
3796 do_facet $facet "$LCTL set_param fail_loc=0"
3801 for pid in $pids; do
3802 wait $pid || error "chmod failed"
3804 echo "done ($(date +%H:%M:%S))"
3806 # check chmod succeed
3807 for i in $(seq $num); do
3808 checkstat -vp 0600 $DIR/$tdir/file-$i
3813 run_test 102d "check replay & reconstruction with multiple mod RPCs in flight"
# test_103 body: arm OBD_FAIL_MDS_TRACK_OVERFLOW (one-shot, 0x80000162) so
# the MDS exercises the tr->otr_next_id counter overflow path while files
# are created and removed; the MDS must not crash. Function header elided
# in this chunk.
3816 remote_mds_nodsh && skip "remote MDS with nodsh"
3817 [[ "$MDS1_VERSION" -gt $(version_code 2.8.54) ]] ||
3818 skip "Need MDS version 2.8.54+"
3820 #define OBD_FAIL_MDS_TRACK_OVERFLOW 0x162
3821 do_facet mds1 $LCTL set_param fail_loc=0x80000162
3824 createmany -o $DIR/$tdir/t- 30 ||
3825 error "create files on remote directory failed"
3827 rm -rf $DIR/$tdir/t-*
3829 #MDS should crash with tr->otr_next_id overflow
3832 run_test 103 "Check otr_next_id overflow"
# Helper for the test_110* family: verify the striped dir survived
# recovery and still has a stripe on every MDT ($MDSCOUNT stripes).
3835 check_striped_dir_110()
3837 $CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
3838 error "create striped dir failed"
3839 local stripe_count=$($LFS getdirstripe -c $DIR/$tdir/striped_dir)
# The comparison is against $MDSCOUNT, so report $MDSCOUNT in the message
# (previously hardcoded "2", which was misleading for MDSCOUNT > 2).
3840 [ $stripe_count -eq $MDSCOUNT ] ||
3841 error "$stripe_count != $MDSCOUNT after recovery"
# test_110a..110g: create a striped dir across all MDTs, then fail various
# combinations of MDT1/MDT2 and/or the client, and verify via
# check_striped_dir_110 that the dir survives recovery intact. Function
# header lines and the fail/replay_barrier lines are partly elided in this
# chunk; each body's failure scenario is described by its run_test summary.
3845 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3846 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3847 skip "Need MDS version at least 2.7.56"
3849 ([ $FAILURE_MODE == "HARD" ] &&
3850 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3851 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3856 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3859 check_striped_dir_110 || error "check striped_dir failed"
3860 rm -rf $DIR/$tdir || error "rmdir failed"
3864 run_test 110a "DNE: create striped dir, fail MDT1"
# 110b: also unmounts/remounts the client around the failover
3867 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3868 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3869 skip "Need MDS version at least 2.7.56"
3871 ([ $FAILURE_MODE == "HARD" ] &&
3872 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3873 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3878 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3881 zconf_mount $(hostname) $MOUNT
3882 client_up || return 1
3884 check_striped_dir_110 || error "check striped_dir failed"
3886 rm -rf $DIR/$tdir || error "rmdir failed"
3890 run_test 110b "DNE: create striped dir, fail MDT1 and client"
3893 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3894 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3895 skip "Need MDS version at least 2.7.56"
3897 ([ $FAILURE_MODE == "HARD" ] &&
3898 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3899 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3904 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3907 check_striped_dir_110 || error "check striped_dir failed"
3909 rm -rf $DIR/$tdir || error "rmdir failed"
3913 run_test 110c "DNE: create striped dir, fail MDT2"
3916 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3917 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3918 skip "Need MDS version at least 2.7.56"
3920 ([ $FAILURE_MODE == "HARD" ] &&
3921 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3922 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3927 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3930 zconf_mount $(hostname) $MOUNT
3931 client_up || return 1
3933 check_striped_dir_110 || error "check striped_dir failed"
3935 rm -rf $DIR/$tdir || error "rmdir failed"
3939 run_test 110d "DNE: create striped dir, fail MDT2 and client"
3942 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3943 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3944 skip "Need MDS version at least 2.7.56"
3946 ([ $FAILURE_MODE == "HARD" ] &&
3947 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3948 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3953 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3957 zconf_mount $(hostname) $MOUNT
3958 client_up || return 1
3960 check_striped_dir_110 || error "check striped_dir failed"
3962 rm -rf $DIR/$tdir || error "rmdir failed"
3966 run_test 110e "DNE: create striped dir, uncommit on MDT2, fail client/MDT1/MDT2"
3969 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3970 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3971 skip "Need MDS version at least 2.7.56"
3973 ([ $FAILURE_MODE == "HARD" ] &&
3974 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3975 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
3981 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
3984 check_striped_dir_110 || error "check striped_dir failed"
3986 rm -rf $DIR/$tdir || error "rmdir failed"
3990 run_test 110f "DNE: create striped dir, fail MDT1/MDT2"
3993 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
3994 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
3995 skip "Need MDS version at least 2.7.56"
3997 ([ $FAILURE_MODE == "HARD" ] &&
3998 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
3999 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4004 $LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
4008 zconf_mount $(hostname) $MOUNT
4009 client_up || return 1
4011 check_striped_dir_110 || error "check striped_dir failed"
4013 rm -rf $DIR/$tdir || error "rmdir failed"
4017 run_test 110g "DNE: create striped dir, uncommit on MDT1, fail client/MDT1/MDT2"
# test_111a..111g: mirror of the 110 family for *unlink*: create a
# 2-stripe dir, remove it, fail various MDT/client combinations, and
# verify the dir stays deleted after recovery ($CHECKSTAT must fail).
# Function headers and fail/replay_barrier lines partly elided in this
# chunk; each scenario is described by its run_test summary.
4020 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4021 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4022 skip "Need MDS version at least 2.7.56"
4024 ([ $FAILURE_MODE == "HARD" ] &&
4025 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4026 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4030 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4032 rm -rf $DIR/$tdir/striped_dir
4035 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4036 error "striped dir still exists"
4039 run_test 111a "DNE: unlink striped dir, fail MDT1"
4042 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4043 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4044 skip "Need MDS version at least 2.7.56"
4046 ([ $FAILURE_MODE == "HARD" ] &&
4047 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4048 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4052 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4054 rm -rf $DIR/$tdir/striped_dir
4057 zconf_mount $(hostname) $MOUNT
4058 client_up || return 1
4060 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4061 error "striped dir still exists"
4064 run_test 111b "DNE: unlink striped dir, fail MDT2"
4067 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4068 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4069 skip "Need MDS version at least 2.7.56"
4071 ([ $FAILURE_MODE == "HARD" ] &&
4072 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4073 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4077 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4079 rm -rf $DIR/$tdir/striped_dir
4083 zconf_mount $(hostname) $MOUNT
4084 client_up || return 1
4085 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4086 error "striped dir still exists"
4089 run_test 111c "DNE: unlink striped dir, uncommit on MDT1, fail client/MDT1/MDT2"
4092 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4093 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4094 skip "Need MDS version at least 2.7.56"
4096 ([ $FAILURE_MODE == "HARD" ] &&
4097 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4098 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4102 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4104 rm -rf $DIR/$tdir/striped_dir
4108 zconf_mount $(hostname) $MOUNT
4109 client_up || return 1
4110 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4111 error "striped dir still exists"
4115 run_test 111d "DNE: unlink striped dir, uncommit on MDT2, fail client/MDT1/MDT2"
4118 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4119 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4120 skip "Need MDS version at least 2.7.56"
4122 ([ $FAILURE_MODE == "HARD" ] &&
4123 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4124 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4128 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4130 rm -rf $DIR/$tdir/striped_dir
4133 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4134 error "striped dir still exists"
4137 run_test 111e "DNE: unlink striped dir, uncommit on MDT2, fail MDT1/MDT2"
4140 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4141 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4142 skip "Need MDS version at least 2.7.56"
4144 ([ $FAILURE_MODE == "HARD" ] &&
4145 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4146 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4150 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4152 rm -rf $DIR/$tdir/striped_dir
4155 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4156 error "striped dir still exists"
4159 run_test 111f "DNE: unlink striped dir, uncommit on MDT1, fail MDT1/MDT2"
4162 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4163 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4164 skip "Need MDS version at least 2.7.56"
4166 ([ $FAILURE_MODE == "HARD" ] &&
4167 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4168 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4172 $LFS mkdir -i1 -c2 $DIR/$tdir/striped_dir
4175 rm -rf $DIR/$tdir/striped_dir
4177 $CHECKSTAT -t dir $DIR/$tdir/striped_dir &&
4178 error "striped dir still exists"
4181 run_test 111g "DNE: unlink striped dir, fail MDT1/MDT2"
# Helper for the test_112* family: lay out a cross-MDT rename scenario —
# src_dir/src_child on MDT1 (with file "a" inside), tgt_dir on MDT2 and
# tgt_dir/tgt_child on MDT3 — so a rename of src_child over tgt_child
# spans four MDTs.
4183 test_112_rename_prepare() {
4184 mkdir_on_mdt0 $DIR/$tdir
4185 mkdir -p $DIR/$tdir/src_dir
4186 $LFS mkdir -i 1 $DIR/$tdir/src_dir/src_child ||
4187 error "create remote source failed"
4189 touch $DIR/$tdir/src_dir/src_child/a
4191 $LFS mkdir -i 2 $DIR/$tdir/tgt_dir ||
4192 error "create remote target dir failed"
4194 $LFS mkdir -i 3 $DIR/$tdir/tgt_dir/tgt_child ||
4195 error "create remote target child failed"
# Post-rename check helper (its header line is elided in this chunk):
# the source child must be gone and file "a" must have moved with it into
# the target child.
4200 $CHECKSTAT -t dir $DIR/$tdir/src_dir/src_child &&
4201 error "src_child still exists after rename"
4203 $CHECKSTAT -t file $DIR/$tdir/tgt_dir/tgt_child/a ||
4204 error "missing file(a) after rename"
# test_112a..112n: cross-MDT rename with failover. Each subtest runs
# test_112_rename_prepare, takes replay barriers, performs the rename,
# then fails the MDT combination named in its run_test summary and checks
# the rename replayed correctly. Function headers and the replay_barrier/
# fail lines are elided in this chunk; the subtests differ only in which
# MDTs are failed.
4208 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4209 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4210 skip "Need MDS version at least 2.7.56"
4212 ([ $FAILURE_MODE == "HARD" ] &&
4213 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4214 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4217 test_112_rename_prepare
4220 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4221 error "rename dir cross MDT failed!"
4225 rm -rf $DIR/$tdir || error "rmdir failed"
4227 run_test 112a "DNE: cross MDT rename, fail MDT1"
4230 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4231 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4232 skip "Need MDS version at least 2.7.56"
4234 ([ $FAILURE_MODE == "HARD" ] &&
4235 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4236 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4239 test_112_rename_prepare
4242 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4243 error "rename dir cross MDT failed!"
4248 rm -rf $DIR/$tdir || error "rmdir failed"
4250 run_test 112b "DNE: cross MDT rename, fail MDT2"
4253 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4254 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4255 skip "Need MDS version at least 2.7.56"
4257 ([ $FAILURE_MODE == "HARD" ] &&
4258 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4259 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4262 test_112_rename_prepare
4265 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4266 error "rename dir cross MDT failed!"
4271 rm -rf $DIR/$tdir || error "rmdir failed"
4273 run_test 112c "DNE: cross MDT rename, fail MDT3"
4276 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4277 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4278 skip "Need MDS version at least 2.7.56"
4280 ([ $FAILURE_MODE == "HARD" ] &&
4281 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4282 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4285 test_112_rename_prepare
4288 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4289 error "rename dir cross MDT failed!"
4294 rm -rf $DIR/$tdir || error "rmdir failed"
4296 run_test 112d "DNE: cross MDT rename, fail MDT4"
# 112e..112j: fail two MDTs at once
4299 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4300 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4301 skip "Need MDS version at least 2.7.56"
4303 ([ $FAILURE_MODE == "HARD" ] &&
4304 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4305 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4308 test_112_rename_prepare
4312 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4313 error "rename dir cross MDT failed!"
4318 rm -rf $DIR/$tdir || error "rmdir failed"
4320 run_test 112e "DNE: cross MDT rename, fail MDT1 and MDT2"
4323 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4324 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4325 skip "Need MDS version at least 2.7.56"
4327 ([ $FAILURE_MODE == "HARD" ] &&
4328 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4329 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4332 test_112_rename_prepare
4336 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4337 error "rename dir cross MDT failed!"
4342 rm -rf $DIR/$tdir || error "rmdir failed"
4344 run_test 112f "DNE: cross MDT rename, fail MDT1 and MDT3"
4347 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4348 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4349 skip "Need MDS version at least 2.7.56"
4351 ([ $FAILURE_MODE == "HARD" ] &&
4352 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4353 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4356 test_112_rename_prepare
4360 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4361 error "rename dir cross MDT failed!"
4366 rm -rf $DIR/$tdir || error "rmdir failed"
4368 run_test 112g "DNE: cross MDT rename, fail MDT1 and MDT4"
4371 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4372 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4373 skip "Need MDS version at least 2.7.56"
4375 ([ $FAILURE_MODE == "HARD" ] &&
4376 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4377 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4380 test_112_rename_prepare
4384 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4385 error "rename dir cross MDT failed!"
4390 rm -rf $DIR/$tdir || error "rmdir failed"
4392 run_test 112h "DNE: cross MDT rename, fail MDT2 and MDT3"
4395 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4396 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4397 skip "Need MDS version at least 2.7.56"
4399 ([ $FAILURE_MODE == "HARD" ] &&
4400 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4401 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4404 test_112_rename_prepare
4408 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4409 error "rename dir cross MDT failed!"
4414 rm -rf $DIR/$tdir || error "rmdir failed"
4416 run_test 112i "DNE: cross MDT rename, fail MDT2 and MDT4"
4419 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4420 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4421 skip "Need MDS version at least 2.7.56"
4423 ([ $FAILURE_MODE == "HARD" ] &&
4424 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4425 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4428 test_112_rename_prepare
4432 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4433 error "rename dir cross MDT failed!"
4438 rm -rf $DIR/$tdir || error "rmdir failed"
4440 run_test 112j "DNE: cross MDT rename, fail MDT3 and MDT4"
# 112k..112n: fail three MDTs at once
4443 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4444 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4445 skip "Need MDS version at least 2.7.56"
4447 ([ $FAILURE_MODE == "HARD" ] &&
4448 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4449 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4452 test_112_rename_prepare
4457 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4458 error "rename dir cross MDT failed!"
4463 rm -rf $DIR/$tdir || error "rmdir failed"
4465 run_test 112k "DNE: cross MDT rename, fail MDT1,MDT2,MDT3"
4468 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4469 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4470 skip "Need MDS version at least 2.7.56"
4472 ([ $FAILURE_MODE == "HARD" ] &&
4473 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4474 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4477 test_112_rename_prepare
4482 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4483 error "rename dir cross MDT failed!"
4488 rm -rf $DIR/$tdir || error "rmdir failed"
4490 run_test 112l "DNE: cross MDT rename, fail MDT1,MDT2,MDT4"
4493 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4494 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4495 skip "Need MDS version at least 2.7.56"
4497 ([ $FAILURE_MODE == "HARD" ] &&
4498 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4499 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4502 test_112_rename_prepare
4507 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4508 error "rename dir cross MDT failed!"
4513 rm -rf $DIR/$tdir || error "rmdir failed"
4515 run_test 112m "DNE: cross MDT rename, fail MDT1,MDT3,MDT4"
4518 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4519 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4520 skip "Need MDS version at least 2.7.56"
4522 ([ $FAILURE_MODE == "HARD" ] &&
4523 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4524 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4527 test_112_rename_prepare
4532 mrename $DIR/$tdir/src_dir/src_child $DIR/$tdir/tgt_dir/tgt_child ||
4533 error "rename dir cross MDT failed!"
4538 rm -rf $DIR/$tdir || error "rmdir failed"
4540 run_test 112n "DNE: cross MDT rename, fail MDT2,MDT3,MDT4"
# test_115 body: round-robin failover during striped-dir create/unlink.
# For each MDT in turn: take a replay barrier, create 5 striped test dirs
# rooted on that MDT, fail it, then verify the dirs replayed and remove
# them. Function header and some interior lines elided in this chunk.
4543 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4544 [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
4545 skip "Need MDS version at least 2.7.56"
4547 ([ $FAILURE_MODE == "HARD" ] &&
4548 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4549 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
# rotate the failed MDT index each outer iteration
4557 for ((j=0;j<$((MDSCOUNT));j++)); do
4558 fail_index=$((fail_index+1))
4559 index=$((fail_index % MDSCOUNT))
4560 replay_barrier mds$((index + 1))
4561 for ((i=0;i<5;i++)); do
4562 test_mkdir -i$index -c$MDSCOUNT $DIR/$tdir/test_$i ||
4563 error "create striped dir $DIR/$tdir/test_$i"
4566 fail mds$((index + 1))
4567 for ((i=0;i<5;i++)); do
4568 checkstat -t dir $DIR/$tdir/test_$i ||
4569 error "$DIR/$tdir/test_$i does not exist!"
4571 rm -rf $DIR/$tdir/test_* ||
4575 run_test 115 "failover for create/unlink striped directory"
# test_116a body: large update log recovery on the *master* MDT — arm
# OBD_FAIL_SPLIT_UPDATE_REC (one-shot 0x80001702) on mds1, create a
# striped dir across all MDTs, and verify it survives recovery.
# Function header elided in this chunk.
4578 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4579 [ "$MDS1_VERSION" -lt $(version_code 2.7.55) ] &&
4580 skip "Do not support large update log before 2.7.55" &&
4582 ([ $FAILURE_MODE == "HARD" ] &&
4583 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4584 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4588 mkdir_on_mdt0 $DIR/$tdir
4591 # OBD_FAIL_SPLIT_UPDATE_REC 0x1702
4592 do_facet mds1 "lctl set_param fail_loc=0x80001702"
4593 $LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir
# fixed error-message typo: "stried_dir does not exists"
4596 $CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
4597 error "striped_dir does not exist"
4599 run_test 116a "large update log master MDT recovery"
# test_116b: same scenario with the fail_loc armed on the *slave* MDT
# (mds2). Function header elided in this chunk.
4602 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4603 [ "$MDS1_VERSION" -lt $(version_code 2.7.55) ] &&
4604 skip "Do not support large update log before 2.7.55" &&
4607 ([ $FAILURE_MODE == "HARD" ] &&
4608 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4609 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4613 mkdir_on_mdt0 $DIR/$tdir
4616 # OBD_FAIL_SPLIT_UPDATE_REC 0x1702
4617 do_facet mds2 "lctl set_param fail_loc=0x80001702"
4618 $LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir
# fixed error-message typo: "stried_dir does not exists"
4621 $CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
4622 error "striped_dir does not exist"
4624 run_test 116b "large update log slave MDT recovery"
# test_117 body (LU-7531): take replay barriers on *all* MDTs so the
# client replays requests to every MDT simultaneously after failover;
# recovery must not deadlock. Function header and the fail/unlink replay
# lines are partly elided in this chunk.
4627 [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
4628 ([ $FAILURE_MODE == "HARD" ] &&
4629 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4630 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4636 $LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/remote_dir
4637 $LFS setdirstripe -i1 -c$MDSCOUNT $DIR/$tdir/remote_dir_1
4640 # Let's set rdonly on all MDTs, so client will send
4641 # replay requests on all MDTs and replay these requests
4642 # at the same time. This test will verify the recovery
4643 # will not be deadlock in this case, LU-7531.
# build a comma-separated "mds1,mds2,..." facet list while taking a
# replay barrier on each MDT
4644 for ((index = 0; index < $((MDSCOUNT)); index++)); do
4645 replay_barrier mds$((index + 1))
4646 if [ -z $mds_indexs ]; then
4647 mds_indexs="${mds_indexs}mds$((index+1))"
4649 mds_indexs="${mds_indexs},mds$((index+1))"
4653 rm -rf $DIR/$tdir/remote_dir
4654 rm -rf $DIR/$tdir/remote_dir_1
4658 rm -rf $DIR/$tdir || error "rmdir failed"
4660 run_test 117 "DNE: cross MDT unlink, fail MDT1 and MDT2"
4663 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4664 [ "$MDS1_VERSION" -lt $(version_code 2.7.64) ] &&
4665 skip "Do not support large update log before 2.7.64" &&
4670 $LFS setdirstripe -c2 $DIR/$tdir/striped_dir ||
4671 error "setdirstripe fails"
4672 $LFS setdirstripe -c2 $DIR/$tdir/striped_dir1 ||
4673 error "setdirstripe fails 1"
4674 rm -rf $DIR/$tdir/striped_dir* || error "rmdir fails"
4676 # OBD_FAIL_INVALIDATE_UPDATE 0x1705
4677 do_facet mds1 "lctl set_param fail_loc=0x1705"
4678 $LFS setdirstripe -c2 $DIR/$tdir/striped_dir
4679 $LFS setdirstripe -c2 $DIR/$tdir/striped_dir1
4680 do_facet mds1 "lctl set_param fail_loc=0x0"
4683 $LFS setdirstripe -c2 $DIR/$tdir/striped_dir
4684 $LFS setdirstripe -c2 $DIR/$tdir/striped_dir1
4689 run_test 118 "invalidate osp update will not cause update log corruption"
# -- test_119 fragment: function header, loop terminators and the failover
# -- call are outside this excerpt --
4692 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4693 [ "$MDS1_VERSION" -lt $(version_code 2.7.64) ] &&
4694 skip "Do not support large update log before 2.7.64" &&
# Save the MDT's recovery_time_hard so it can be restored after the test.
# NOTE(review): `local var=$(cmd)` masks the command's exit status
# (shellcheck SC2155); splitting declaration and assignment would be
# safer, but that is a code change and is only flagged here.
4697 local hard_timeout=$(do_facet mds1 \
4698 "lctl get_param -n mdt.$FSNAME-MDT0000.recovery_time_hard")
4700 local clients=${CLIENTS:-$HOSTNAME}
4701 local time_min=$(recovery_time_min)
4703 mkdir_on_mdt0 $DIR/$tdir
4704 mkdir $DIR/$tdir/tmp
4705 rmdir $DIR/$tdir/tmp
# Queue up 20 striped-dir creations that will need DNE update replay.
4708 mkdir $DIR/$tdir/dir_1
4709 for ((i = 0; i < 20; i++)); do
4710 $LFS setdirstripe -i0 -c2 $DIR/$tdir/stripe_dir-$i
# Delay target replay so normal replay times out while DNE update
# recovery keeps going; 0x80000000 bit presumably = fire once — confirm.
4717 #define OBD_FAIL_TGT_REPLAY_DELAY 0x714
4718 do_facet mds1 $LCTL set_param fail_loc=0x80000714
4719 #sleep (timeout + 5), so mds will evict the client exports,
4720 #but DNE update recovery will keep going.
4721 do_facet mds1 $LCTL set_param fail_val=$((time_min + 5))
4723 mount_facet mds1 "-o recovery_time_hard=$time_min"
4725 wait_clients_import_state "$clients" mds1 FULL
4727 clients_up || clients_up || error "failover df: $?"
4729 #revert back the hard timeout
4730 do_facet mds1 $LCTL set_param \
4731 mdt.$FSNAME-MDT0000.recovery_time_hard=$hard_timeout
# Every striped dir must have been replayed with both stripes intact.
4733 for ((i = 0; i < 20; i++)); do
4734 stripe_count=$($LFS getdirstripe -c $DIR/$tdir/stripe_dir-$i)
4735 [ $stripe_count == 2 ] || {
4736 error "stripe_dir-$i creation replay fails"
4741 run_test 119 "timeout of normal replay does not cause DNE replay fails "
# -- test_120 fragment: function header, loop/block terminators, and the
# -- abort-recovery step (between 4757 and 4764) are outside this excerpt --
4744 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4745 [ "$MDS1_VERSION" -lt $(version_code 2.7.64) ] &&
4746 skip "Do not support large update log before 2.7.64" &&
4749 mkdir_on_mdt0 $DIR/$tdir
# _nosync barrier: nothing after this point reaches disk, so an aborted
# recovery must discard both the plain and the striped creations.
4750 replay_barrier_nosync mds1
4751 for ((i = 0; i < 20; i++)); do
4752 mkdir $DIR/$tdir/dir-$i || {
4753 error "create dir-$i fails"
4756 $LFS setdirstripe -i0 -c2 $DIR/$tdir/stripe_dir-$i || {
4757 error "create stripe_dir-$i fails"
# After fail-abort, neither normal nor DNE replay may resurrect the dirs.
4764 for ((i = 0; i < 20; i++)); do
4765 [ ! -e "$DIR/$tdir/dir-$i" ] || {
4766 error "dir-$i still exists"
4769 [ ! -e "$DIR/$tdir/stripe_dir-$i" ] || {
4770 error "stripe_dir-$i still exists"
4775 run_test 120 "DNE fail abort should stop both normal and DNE replay"
# -- test_121 fragment: function header, the failover call, and the lines
# -- defining $clients / $mpid are outside this excerpt --
4778 [ "$MDS1_VERSION" -lt $(version_code 2.10.90) ] &&
4779 skip "Don't support it before 2.11" &&
# Save adaptive-timeout max so it can be restored at the end.
4782 local at_max_saved=$(at_max_get mds)
4784 touch $DIR/$tfile || error "touch $DIR/$tfile failed"
4785 cancel_lru_locks mdc
# Hold a shared lock open in the background (multiop 's_s') so there is a
# lock to replay; $mpid is presumably captured on an elided line — confirm.
4787 multiop_bg_pause $DIR/$tfile s_s || error "multiop $DIR/$tfile failed"
# Keep unused locks alive across replay so lock replay actually happens.
4790 lctl set_param -n ldlm.cancel_unused_locks_before_replay "0"
4796 #define OBD_FAIL_TGT_RECOVERY_REQ_RACE 0x721
4797 do_facet $SINGLEMDS "lctl set_param fail_loc=0x721 fail_val=0"
4801 wait_clients_import_state "$clients" mds1 FULL
4802 clients_up || clients_up || error "failover df: $?"
4805 wait $mpid || error "multiop_bg_pause pid failed"
# Restore fail_loc, the lock-cancel tunable, and at_max.
4807 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
4808 lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
4809 at_max_set $at_max_saved mds
4812 run_test 121 "lock replay timed out and race"
# -- test_130a fragment: function header and the fail/recover call
# -- (between 4819 and 4822) are outside this excerpt --
4815 [ "$MDS1_VERSION" -lt $(version_code 2.10.90) ] &&
4816 skip "Do not support Data-on-MDT before 2.11"
# Create a DoM file after the barrier; its layout must survive replay.
4818 replay_barrier $SINGLEMDS
4819 $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile
4822 [ $($LFS getstripe -L $DIR/$tfile) == "mdt" ] ||
4823 error "Fail to replay DoM file creation"
4825 run_test 130a "DoM file create (setstripe) replay"
# -- test_130b fragment: function header and the fail/recover call
# -- (between 4834 and 4837) are outside this excerpt --
4828 [ "$MDS1_VERSION" -lt $(version_code 2.10.90) ] &&
4829 skip "Do not support Data-on-MDT before 2.11"
# The DoM layout is set on the directory; the file created after the
# barrier inherits it, and the inherited layout must survive replay.
4831 mkdir_on_mdt0 $DIR/$tdir
4832 $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tdir
4833 replay_barrier $SINGLEMDS
4834 touch $DIR/$tdir/$tfile
4837 [ $($LFS getstripe -L $DIR/$tdir/$tfile) == "mdt" ] ||
4838 error "Fail to replay DoM file creation"
4840 run_test 130b "DoM file create (inherited) replay"
# -- test_131a fragment: function header and the fail/recover call
# -- (between 4849 and 4852) are outside this excerpt --
4843 [ "$MDS1_VERSION" -lt $(version_code 2.10.90) ] &&
4844 skip "Do not support Data-on-MDT before 2.11"
4846 $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile
4847 replay_barrier $SINGLEMDS
# Write 8 bytes into the DoM region after the barrier; the write lock is
# deliberately NOT canceled, so it must be replayed with the data intact.
4848 echo "dom_data" | dd of=$DIR/$tfile bs=8 count=1
4849 # lock is not canceled and will be replayed
4852 [ $(cat $DIR/$tfile) == "dom_data" ] ||
4853 error "Wrong file content after failover"
4855 run_test 131a "DoM file write lock replay"
# -- test_131b fragment: function header, part of the grant comment, the
# -- dd error branch, and the fail/recover call are outside this excerpt --
4858 [ "$MDS1_VERSION" -lt $(version_code 2.10.90) ] &&
4859 skip "Do not support Data-on-MDT before 2.11"
# Pre-write a scratch DoM file to refresh space grants, so the real write
# after replay_barrier does not stall on a grant request (comment
# continuation at original line 4862 is elided here).
4861 # refresh grants so write after replay_barrier doesn't
4863 $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile-2
4864 stack_trap "rm -f $DIR/$tfile-2"
4865 dd if=/dev/zero of=$DIR/$tfile-2 bs=64k count=2 ||
4867 $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile
4868 replay_barrier $SINGLEMDS
# Unlike 131a, the lock IS canceled here, so the data itself (not the
# lock) must be resent/replayed after failover.
4869 echo "dom_data" | dd of=$DIR/$tfile bs=8 count=1
4870 cancel_lru_locks mdc
4874 [ $(cat $DIR/$tfile) == "dom_data" ] ||
4875 error "Wrong file content after failover"
4877 run_test 131b "DoM file write replay"
# -- test_132a fragment: function header, the fail/recover call (between
# -- 4892 and 4896), and the if/fi closers are outside this excerpt --
4880 [ "$MDS1_VERSION" -lt $(version_code 2.12.0) ] &&
4881 skip "Need MDS version 2.12.0 or later"
# Two-component PFL file: first component 1M/1-stripe, rest 2-stripe.
4883 $LFS setstripe -E 1M -c 1 -E EOF -c 2 $DIR/$tfile
4884 replay_barrier $SINGLEMDS
4885 # writing past the first component's size triggers instantiation of
4885 # the next component
4886 dd if=/dev/urandom of=$DIR/$tfile bs=1M count=1 seek=1 ||
4887 error "dd to $DIR/$tfile failed"
4888 lfs getstripe $DIR/$tfile
# Record a checksum before failover; component #2 (index -I2) must show
# lmm_objects, i.e. be instantiated.
4890 cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
4891 $LFS getstripe -I2 $DIR/$tfile | grep -q lmm_objects ||
4892 error "Component #1 was not instantiated"
# After recovery the instantiation and the data must both be replayed.
4896 lfs getstripe $DIR/$tfile
4897 $LFS getstripe -I2 $DIR/$tfile | grep -q lmm_objects ||
4898 error "Component #1 instantiation was not replayed"
4899 cksum2=$(md5sum $DIR/$tfile | awk '{print $1}')
4900 if [ $cksum != $cksum2 ] ; then
4901 error_noexit "New cksum $cksum2 does not match original $cksum"
4904 run_test 132a "PFL new component instantiate replay"
# -- test_133 fragment: function header, the mds1 failover, and the line
# -- capturing $pid from the background cp are outside this excerpt --
4907 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
4908 ([ $FAILURE_MODE == "HARD" ] &&
4909 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
4910 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
4913 local remote_dir=$DIR/$tdir/remote_dir
4915 mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir failed"
4916 $LFS mkdir -i 1 $remote_dir
# Clear MDT0001's sequence space so the next allocation must talk to the
# sequence server over LWP during the failover window.
4919 do_facet mds2 $LCTL set_param seq.srv*MDT0001.space=clear
4921 zconf_mount $(hostname) $MOUNT
4922 client_up || return 1
# Delay every MDS request on mds1 (fail_val=700); the 0x80000000 bit
# presumably means the fail_loc fires once — confirm in obd_support.h.
4924 #define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
4926 do_facet mds1 $LCTL set_param fail_val=700 fail_loc=0x80000123
# The cp runs in background across the failover; $pid is presumably
# assigned on an elided line near 4928 — verify in the full file.
4927 cp /etc/hosts $remote_dir/file &
4933 wait $pid || error "cp failed"
4934 rm -rf $DIR/$tdir || error "rmdir failed"
4938 run_test 133 "check resend of ongoing requests for lwp during failover"
# -- test_134 fragment: function header, pool creation/cleanup, the replay
# -- barrier and the failover call are outside this excerpt --
4941 [ $OSTCOUNT -lt 2 ] && skip "needs >= 2 OSTs" && return 0
4942 (( $MDS1_VERSION >= $(version_code 2.13.56) )) ||
4943 skip "need MDS version >= 2.13.56"
# Pool "pool_134" holds only OST index 1; A gets a plain pooled layout,
# B gets a one-component PFL layout in the same pool.
4946 pool_add_targets pool_134 1 1
4948 mkdir -p $DIR/$tdir/{A,B}
4949 $LFS setstripe -p pool_134 $DIR/$tdir/A
4950 $LFS setstripe -E EOF -p pool_134 $DIR/$tdir/B
4954 touch $DIR/$tdir/A/$tfile || error "touch non-pfl file failed"
4955 touch $DIR/$tdir/B/$tfile || error "touch pfl failed"
# Both pool-backed creations must survive replay.
4959 [ -f $DIR/$tdir/A/$tfile ] || error "non-pfl file does not exist"
4960 [ -f $DIR/$tdir/B/$tfile ] || error "pfl file does not exist"
4962 run_test 134 "replay creation of a file created in a pool"
# -- test_135 fragment: function header, loop terminator, the ost1
# -- failover, the background sync capturing $PID, and the second ost1
# -- restart are outside this excerpt --
4966 mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
4969 $LFS setstripe -S $((128 * 1024)) -i 0 $DIR/$tdir
4973 # Create 20 files so we have 20 ost locks
4974 for i in $(seq 20) ; do
4975 echo blah > $DIR/$tdir/file.${i}
# Make server-side lock replay hang (fail_val=20 delay) so the failover
# never completes and the client-side sync stays blocked.
4983 #define OBD_FAIL_TGT_REPLAY_RECONNECT 0x32d
4984 # Make sure lock replay server side never completes and errors out.
4985 do_facet ost1 "$LCTL set_param fail_val=20"
4986 do_facet ost1 "$LCTL set_param fail_loc=0x32d"
4990 # Now make sure we notice
# $PID presumably holds a backgrounded sync started on an elided line
# near 4991 — verify in the full file. kill -0 only tests liveness.
4993 sleep 20 # should we do something proactive to make reconnects go?
4994 kill -0 $PID || error "Unexpected sync success"
# Clear the fault and prove the filesystem is usable again.
5001 do_facet ost1 "$LCTL set_param fail_loc=0"
5003 echo blah > $DIR/$tdir/file.test2
5007 run_test 135 "Server failure in lock replay phase"
# Suite epilogue: unmount/clean up the Lustre setup created by
# check_and_setup_lustre at the top of the file.
5010 check_and_cleanup_lustre