5 PTLDEBUG=${PTLDEBUG:--1}
6 MOUNT_2=${MOUNT_2:-"yes"}
7 LR_READER=${LR_READER:-"$LUSTRE/utils/lr_reader"}
9 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
10 . $LUSTRE/tests/test-framework.sh
14 remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
16 ALWAYS_EXCEPT="$REPLAY_DUAL_EXCEPT "
17 # bug number for skipped test: LU-2012 LU-8333
18 ALWAYS_EXCEPT+=" 14b 21b"
19 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
21 [[ "$mds1_FSTYPE" == zfs ]] &&
22 # bug number for skipped test: LU-2230
23 ALWAYS_EXCEPT="$ALWAYS_EXCEPT 21b"
26 [ "$SLOW" = "no" ] && EXCEPT_SLOW="21b "
29 check_and_setup_lustre
31 MOUNTED=$(mounted_lustre_filesystems)
32 if ! $(echo $MOUNTED' ' | grep -w -q $MOUNT2' '); then
33 zconf_mount $HOSTNAME $MOUNT2
38 rm -rf $DIR/[df][0-9]*
40 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
42 # if there is no CLIENT1 defined, some tests can be run on localhost
43 CLIENT1=${CLIENT1:-$HOSTNAME}
44 # if CLIENT2 doesn't exist then use CLIENT1 instead
45 # All tests should use CLIENT2 with MOUNT2 only, therefore it will work if
47 # The exception is tests which need two separate nodes
48 CLIENT2=${CLIENT2:-$CLIENT1}
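# Illustrative sketch (not part of the original flow): a test that genuinely
# needs two separate nodes can guard itself explicitly, the same way test_29
# below does, e.g.:
#	[ "$CLIENT1" == "$CLIENT2" ] &&
#		skip "clients must be on different nodes" && return 0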
50 # new sequence needed for MDS < v2_15_61-226-gf00d2467fc
51 if (( $MDS1_VERSION < $(version_code 2.15.61.226) )); then
55 LU482_FAILED=$(mktemp -u $TMP/$TESTSUITE.lu482.XXXXXX)
57 echo "Check file is LU482_FAILED=$LU482_FAILED"
58 touch $MOUNT2/$tfile-A # force sync FLD/SEQ update before barrier
59 replay_barrier $SINGLEMDS
60 #define OBD_FAIL_PTLRPC_FINISH_REPLAY | CFS_FAIL_ONCE
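# Note (based on the usual CFS_FAIL convention, spelled out here for clarity):
# the 0x80000000 bit in the fail_loc below is CFS_FAIL_ONCE, so the injected
# failure fires a single time and then clears itself, i.e.
#	0x80000514 == OBD_FAIL_PTLRPC_FINISH_REPLAY | CFS_FAIL_ONCE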
62 createmany -o $MOUNT1/$tfile- 50
63 $LCTL set_param fail_loc=0x80000514
64 facet_failover $SINGLEMDS
65 [ -f "$LU482_FAILED" ] && skip "LU-482 failure" && return 0
66 client_up || (sleep 10; client_up) || (sleep 10; client_up) ||
67 error "reconnect failed"
69 client_up || (sleep 10; client_up) || (sleep 10; client_up) ||
70 error "reconnect failed"
71 zconf_mount `hostname` $MOUNT2 || error "mount2 failed"
72 unlinkmany $MOUNT1/$tfile- 50 || error "unlinkmany failed"
73 rm $MOUNT2/$tfile || error "rm $MOUNT2/$tfile failed"
74 rm $MOUNT2/$tfile-A || error "rm $MOUNT2/$tfile-A failed"
76 run_test 0a "expired recovery with lost client"
78 if [ -f "$LU482_FAILED" ]; then
79 log "Found check file $LU482_FAILED, aborting test script"
80 rm -vf "$LU482_FAILED"
81 complete_test $SECONDS
82 do_nodes $CLIENTS umount -f $MOUNT2 || true
83 do_nodes $CLIENTS umount -f $MOUNT || true
84 # copied from stopall, but avoid the MDS recovery
85 for num in `seq $OSTCOUNT`; do
87 rm -f $TMP/ost${num}active
89 if ! combined_mgs_mds ; then
97 replay_barrier $SINGLEMDS
99 touch $MOUNT1/$tfile-2
101 facet_failover $SINGLEMDS
103 zconf_mount `hostname` $MOUNT1 || error "mount1 failed"
104 zconf_mount `hostname` $MOUNT2 || error "mount2 failed"
105 # it is uncertain if file-2 exists or not, remove it if it does
106 checkstat $MOUNT1/$tfile-2 && rm $MOUNT1/$tfile-2
107 checkstat $MOUNT2/$tfile && return 2
110 run_test 0b "lost client during waiting for next transno"
114 replay_barrier $SINGLEMDS
118 checkstat $MOUNT2/a || return 1
119 checkstat $MOUNT1/b || return 2
120 rm $MOUNT2/a $MOUNT1/b
121 checkstat $MOUNT1/a && return 3
122 checkstat $MOUNT2/b && return 4
126 run_test 1 "|X| simple create"
130 replay_barrier $SINGLEMDS
134 checkstat $MOUNT2/adir || return 1
136 checkstat $MOUNT2/adir && return 2
139 run_test 2 "|X| mkdir adir"
142 replay_barrier $SINGLEMDS
144 mkdir $MOUNT2/adir/bdir
147 checkstat $MOUNT2/adir || return 1
148 checkstat $MOUNT1/adir/bdir || return 2
149 rmdir $MOUNT2/adir/bdir $MOUNT1/adir
150 checkstat $MOUNT1/adir && return 3
151 checkstat $MOUNT2/adir/bdir && return 4
154 run_test 3 "|X| mkdir adir, mkdir adir/bdir "
157 mkdir_on_mdt0 $MOUNT1/adir
158 replay_barrier $SINGLEMDS
159 mkdir $MOUNT1/adir && return 1
160 mkdir $MOUNT2/adir/bdir
163 checkstat $MOUNT2/adir || return 2
164 checkstat $MOUNT1/adir/bdir || return 3
166 rmdir $MOUNT2/adir/bdir $MOUNT1/adir
167 checkstat $MOUNT1/adir && return 4
168 checkstat $MOUNT2/adir/bdir && return 5
171 run_test 4 "|X| mkdir adir (-EEXIST), mkdir adir/bdir "
175 # multiclient version of replay_single.sh/test_8
177 multiop_bg_pause $MOUNT2/a o_tSc || return 1
180 replay_barrier $SINGLEMDS
182 wait $pid || return 1
185 [ -e $MOUNT2/a ] && return 2
188 run_test 5 "open, unlink |X| close"
193 multiop_bg_pause $MOUNT2/a o_c || return 1
195 multiop_bg_pause $MOUNT1/a o_c || return 1
198 replay_barrier $SINGLEMDS
200 wait $pid1 || return 1
204 wait $pid2 || return 1
205 [ -e $MOUNT2/a ] && return 2
208 run_test 6 "open1, open2, unlink |X| close1 [fail $SINGLEMDS] close2"
211 replay_barrier $SINGLEMDS
212 drop_reint_reply "mcreate $MOUNT1/$tfile" || return 1
214 checkstat $MOUNT2/$tfile || return 2
215 rm $MOUNT1/$tfile || return 3
219 run_test 8 "replay of resent request"
222 replay_barrier $SINGLEMDS
223 mcreate $MOUNT1/$tfile-1
224 mcreate $MOUNT2/$tfile-2
225 # drop first reint reply
226 do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
228 do_facet $SINGLEMDS lctl set_param fail_loc=0
230 rm $MOUNT1/$tfile-[1,2] || return 1
234 run_test 9 "resending a replayed create"
237 mcreate $MOUNT1/$tfile-1
238 replay_barrier $SINGLEMDS
239 unlink $MOUNT1/$tfile-1
240 mcreate $MOUNT2/$tfile-2
241 # drop first reint reply
242 do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
244 do_facet $SINGLEMDS lctl set_param fail_loc=0
246 checkstat $MOUNT1/$tfile-1 && return 1
247 checkstat $MOUNT1/$tfile-2 || return 2
252 run_test 10 "resending a replayed unlink"
255 replay_barrier $SINGLEMDS
256 mcreate $DIR1/$tfile-1
257 mcreate $DIR2/$tfile-2
258 mcreate $DIR1/$tfile-3
259 mcreate $DIR2/$tfile-4
260 mcreate $DIR1/$tfile-5
261 # drop all reint replies for a while
262 do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0119
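# (by contrast with the 0x8000xxxx values used elsewhere, 0x0119 carries no
# CFS_FAIL_ONCE bit, so every reint reply keeps being dropped until the
# fail_loc is explicitly cleared below)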
263 # note that with this fail_loc set, facet_failover df will fail
264 facet_failover $SINGLEMDS
266 local clients=${CLIENTS:-$HOSTNAME}
267 wait_clients_import_state "$clients" $SINGLEMDS FULL
269 do_facet $SINGLEMDS $LCTL set_param fail_loc=0
271 rm $DIR1/$tfile-[1-5] || return 1
275 run_test 11 "both clients timeout during replay"
278 replay_barrier $SINGLEMDS
280 multiop_bg_pause $DIR/$tfile mo_c || return 1
283 #define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
284 do_facet $SINGLEMDS lctl set_param fail_loc=0x80000302
285 facet_failover $SINGLEMDS
286 do_facet $SINGLEMDS lctl set_param fail_loc=0
287 clients_up || return 1
290 kill -USR1 $MULTIPID || return 3
291 wait $MULTIPID || return 4
292 $CHECKSTAT -t file $DIR/$tfile || return 2
297 run_test 12 "open resend timeout"
300 multiop_bg_pause $DIR/$tfile mo_c || return 1
303 replay_barrier $SINGLEMDS
305 kill -USR1 $MULTIPID || return 3
306 wait $MULTIPID || return 4
309 do_facet $SINGLEMDS lctl set_param fail_loc=0x80000115
310 facet_failover $SINGLEMDS
311 do_facet $SINGLEMDS lctl set_param fail_loc=0
312 clients_up || return 1
315 $CHECKSTAT -t file $DIR/$tfile || return 2
320 run_test 13 "close resend timeout"
322 # test 14a removed after 18143 because it shouldn't fail anymore and does the same
327 wait_delete_completed
329 local beforeused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
331 mkdir -p $MOUNT1/$tdir
332 $LFS setstripe -i 0 $MOUNT1/$tdir
333 replay_barrier $SINGLEMDS
334 createmany -o $MOUNT1/$tdir/$tfile- 5
336 $LFS setstripe -i 0 $MOUNT2/$tfile-2
337 dd if=/dev/zero of=$MOUNT2/$tfile-2 bs=1M count=5
338 createmany -o $MOUNT1/$tdir/$tfile-3- 5
342 wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
344 # first set of files should have been replayed
345 unlinkmany $MOUNT1/$tdir/$tfile- 5 || error "first unlinks failed"
346 unlinkmany $MOUNT1/$tdir/$tfile-3- 5 || error "second unlinks failed"
348 zconf_mount $HOSTNAME $MOUNT2 || error "mount $MOUNT2 failed"
349 [ -f $MOUNT2/$tfile-2 ] && error "$MOUNT2/$tfile-2 exists!"
351 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
352 wait_delete_completed || error "wait_delete_complete failed"
354 local afterused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
355 log "before $beforeused, after $afterused"
356 # leave some margin for some files/dirs to be modified (OI, llog, etc)
357 [ $afterused -le $((beforeused + $(fs_log_size))) ] ||
358 error "after $afterused > before $beforeused"
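# Worked example with illustrative numbers only: if beforeused=118924 KB and
# fs_log_size reports a 50 KB margin for log/OI churn, the test passes only
# while afterused <= 118924 + 50 = 118974 KB; anything larger suggests
# leftover orphan objects survived on the OSTs.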
360 run_test 14b "delete OST orphans if a gap occurred in objids due to VBR"
362 test_15a() { # was test_15
363 replay_barrier $SINGLEMDS
364 createmany -o $MOUNT1/$tfile- 25
365 createmany -o $MOUNT2/$tfile-2- 1
370 unlinkmany $MOUNT1/$tfile- 25 || return 2
371 [ -e $MOUNT1/$tfile-2-0 ] && error "$tfile-2-0 exists"
373 zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
376 run_test 15a "timeout waiting for lost client during replay, 1 client completes"
379 replay_barrier $SINGLEMDS
380 for ((i = 0; i < 2000; i++)); do
381 echo "data" > "$MOUNT2/${tfile}-$i" ||
382 error "create ${tfile}-$i failed"
388 zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
391 run_test 15c "remove multiple OST orphans"
394 replay_barrier $SINGLEMDS
395 createmany -o $MOUNT1/$tfile- 25
396 createmany -o $MOUNT2/$tfile-2- 1
399 facet_failover $SINGLEMDS
403 unlinkmany $MOUNT1/$tfile- 25 || return 2
405 zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
408 run_test 16 "fail MDS during recovery (3571)"
411 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
413 createmany -o $MOUNT1/$tfile- 25
414 createmany -o $MOUNT2/$tfile-2- 1
416 # Make sure the disconnect is lost
424 unlinkmany $MOUNT1/$tfile- 25 || return 2
426 zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
430 run_test 17 "fail OST during recovery (3571)"
432 # cleanup with blocked enqueue fails until timer elapses (MDS busy), wait for it
435 test_18() { # bug 3822 - evicting client with enqueued lock
437 local DLMTRACE=$(do_facet $SINGLEMDS lctl get_param debug)
438 do_facet $SINGLEMDS lctl set_param debug=+dlmtrace
439 mkdir_on_mdt0 $MOUNT1/$tdir || error "mkdir $MOUNT1/$tdir failed"
440 touch $MOUNT1/$tdir/${tfile}0 || error "touch file failed"
441 statmany -s $MOUNT1/$tdir/$tfile 1 500 &
444 #define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
445 do_facet $SINGLEMDS lctl set_param fail_loc=0x8000030b # hold enqueue
447 #define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
448 do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=0
449 do_facet client lctl set_param fail_loc=0x80000305 # drop cb, evict
451 sleep 0.1 # wait to ensure the first client is the one that will be evicted
452 openfile -f O_RDONLY $MOUNT2/$tdir/$tfile
454 do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=1
455 do_facet $SINGLEMDS lctl debug_kernel |
456 grep "not entering recovery" && error "client not evicted"
457 do_facet client "lctl set_param fail_loc=0"
458 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
460 run_test 18 "ldlm_handle_enqueue succeeds on evicted export (3822)"
462 test_19() { # Bug 10991 - resend of open request does not fail assertion.
463 replay_barrier $SINGLEMDS
464 drop_mdt_ldlm_reply "createmany -o $DIR/$tfile 1" || return 1
466 checkstat $DIR2/${tfile}0 || return 2
467 rm $DIR/${tfile}0 || return 3
471 run_test 19 "resend of open request"
474 local before=$SECONDS
475 replay_barrier $SINGLEMDS
481 zconf_mount $HOSTNAME $DIR2 || error "mount $DIR2 fail"
482 local tier1=$((SECONDS - before))
485 replay_barrier $SINGLEMDS
491 zconf_mount $HOSTNAME $DIR2 || error "mount $DIR2 fail"
492 local tier2=$((SECONDS - before))
494 # the second recovery must take less than 1.5x the original time
495 ((tier2 < tier1 * 6 / 4)) ||
496 error "recovery time $tier2 >= 1.5x original time $tier1"
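# Bash arithmetic is integer-only, so the 1.5x bound is written as *6/4;
# with illustrative numbers, tier1=40s gives a limit of 40 * 6 / 4 = 60s,
# and tier2 must stay strictly below that.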
498 run_test 20 "recovery time is not increasing"
500 # commit on sharing tests
502 local param_file=$TMP/$tfile-params
504 save_lustre_params $SINGLEMDS "mdt.*.commit_on_sharing" > $param_file
505 do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=1
506 touch $MOUNT1/$tfile-1
507 mv $MOUNT2/$tfile-1 $MOUNT2/$tfile-2
508 mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
509 replay_barrier_nosync $SINGLEMDS
512 facet_failover $SINGLEMDS
514 # all renames are replayed
515 unlink $MOUNT1/$tfile-3 || return 2
517 zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
519 do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=0
520 rm -rf $MOUNT1/$tfile-*
521 restore_lustre_params < $param_file
525 run_test 21a "commit on sharing"
529 do_node $CLIENT1 rm -f $MOUNT1/$tfile-*
532 do_node $CLIENT1 touch $MOUNT1/$tfile-1
533 do_node $CLIENT2 mv $MOUNT1/$tfile-1 $MOUNT1/$tfile-2
534 do_node $CLIENT1 mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
536 replay_barrier_nosync $mds
537 shutdown_client $CLIENT2 $MOUNT1
541 # were renames replayed?
543 echo UNLINK $MOUNT1/$tfile-3
544 do_node $CLIENT1 unlink $MOUNT1/$tfile-3 ||
545 { echo "unlink $tfile-3 fail!" && rc=1; }
548 zconf_mount_clients $CLIENT2 $MOUNT1 ||
549 error "mount $CLIENT2 $MOUNT1 fail"
555 [ $CLIENTCOUNT -lt 2 ] &&
556 { skip "Need 2+ clients, have $CLIENTCOUNT" && return; }
558 if [ "$FAILURE_MODE" = "HARD" ] && mixed_mdt_devs; then
559 skip "Several MDTs on one MDS with FAILURE_MODE=$FAILURE_MODE"
563 zconf_umount_clients $CLIENTS $MOUNT2
564 zconf_mount_clients $CLIENTS $MOUNT1
566 local param_file=$TMP/$tfile-params
568 local mdtidx=$($LFS getstripe -m $MOUNT1)
569 local facet=mds$((mdtidx + 1))
571 save_lustre_params $facet "mdt.*.commit_on_sharing" > $param_file
575 do_facet $facet lctl set_param mdt.*.commit_on_sharing=$COS
577 test_21b_sub $facet || error "Not all renames are replayed. COS=$COS"
579 # there is still a window when transactions may be written to disk
580 # before the mds device is set R/O. To avoid such a rare test failure,
581 # the check is repeated several times.
585 # COS disabled (should fail)
586 do_facet $facet lctl set_param mdt.*.commit_on_sharing=$COS
588 test_21b_sub $facet || break
589 n_attempts=$((n_attempts + 1))
590 [ $n_attempts -gt 3 ] &&
591 error "can't check if COS works: rename replied w/o COS"
593 zconf_mount_clients $CLIENTS $MOUNT2
594 restore_lustre_params < $param_file
598 run_test 21b "commit on sharing, two clients"
601 checkstat $MOUNT1/$remote_dir || return 1
602 checkstat $MOUNT1/$remote_dir/dir || return 2
603 checkstat $MOUNT1/$remote_dir/$tfile-1 || return 3
604 checkstat $MOUNT1/$remote_dir/dir/$tfile-1 || return 4
608 create_remote_dir_files_22() {
609 do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir/dir || return 1
610 do_node $CLIENT1 createmany -o $MOUNT1/$remote_dir/dir/$tfile- 2 ||
612 do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 ||
618 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
619 ([ $FAILURE_MODE == "HARD" ] &&
620 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
621 skip "MDTs needs to be on diff hosts for HARD fail mode" &&
625 local remote_dir=$tdir/remote_dir
627 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
628 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
630 # OBD_FAIL_MDS_REINT_NET_REP 0x119
631 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
632 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
636 fail mds$((MDTIDX + 1))
637 wait $CLIENT_PID || error "lfs mkdir -i $MDTIDX failed"
639 replay_barrier mds$MDTIDX
640 create_remote_dir_files_22 || error "Remote creation failed $?"
643 checkstat_22 || error "check stat failed $?"
645 rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
648 run_test 22a "c1 lfs mkdir -i 1 dir1, M1 drop reply & fail, c2 mkdir dir1/dir"
651 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
653 local remote_dir=$tdir/remote_dir
655 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
656 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
658 # OBD_FAIL_MDS_REINT_NET_REP 0x119
659 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
660 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
664 fail mds${MDTIDX},mds$((MDTIDX + 1))
665 wait $CLIENT_PID || error "lfs mkdir -i $MDTIDX failed"
667 replay_barrier mds$MDTIDX
668 create_remote_dir_files_22 || error "Remote creation failed $?"
671 checkstat_22 || error "check stat failed $?"
673 rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
676 run_test 22b "c1 lfs mkdir -i 1 d1, M1 drop reply & fail M0/M1, c2 mkdir d1/dir"
679 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
680 ([ $FAILURE_MODE == "HARD" ] &&
681 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
682 skip "MDTs need to be on different hosts for HARD fail mode" &&
685 local remote_dir=$tdir/remote_dir
687 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
688 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
690 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
691 do_facet mds$MDTIDX lctl set_param fail_loc=0x1701
692 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
695 do_facet mds$MDTIDX lctl set_param fail_loc=0
698 wait $CLIENT_PID || error "lfs mkdir -i $MDTIDX failed"
700 replay_barrier mds$MDTIDX
701 create_remote_dir_files_22 || error "Remote creation failed $?"
704 checkstat_22 || error "check stat failed $?"
706 rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
709 run_test 22c "c1 lfs mkdir -i 1 d1, M1 drop update & fail M1, c2 mkdir d1/dir"
712 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
714 local remote_dir=$tdir/remote_dir
716 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
717 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
719 # let previous transactions complete
720 # (distributed llog cancels, etc)
721 do_nodes $(comma_list $(mdts_nodes)) \
722 "$LCTL set_param -n osd*.*MDT*.force_sync=1"
725 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
726 do_facet mds$MDTIDX lctl set_param fail_loc=0x1701
727 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
730 do_facet mds$MDTIDX lctl set_param fail_loc=0
732 fail mds${MDTIDX},mds$((MDTIDX + 1))
733 wait $CLIENT_PID || error "lfs mkdir -i $MDTIDX failed"
735 replay_barrier mds$MDTIDX
736 create_remote_dir_files_22 || error "Remote creation failed $?"
739 checkstat_22 || error "check stat failed $?"
741 rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
744 run_test 22d "c1 lfs mkdir -i 1 d1, M1 drop update & fail M0/M1,c2 mkdir d1/dir"
747 checkstat $MOUNT1/$remote_dir || return 1
748 checkstat $MOUNT1/$remote_dir/$tfile-1 || return 2
752 create_remote_dir_files_23() {
753 do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir || return 1
754 do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 || return 2
759 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
760 ([ $FAILURE_MODE == "HARD" ] &&
761 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
762 skip "MDTs need to be on different hosts for HARD fail mode" &&
765 local remote_dir=$tdir/remote_dir
767 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
768 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
769 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
770 error "lfs mkdir -i $MDTIDX failed"
771 # OBD_FAIL_MDS_REINT_NET_REP 0x119
772 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
773 do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
776 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
778 fail mds$((MDTIDX + 1))
779 wait $CLIENT_PID || error "rmdir remote dir failed"
781 replay_barrier mds${MDTIDX}
782 create_remote_dir_files_23 || error "Remote creation failed $?"
785 checkstat_23 || error "check stat failed $?"
787 rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
790 run_test 23a "c1 rmdir d1, M1 drop reply and fail, client2 mkdir d1"
793 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
795 local remote_dir=$tdir/remote_dir
797 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
798 error "lfs mkdir -i 0 $MOUNT/$tdir failed"
799 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
800 error "lfs mkdir -i $MDTIDX failed"
802 # OBD_FAIL_MDS_REINT_NET_REP 0x119
803 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
804 do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
807 do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
809 fail mds${MDTIDX},mds$((MDTIDX + 1))
810 wait $CLIENT_PID || error "rmdir remote dir failed"
812 replay_barrier mds${MDTIDX}
813 create_remote_dir_files_23 || error "Remote creation failed $?"
816 checkstat_23 || error "check stat failed $?"
818 rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
821 run_test 23b "c1 rmdir d1, M1 drop reply and fail M0/M1, c2 mkdir d1"
824 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
826 ([ $FAILURE_MODE == "HARD" ] &&
827 [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
828 skip "MDTs need to be on different hosts for HARD fail mode" &&
831 local remote_dir=$tdir/remote_dir
833 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
834 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
835 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
836 error "lfs mkdir -i $MDTIDX failed"
838 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
839 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
840 do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
843 do_facet mds${MDTIDX} lctl set_param fail_loc=0
846 wait $CLIENT_PID || error "rmdir remote dir failed"
848 replay_barrier mds${MDTIDX}
849 create_remote_dir_files_23 || error "Remote creation failed $?"
852 checkstat_23 || error "check stat failed $?"
854 rm -rf $MOUNT1/$tdir || return 6
857 run_test 23c "c1 rmdir d1, M0 drop update reply and fail M0, c2 mkdir d1"
860 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
862 local remote_dir=$tdir/remote_dir
864 do_node $CLIENT1 $LFS mkdir -i 0 $MOUNT1/$tdir ||
865 error "lfs mkdir -i 0 $MOUNT1/$tdir failed"
866 do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
867 error "lfs mkdir -i $MDTIDX failed"
869 # let previous transactions complete
870 # (distributed llog cancels, etc)
871 do_nodes $(mdts_nodes) "$LCTL set_param -n osd*.*MDT*.force_sync=1"
874 # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
875 do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
876 do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
879 do_facet mds${MDTIDX} lctl set_param fail_loc=0
881 fail mds${MDTIDX},mds$((MDTIDX + 1))
882 wait $CLIENT_PID || error "rmdir remote dir failed"
884 replay_barrier mds${MDTIDX}
885 create_remote_dir_files_23 || error "Remote creation failed $?"
888 checkstat_23 || error "check stat failed $?"
890 rm -rf $MOUNT1/$tdir || return 6
893 run_test 23d "c1 rmdir d1, M0 drop update reply and fail M0/M1, c2 mkdir d1"
896 [[ "$MDS1_VERSION" -gt $(version_code 2.5.2) ]] ||
897 skip "Need MDS version newer than 2.5.2"
900 stat $MOUNT/$tfile >&/dev/null
901 # OBD_FAIL_MDS_REINT_NET_REP
902 do_facet $SINGLEMDS $LCTL set_param fail_loc=0x119
903 $TRUNCATE $MOUNT/$tfile 100 &
906 do_facet $SINGLEMDS lctl set_param fail_loc=0
907 # sync to release rep-ack lock quickly
908 do_nodes $(mdts_nodes) "lctl set_param -n osd*.*MDT*.force_sync 1"
912 run_test 24 "reconstruct on non-existing object"
914 # end commit on sharing tests
919 $LFS setstripe -i 0 -c 1 $DIR/$tfile
921 # get lock for the 1st client
922 dd if=/dev/zero of=$DIR/$tfile count=1 >/dev/null ||
923 error "failed to write data"
925 # get waiting locks for the 2nd client
926 drop_ldlm_cancel "multiop $DIR2/$tfile Ow512" &
929 # failover, replay and resend replayed waiting locks
930 if [ "$OST1_VERSION" -ge $(version_code 2.6.90) ]; then
931 #define OBD_FAIL_LDLM_SRV_CP_AST 0x325
932 do_facet ost1 lctl set_param fail_loc=0x80000325
934 #define OBD_FAIL_OST_LDLM_REPLY_NET 0x213
935 do_facet ost1 lctl set_param fail_loc=0x80000213
940 # multiop does not finish because CP AST is skipped;
941 # it is ok to kill it in the test, because CP AST is already re-sent
942 # and it does not hang forever in real life
943 killall -r "(multiop)$"
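# (killall -r treats "(multiop)$" as a regex against process names, so any
# leftover process whose name ends in multiop is killed)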
946 run_test 25 "replay|resend"
954 for pid_26 in "${pids_26[@]}"; do
955 if [[ -n "$pid_26" ]]; then
956 kill -0 "$pid_26" && kill "$pid_26" && \
957 wait "$pid_26" || true
961 for dir_26 in "${dirs_26[@]}"; do
962 if [[ -n "$dir_26" && -d "$dir_26" ]]; then
969 local clients=${CLIENTS:-$HOSTNAME}
971 zconf_mount_clients $clients $MOUNT
974 [[ "$SLOW" == "no" ]] && duration=200
975 # set duration to 900 because it takes some time to boot the node
976 [[ "$FAILURE_MODE" == HARD ]] && duration=900
978 local start_ts=$SECONDS
981 stack_trap cleanup_26
983 local tar_dir=$DIR/$tdir/run_tar
987 stack_trap 'set +e; jobs -p | xargs -r kill; wait; exit' \
991 test_mkdir -p -c$MDSCOUNT $tar_dir || break
993 if (( MDSCOUNT >= 2 )); then
994 $LFS setdirstripe -D -c$MDSCOUNT $tar_dir ||
995 error "set default dirstripe failed"
999 tar -C / -cf - etc | tar -xf - &
1004 wait $tar_pid || tar_rc=$?
1006 if (( tar_rc > 0 && tar_rc <= 128 )); then
1007 error "tar failed with rc $tar_rc"
1010 cd $DIR/$tdir || break
1011 rm -rf $tar_dir || break
1017 echo "Started tar loop with pid $tar_26_pid"
1018 pids_26+=($tar_26_pid)
1020 local dbench_dir=$DIR2/$tdir/run_dbench
1022 dirs_26+=($dbench_dir)
1024 stack_trap 'set +e; jobs -p | xargs -r kill; wait; exit' \
1028 test_mkdir -p -c$MDSCOUNT $dbench_dir || break
1030 if (( MDSCOUNT >= 2 )); then
1031 $LFS setdirstripe -D -c$MDSCOUNT $dbench_dir ||
1032 error "set default dirstripe failed"
1035 cd $dbench_dir || break
1036 bash rundbench 1 -D $dbench_dir -t 100 &
1041 wait $dbench_pid || dbench_rc=$?
1043 if (( dbench_rc > 0 && dbench_rc <= 128 )); then
1044 error "dbench failed with rc $dbench_rc"
1047 cd $DIR/$tdir || break
1048 rm -rf $dbench_dir || break
1052 local dbench_26_pid=$!
1054 echo "Started dbench loop with $dbench_26_pid"
1055 pids_26+=($dbench_26_pid)
1057 local num_failovers=0
1060 while (( (SECONDS - start_ts) < duration )); do
1061 kill -0 $tar_26_pid || error "tar $tar_26_pid missing"
1062 kill -0 $dbench_26_pid || error "dbench $dbench_26_pid missing"
1064 replay_barrier mds$fail_index
1065 sleep 2 # give clients a time to do operations
1066 # Increment the number of failovers
1067 num_failovers=$((num_failovers + 1))
1068 log "$TESTNAME fail mds$fail_index $num_failovers times"
1070 if (( fail_index < MDSCOUNT )); then
1071 fail_index=$((fail_index + 1))
1077 # stop the client loads
1078 kill -0 $tar_26_pid || error "tar $tar_26_pid stopped"
1079 kill -0 $dbench_26_pid || error "dbench $dbench_26_pid stopped"
1083 run_test 26 "dbench and tar with mds failover"
1086 wait_delete_completed
1088 $LFS setstripe -i 0 -c 1 $DIR2/$tfile
1089 dd if=/dev/zero of=$DIR2/$tfile bs=4096 count=1
1091 #define OBD_FAIL_LDLM_SRV_BL_AST 0x324
1092 do_facet ost1 $LCTL set_param fail_loc=0x80000324
1094 dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 &
1098 #define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
1099 do_facet ost1 $LCTL set_param fail_loc=0x32a
1104 cancel_lru_locks OST0000-osc
1105 wait $pid || error "dd failed"
1107 run_test 28 "lock replay should be ordered: waiting after granted"
1110 local dir0=$DIR/$tdir/d0
1111 local dir1=$DIR/$tdir/d1
1113 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1114 [ $CLIENTCOUNT -lt 2 ] && skip "needs >= 2 clients" && return 0
1115 [ "$CLIENT1" == "$CLIENT2" ] &&
1116 skip "clients must be on different nodes" && return 0
1119 $LFS mkdir -i0 $dir0
1120 $LFS mkdir -i1 $dir1
1124 # create a remote dir, drop reply
1125 #define OBD_FAIL_PTLRPC_ROUND_XID 0x530
1126 $LCTL set_param fail_loc=0x530 fail_val=36
1127 #define OBD_FAIL_MDS_REINT_MULTI_NET_REP 0x15a
1128 do_facet mds2 $LCTL set_param fail_loc=0x8000015a
1129 echo make remote dir d3 under $dir0
1130 $LFS mkdir -i1 -c1 $dir0/d3 &
1133 echo make local dir d4 under $dir1
1134 do_node $CLIENT2 $LCTL set_param fail_loc=0x530 fail_val=36
1135 do_node $CLIENT2 mkdir $dir1/d4
1139 run_test 29 "replay vs update with the same xid"
1142 $LFS setstripe -E 1m -L mdt -E -1 $DIR/$tfile
1143 # first write to avoid problems with grants
1144 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10 ||
1145 error "dd on client failed"
1146 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10 seek=10 ||
1147 error "dd on client failed"
1149 #define OBD_FAIL_LDLM_REPLAY_PAUSE 0x32e
1150 lctl set_param fail_loc=0x32e fail_val=4
1151 dd of=/dev/null if=$DIR2/$tfile &
1157 wait $pid || error "dd on client failed"
1159 run_test 30 "layout lock replay is not blocked on IO"
1162 mkdir_on_mdt0 $DIR1/$tdir
1163 $LFS setstripe -c 1 -i 0 $DIR1/$tdir
1164 for (( i=0; i < 10; i++ )) ; do
1165 mkdir -p $DIR1/$tdir/d.${i}
1167 mkdir $DIR1/$tdir/mdtdir
1168 $LFS setstripe -E 1M -L mdt $DIR1/$tdir/mdtdir
1170 # failover has to take longer than the blocking timeout involved
1171 # by the second multiop below, which is set to obd_timeout/2 by
1173 local timeout=$(do_facet mds1 $LCTL get_param -n timeout)
1175 timeout=$((timeout / 2 + 5))
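# Illustrative arithmetic (example values only): with obd_timeout=20 the
# blocking timeout used by the second multiop is 20/2 = 10s, so the ost1
# failover below is stretched to 20/2 + 5 = 15s to be sure it outlasts
# that window.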
1176 fail ost1 $timeout &
1181 # consume preallocated objects, precreate thread will be awakened
1182 consume_precreations $DIR1/$tdir mds1 0 1
1184 # disable AT so that blocking timeout gets set to obd_timeout/2
1185 local amm=$(at_max_get mds1)
1188 stack_trap "at_max_set $amm mds1"
1192 #define OBD_FAIL_LLITE_XATTR_PAUSE 0x1420
1193 $LCTL set_param fail_loc=0x80001420
1194 $MULTIOP $DIR1/$tdir/mdtdir/$tfile Osw4096c &
1197 for (( i=0; i<10; i++ )); do
1198 if [ -w $DIR2/$tdir/mdtdir/$tfile ]; then
1199 echo "file $DIR2/$tdir/mdtdir/$tfile is ready"
1202 echo "file $DIR2/$tdir/mdtdir/$tfile is not ready, wait 0.5 second..."
1207 $MULTIOP $DIR2/$tdir/mdtdir/$tfile oO_WRONLY:w4096c &
1211 local mmrif=$($LCTL get_param -n \
1212 mdc.$FSNAME-MDT0000-mdc-*.max_mod_rpcs_in_flight | tail -1)
1213 # these are blocked by precreation until ost failover is in progress
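# (illustrative: if max_mod_rpcs_in_flight reports 7, seven background
# multiops are launched so that every modify-RPC slot on the MDT is occupied
# while they wait on OST object precreation)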
1214 for (( i=0; i < $mmrif; i++ )) ; do
1215 $MULTIOP $DIR1/$tdir/d.${i}/parallel Oc &
1221 echo "pids: ${multiops[@]}"
1222 for pid in "${multiops[@]}"; do
1226 if (( $rc != 0 )); then
1227 echo "wait $pid failed, rc = $rc"
1235 if (( failed != 0 )); then
1236 local bigly=($(du -sk $MOUNT/* | sort -nr | head -10))
1238 for ((i = 0; i < ${#bigly[*]}; i += 2)); do
1239 echo -e "${bigly[i]}\t${bigly[$((i+1))]}"
1242 local biggest=${bigly[1]}
1244 [[ -f $biggest ]] || du -sk $biggest/* | sort -nr | head -10
1245 error "$failed multiops failed due to '$biggest'"
1248 run_test 31 "deadlock on file_remove_privs and occupied mod rpc slots"
1251 (( $MDSCOUNT < 2 )) && skip_env "needs >= 2 MDTs"
1253 # inject a gap at the 10th transaction
1254 #define OBD_FAIL_LLOG_ADD_GAP 0x131d
1255 do_facet $SINGLEMDS $LCTL set_param fail_loc=0x0000131d fail_val=10
1256 for ((i=0; i < 20; i++)); do
1257 $LFS setdirstripe -i1 $DIR/$tdir-$i ||
1258 error "can't mkdir $DIR/$tdir-$i"
1261 # prevent update llog cancellation, so on the next boot the MDS has to
1262 # process the update llog with the gap injected
1263 #define OBD_FAIL_TGT_TXN_NO_CANCEL 0x726
1264 $LCTL set_param fail_loc=0x726
1269 $LCTL set_param fail_loc=0
1276 client_evicted $CLIENT1 &&
1277 error "client got evicted due to aborted recovery"
1280 run_test 32 "gap in update llog shouldn't break recovery"
1282 last_rcvd_check_incompat_flag() {
1284 local flag2check="$2"
1285 local dev=$(facet_device $facet)
1288 incompat=$(do_facet $facet $LR_READER $dev |
1289 awk '/feature_incompat:/ {print $2}')
1290 echo "last_rcvd in $dev: incompat = $incompat"
1292 return $(( (incompat & flag2check) != flag2check ))
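# Example of the bitwise check above, with hypothetical values: for
# incompat=0x58c and flag2check=0x400, (0x58c & 0x400) == 0x400, so the
# expression is 0 and the function returns success (flag present); a value
# like 0x18c would make it return 1 (flag missing).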
1296 test_33() { # LU-15935
1297 (( $MDS1_VERSION >= $(version_code 2.15.52.86) )) ||
1298 (( $MDS1_VERSION >= $(version_code 2.15.2) &&
1299 $MDS1_VERSION < $(version_code 2.15.50) )) ||
1300 skip "Need MDS version at least 2.15.52.86 or 2.15.2"
1302 [[ "$mds1_FSTYPE" == "ldiskfs" ]] || skip "ldiskfs only test"
1303 (( MDSCOUNT > 1 )) || skip "needs >= 2 MDTs"
1306 at_min_old=$(at_min_get ost1)
1308 stack_trap "at_min_set $at_min_old ost1 || true"
1311 cancel_lru_locks mdc
1316 # check for OBD_INCOMPAT_MULTI_RPCS (0x400) in last_rcvd
1317 last_rcvd_check_incompat_flag mds2 0x400 ||
1318 error "1st failover: OBD_INCOMPAT_MULTI_RPCS is not set on MDT0000 last_rcvd"
1320 # lose 1 client while the MDT failover
1324 wait_clients_import_state "$HOSTNAME" mds2 "REPLAY_WAIT"
1326 do_facet mds2 $LCTL --device $(convert_facet2label mds2) abort_recovery
1327 wait_clients_import_state "$HOSTNAME" mds2 "FULL"
1328 wait_recovery_complete mds2
1332 last_rcvd_check_incompat_flag mds2 0x400 ||
1333 error "2sd failover: OBD_INCOMPAT_MULTI_RPCS is not set on MDT0000 last_rcvd"
1337 zconf_mount $HOSTNAME $MOUNT2 || error "Fail to mount $MOUNT2"
1338 wait_clients_import_state "$HOSTNAME" mds2 "FULL"
1339 wait_recovery_complete mds2
1341 run_test 33 "Check for OBD_INCOMPAT_MULTI_RPCS in last_rcvd after abort_recovery"
1343 complete_test $SECONDS
1344 SLEEP=$((SECONDS - $NOW))
1345 [ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
1346 [ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
1347 check_and_cleanup_lustre