+
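+ # move all clients onto $MOUNT1 so they operate on a single mount
+ # point during the commit-on-sharing checks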
+ zconf_umount_clients $CLIENTS $MOUNT2
+ zconf_mount_clients $CLIENTS $MOUNT1
+
+ local param_file=$TMP/$tfile-params
+
+ local mdtidx=$($LFS getstripe -M $MOUNT1)
+ local facet=mds$((mdtidx + 1))
+
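+ # save the current commit_on_sharing setting so it can be restored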
+ save_lustre_params $facet "mdt.*.commit_on_sharing" > $param_file
+
+ # COS enabled
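+ # with COS, an update that depends on an uncommitted update from
+ # another client forces that update to be committed first, so no
+ # rename may be lost on replay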
+ local COS=1
+ do_facet $facet lctl set_param mdt.*.commit_on_sharing=$COS
+
+ test_21b_sub $facet || error "Not all renames are replayed. COS=$COS"
+
+ # there is still a window when transactions may be written to disk
+ # before the mds device is set R/O. To avoid such a rare test failure,
+ # the check is repeated several times.
+ COS=0
+ local n_attempts=1
+ while true; do
+ # COS disabled (should fail)
+ do_facet $facet lctl set_param mdt.*.commit_on_sharing=$COS
+
+ test_21b_sub $facet || break
+ n_attempts=$((n_attempts + 1))
+ [ $n_attempts -gt 3 ] &&
+ error "can't check if COS works: rename replied w/o COS"
+ done
+ zconf_mount_clients $CLIENTS $MOUNT2
+ restore_lustre_params < $param_file
+ rm -f $param_file
+ return 0
+}
+run_test 21b "commit on sharing, two clients"
+
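+# verify that the remote directory and the files created in it are
+# still present after the replay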
+checkstat_22() {
+ checkstat $MOUNT1/$remote_dir || return 1
+ checkstat $MOUNT1/$remote_dir/dir || return 2
+ checkstat $MOUNT1/$remote_dir/$tfile-1 || return 3
+ checkstat $MOUNT1/$remote_dir/dir/$tfile-1 || return 4
+ return 0
+}
+
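+# populate the remote directory from both clients to create
+# cross-client, cross-MDT dependencies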
+create_remote_dir_files_22() {
+ do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir/dir || return 1
+ do_node $CLIENT1 createmany -o $MOUNT1/$remote_dir/dir/$tfile- 2 ||
+ return 2
+ do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 ||
+ return 3
+ return 0
+}
+
+test_22a () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=${tdir}/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
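+ # drop the reply to the cross-MDT mkdir on the remote MDT, then
+ # fail that MDT so the client has to resend the request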
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+
+ fail mds$((MDTIDX + 1))
+ wait $CLIENT_PID || error "lfs mkdir failed"
+
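+ # make the following updates uncommitted on the MDT so that they
+ # must be replayed by the clients after the failover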
+ replay_barrier mds$MDTIDX
+ create_remote_dir_files_22 || error "Remote creation failed $?"
+ fail mds$MDTIDX
+
+ checkstat_22 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 22a "c1 lfs mkdir -i 1 dir1, M1 drop reply & fail, c2 mkdir dir1/dir"
+
+test_22b () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$tdir/remote_dir
+
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+ wait $CLIENT_PID || error "lfs mkdir failed"
+
+ replay_barrier mds$MDTIDX
+ create_remote_dir_files_22 || error "Remote creation failed $?"
+ fail mds${MDTIDX}
+
+ checkstat_22 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 22b "c1 lfs mkdir -i 1 d1, M1 drop reply & fail M0/M1, c2 mkdir d1/dir"
+
+test_22c () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+ local MDTIDX=1
+ local remote_dir=${tdir}/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+ # OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
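+ # drop the reply to the inter-MDT object update request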
+ do_facet mds$MDTIDX lctl set_param fail_loc=0x1701
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$MDTIDX lctl set_param fail_loc=0
+
+ fail mds$MDTIDX
+ wait $CLIENT_PID || error "lfs mkdir failed"
+
+ replay_barrier mds$MDTIDX
+ create_remote_dir_files_22 || error "Remote creation failed $?"
+ fail mds$MDTIDX
+
+ checkstat_22 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 22c "c1 lfs mkdir -i 1 d1, M1 drop update & fail M1, c2 mkdir d1/dir"
+
+test_22d () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=${tdir}/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+ # OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+ do_facet mds$MDTIDX lctl set_param fail_loc=0x1701
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$MDTIDX lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+ wait $CLIENT_PID || error "lfs mkdir failed"
+
+ replay_barrier mds$MDTIDX
+ create_remote_dir_files_22 || error "Remote creation failed $?"
+ fail mds$MDTIDX
+
+ checkstat_22 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 22d "c1 lfs mkdir -i 1 d1, M1 drop update & fail M0/M1,c2 mkdir d1/dir"
+
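+# verify that the re-created remote directory and its files are
+# still present after the replay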
+checkstat_23() {
+ checkstat $MOUNT1/$remote_dir || return 1
+ checkstat $MOUNT1/$remote_dir/$tfile-1 || return 2
+ return 0
+}
+
+create_remote_dir_files_23() {
+ do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir || return 1
+ do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 || return 2
+ return 0
+}
+
+test_23a () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+ local MDTIDX=1
+ local remote_dir=$tdir/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+ error "lfs mkdir failed"
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds$((MDTIDX + 1))
+ wait $CLIENT_PID || error "rmdir remote dir failed"
+
+ replay_barrier mds${MDTIDX}
+ create_remote_dir_files_23 || error "Remote creation failed $?"
+ fail mds${MDTIDX}
+
+ checkstat_23 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 23a "c1 rmdir d1, M1 drop reply and fail, client2 mkdir d1"
+
+test_23b () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$tdir/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+ error "lfs mkdir failed"
+
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+ wait $CLIENT_PID || error "rmdir remote dir failed"
+
+ replay_barrier mds${MDTIDX}
+ create_remote_dir_files_23 || error "Remote creation failed $?"
+ fail mds${MDTIDX}
+
+ checkstat_23 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 23b "c1 rmdir d1, M1 drop reply and fail M0/M1, c2 mkdir d1"
+
+test_23c () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+ local MDTIDX=1
+ local remote_dir=$tdir/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+ error "lfs mkdir failed"
+
+ # OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
+ do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+ wait $CLIENT_PID || error "rmdir remote dir failed"
+
+ replay_barrier mds${MDTIDX}
+ create_remote_dir_files_23 || error "Remote creation failed $?"
+ fail mds${MDTIDX}
+
+ checkstat_23 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 23c "c1 rmdir d1, M0 drop update reply and fail M0, c2 mkdir d1"
+
+test_23d () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$tdir/remote_dir
+
+ do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+ do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+ error "lfs mkdir failed"
+
+ # OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
+ do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+ wait $CLIENT_PID || error "rmdir remote dir failed"
+
+ replay_barrier mds${MDTIDX}
+ create_remote_dir_files_23 || error "Remote creation failed $?"
+ fail mds${MDTIDX}
+
+ checkstat_23 || error "check stat failed $?"
+
+ rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+ return 0
+}
+run_test 23d "c1 rmdir d1, M0 drop update reply and fail M0/M1, c2 mkdir d1"
+
+test_24 () {
+ [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.5.2) ]] ||
+ { skip "Need MDS version newer than 2.5.2"; return 0; }
+
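+ # drop the truncate reply, then remove the file through the second
+ # mount so the resent request must be reconstructed against an
+ # object that no longer exists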
+ touch $MOUNT/$tfile
+ stat $MOUNT/$tfile >&/dev/null
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0x119
+ $TRUNCATE $MOUNT/$tfile 100 &
+ PID=$!
+ sleep 1
+ do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+ # sync to release rep-ack lock quickly
+ do_nodes $(comma_list $(mdts_nodes)) \
+ "lctl set_param -n osd*.*MDT*.force_sync 1"
+ rm $MOUNT2/$tfile
+ wait
+}
+run_test 24 "reconstruct on non-existing object"
+
+# end commit on sharing tests
+
+test_25() {
+ cancel_lru_locks osc
+
+ $SETSTRIPE -i 0 -c 1 $DIR/$tfile
+
+ # get lock for the 1st client
+ dd if=/dev/zero of=$DIR/$tfile count=1 >/dev/null ||
+ error "failed to write data"
+
+ # get waiting locks for the 2nd client
+ drop_ldlm_cancel "multiop $DIR2/$tfile Ow512" &
+ sleep 1
+
+ # failover, replay and resend replayed waiting locks
+ if [ $(lustre_version_code ost1) -ge $(version_code 2.6.90) ]; then
+ #define OBD_FAIL_LDLM_SRV_CP_AST 0x325
+ do_facet ost1 lctl set_param fail_loc=0x80000325
+ else
+ #define OBD_FAIL_OST_LDLM_REPLY_NET 0x213
+ do_facet ost1 lctl set_param fail_loc=0x80000213
+ fi
+
+ fail ost1
+
+ # multiop does not finish because the CP AST is skipped;
+ # it is ok to kill it in the test, because the CP AST has already
+ # been re-sent and it would not hang forever in real life
+ killall multiop
+ wait
+}
+run_test 25 "replay|resend"
+
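+# kill the background tar and dbench loads started by test_26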
+cleanup_26() {
+ trap 0
+ kill -9 $tar_26_pid
+ kill -9 $dbench_26_pid
+ killall -9 dbench
+}
+
+test_26() {
+ local clients=${CLIENTS:-$HOSTNAME}
+
+ zconf_mount_clients $clients $MOUNT
+
+ local duration=600
+ [ "$SLOW" = "no" ] && duration=200
+ # set duration to 900 because it takes some time to boot a node
+ [ "$FAILURE_MODE" = HARD ] && duration=900
+
+ local start_ts=$SECONDS
+ local rc=0
+
+ trap cleanup_26 EXIT
+ (
+ local tar_dir=$DIR/$tdir/run_tar
+ while true; do
+ test_mkdir -p -c$MDSCOUNT $tar_dir || break
+ if [ $MDSCOUNT -ge 2 ]; then
+ $LFS setdirstripe -D -c$MDSCOUNT $tar_dir ||
+ error "set default dirstripe failed"
+ fi
+ cd $tar_dir || break
+ tar cf - /etc | tar xf - || error "tar failed"
+ cd $DIR/$tdir || break
+ rm -rf $tar_dir || break
+ done
+ )&
+ tar_26_pid=$!
+ echo "Started tar $tar_26_pid"
+
+ (
+ local dbench_dir=$DIR2/$tdir/run_dbench
+ while true; do
+ test_mkdir -p -c$MDSCOUNT $dbench_dir || break
+ if [ $MDSCOUNT -ge 2 ]; then
+ $LFS setdirstripe -D -c$MDSCOUNT $dbench_dir ||
+ error "set default dirstripe failed"
+ fi
+ cd $dbench_dir || break
+ rundbench 1 -D $dbench_dir -t 100 > /dev/null 2>&1 ||
+ break
+ cd $DIR/$tdir || break
+ rm -rf $dbench_dir || break
+ done
+ )&
+ dbench_26_pid=$!
+ echo "Started dbench $dbench_26_pid"
+
+ local num_failovers=0
+ local fail_index=1
+ while [ $((SECONDS - start_ts)) -lt $duration ]; do
+ kill -0 $tar_26_pid || error "tar $tar_26_pid missing"
+ kill -0 $dbench_26_pid || error "dbench $dbench_26_pid missing"
+ sleep 2
+ replay_barrier mds$fail_index
+ sleep 2 # give clients time to perform operations
+ # Increment the number of failovers
+ num_failovers=$((num_failovers + 1))
+ log "$TESTNAME fail mds$fail_index $num_failovers times"
+ fail mds$fail_index
+ if [ $fail_index -ge $MDSCOUNT ]; then
+ fail_index=1
+ else
+ fail_index=$((fail_index + 1))
+ fi
+ done
+ # check that the client loads are still running, then stop them
+ kill -0 $tar_26_pid || error "tar $tar_26_pid stopped"
+ kill -0 $dbench_26_pid || error "dbench $dbench_26_pid stopped"
+ cleanup_26 || true
+}
+run_test 26 "dbench and tar with mds failover"
+
+test_28() {
+ $SETSTRIPE -i 0 -c 1 $DIR2/$tfile
+ dd if=/dev/zero of=$DIR2/$tfile bs=4096 count=1
+
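+ # the first dd leaves a granted lock cached on one client; skipping
+ # the blocking AST keeps the second dd's lock waiting, so after the
+ # OST failover the waiting lock must be replayed after the granted one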
+ #define OBD_FAIL_LDLM_SRV_BL_AST 0x324
+ do_facet ost1 $LCTL set_param fail_loc=0x80000324
+
+ dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 &
+ local pid=$!
+ sleep 2
+
+ #define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
+ do_facet ost1 $LCTL set_param fail_loc=0x32a
+
+ fail ost1
+
+ sleep 2
+ cancel_lru_locks OST0000-osc
+ wait $pid || error "dd failed"
+}
+run_test 28 "lock replay should be ordered: waiting after granted"
+
+complete $SECONDS
+SLEEP=$((SECONDS - $NOW))
+[ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
+[ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
+check_and_cleanup_lustre
+exit_status