check_timeout || return 1
+ # make sure all imports are connected and not IDLE
+ do_facet client lfs df > /dev/null
# OBD_FAIL_PTLRPC_DROP_RPC 0x505
do_facet client lctl set_param fail_loc=0x505
local before=$(date +%s)
# the loser might have to wait for the next ping.
sleep $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
do_facet client lctl set_param fail_loc=0x0
- do_facet client df > /dev/null
+ do_facet client lfs df > /dev/null
local oscs=$(lctl dl | awk '/-osc-/ {print $4}')
check_clients_evicted $before ${oscs[@]}
# = 9 * PING_INTERVAL + PING_INTERVAL
# = 10 PING_INTERVAL = 10 obd_timeout / 4 = 2.5 obd_timeout
# let's wait $((TIMEOUT * 3)) # bug 19887
- local rc=0
- wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) || \
- error "Client was not evicted by ost" rc=1
- wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) || \
+ wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) ||
+ error "Client was not evicted by ost"
+ wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) ||
error "Client was not evicted by mds"
}
run_test 26b "evict dead exports"
[[ "$MDS1_VERSION" -ge $(version_code 2.12.55) ]] ||
skip "Need MDS version at least 2.12.55"
- stop mds2 || error "stop mds2 failed"
umount $MOUNT
+ stop mds2 || error "stop mds2 failed"
#define OBD_FAIL_FLD_QUERY_REQ 0x1103
do_facet mds2 lctl set_param fail_loc=0x1103
}
run_test 110k "FID_QUERY failed during recovery"
+# Test 110m: race a resent update RPC against the original RPC.
+# A reconnect/resend is forced on the MDT0000-MDT0001 OSP import while a
+# cross-MDT mkdir is in flight; the striped dir must still be created on
+# the MDT requested by "mkdir -i 1".
+test_110m () {
+	(( $(lustre_version_code $SINGLEMDS) >= $(version_code 2.14.52) )) ||
+		skip "Need MDS version at least 2.14.52"
+	(( $MDSCOUNT >= 2 )) || skip "needs at least 2 MDTs"
+	local remote_dir=$DIR/$tdir/remote_dir
+	local mdccli
+	local uuid
+	local diridx
+	local mkdir_pid
+
+	mkdir_on_mdt0 $DIR/$tdir
+
+#define OBD_FAIL_PTLRPC_RESEND_RACE	 0x0525
+	do_facet mds1 $LCTL set_param fail_loc=0x80000525
+	# background the mkdir: the fail_loc above stalls its update RPC
+	$LFS mkdir -i 1 -c2 $remote_dir &
+	mkdir_pid=$!
+	sleep 3
+	# initiate the re-connect & re-send
+	mdccli=$(do_facet mds2 $LCTL dl |
+		awk '/MDT0000-osp-MDT0001/ {print $4;}')
+	uuid=$(do_facet mds2 $LCTL get_param -n osp.$mdccli.mdt_conn_uuid)
+	echo "conn_uuid=$uuid"
+	do_facet mds2 $LCTL set_param "osp.$mdccli.import=connection=$uuid"
+
+	wait $mkdir_pid
+	(( $? == 0 )) || error "mkdir failed"
+
+	# the dir must land on MDT index 1, as requested with "-i 1"
+	diridx=$($LFS getstripe -m $remote_dir)
+	(( $diridx == 1 )) || error "$diridx != 1"
+	rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110m "update resent vs original RPC race"
+
# LU-2844 mdt prepare fail should not cause umount oops
test_111 ()
{
sleep 55
stop $SINGLEMDS || error "stop MDS failed"
do_facet $SINGLEMDS $LCTL set_param fail_loc=0
- start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) ||
+ start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
error "start MDS failed"
zconf_mount_clients $CLIENTS $MOUNT
}
mount_mds_client
local cnt
- cnt=$(do_facet mds1 $LCTL get_param "mdt.*.exports.*.export" |
+ cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
grep export_flags.*no_recovery | wc -l)
echo "$cnt clients with recovery disabled"
umount_mds_client
do_facet mds1 $LCTL set_param mdt.*.local_recovery=1
mount_mds_client
- cnt=$(do_facet mds1 $LCTL get_param "mdt.*.exports.*.export" |
+ cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
grep export_flags.*no_recovery | wc -l)
echo "$cnt clients with recovery disabled"
umount_mds_client
replay_barrier mds1
umount_mds_client
fail mds1
+ # Lustre: tfs-MDT0000: Recovery over after 0:03, of 2 clients 2 rec...
local recovery=$(do_facet mds1 dmesg |
- awk -F: '/Recovery over after/ { print $4 }' |
- cut -d, -f1 | tail -1)
- (( $recovery < $TIMEOUT*2 )) ||
- error "recovery took too long $recovery > $((TIMEOUT * 2))"
+ awk '/Recovery over after/ { print $6 }' | tail -1 |
+ awk -F: '{ print $1 * 60 + $2 }')
+ (( recovery < TIMEOUT * 2 + 5 )) ||
+ error "recovery took too long $recovery > $((TIMEOUT * 2 + 5))"
}
run_test 140b "local mount is excluded from recovery"
}
run_test 148 "data corruption through resend"
+# Test 149: an orphan striped dir left behind while its MDTs are stopped
+# must not oops/corrupt at umount; cleanup is deferred until restart.
+test_149() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+
+	test_mkdir -i 0 -c $MDSCOUNT $DIR/$tdir || error "mkdir $tdir failed"
+
+	# keep the dir open so the rmdir below leaves an orphan striped dir
+	$MULTIOP $DIR/$tdir D_c &
+	local PID=$!
+	sleep 0.3
+	rmdir $DIR/$tdir || error "can't rmdir"
+
+	# stop a slave MDT where one of the dir stripes is located
+	stop mds2 -f
+
+	# stopping mds1 should not trigger orphan removal while the MDT
+	# holding the other stripe (mds2) is still down
+	stop mds1 -f
+
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+	start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS || error "mds2 start fail"
+
+	# release the open handle and reap the background multiop
+	kill -USR1 $PID
+	wait $PID
+
+	clients_up
+	return 0
+}
+run_test 149 "skip orphan removal at umount"
+
complete $SECONDS
check_and_cleanup_lustre
exit_status