init_logging
ALWAYS_EXCEPT="$RECOVERY_SMALL_EXCEPT "
-if $SHARED_KEY; then
- # bug number for skipped test: LU-12896
- ALWAYS_EXCEPT+=" 110k"
- # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-fi
build_test_filter
check_timeout || return 1
+ # make sure all imports are connected and not IDLE
+ do_facet client lfs df > /dev/null
# OBD_FAIL_PTLRPC_DROP_RPC 0x505
do_facet client lctl set_param fail_loc=0x505
local before=$(date +%s)
# the loser might have to wait for the next ping.
sleep $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
do_facet client lctl set_param fail_loc=0x0
- do_facet client df > /dev/null
+ do_facet client lfs df > /dev/null
local oscs=$(lctl dl | awk '/-osc-/ {print $4}')
check_clients_evicted $before ${oscs[@]}
# = 9 * PING_INTERVAL + PING_INTERVAL
# = 10 PING_INTERVAL = 10 obd_timeout / 4 = 2.5 obd_timeout
# let's wait $((TIMEOUT * 3)) # bug 19887
- local rc=0
- wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) || \
- error "Client was not evicted by ost" rc=1
- wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) || \
+ wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) ||
+ error "Client was not evicted by ost"
+ wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) ||
error "Client was not evicted by mds"
}
run_test 26b "evict dead exports"
[[ "$MDS1_VERSION" -ge $(version_code 2.12.55) ]] ||
skip "Need MDS version at least 2.12.55"
- stop mds2 || error "stop mds2 failed"
umount $MOUNT
+ stop mds2 || error "stop mds2 failed"
#define OBD_FAIL_FLD_QUERY_REQ 0x1103
do_facet mds2 lctl set_param fail_loc=0x1103
- start mds2 $(mdsdevname 2) -o abort_recovery ||
+ local OPTS="$MDS_MOUNT_OPTS -o abort_recovery"
+ start mds2 $(mdsdevname 2) $OPTS ||
error "start MDS with abort_recovery should succeed"
do_facet mds2 lctl set_param fail_loc=0
# cleanup
stop mds2 || error "cleanup: stop mds2 failed"
- start mds2 $(mdsdevname 2) || error "cleanup: start mds2 failed"
+ start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS ||
+ error "cleanup: start mds2 failed"
zconf_mount $(hostname) $MOUNT || error "cleanup: mount failed"
client_up || error "post-failover df failed"
}
run_test 110k "FID_QUERY failed during recovery"
+test_110m () {
+	(( $(lustre_version_code $SINGLEMDS) >= $(version_code 2.14.52) )) ||
+		skip "Need MDS version at least 2.14.52"
+	(( $MDSCOUNT >= 2 )) || skip "needs at least 2 MDTs"
+	local remote_dir=$DIR/$tdir/remote_dir
+	# declare mkdir_pid local like the rest, instead of leaking a global
+	local mkdir_pid
+	local mdccli
+	local uuid
+	local diridx
+
+	mkdir_on_mdt0 $DIR/$tdir
+
+#define OBD_FAIL_PTLRPC_RESEND_RACE	 0x0525
+	do_facet mds1 $LCTL set_param fail_loc=0x80000525
+	# start the cross-MDT mkdir in the background so we can race it
+	$LFS mkdir -i 1 -c2 $remote_dir &
+	mkdir_pid=$!
+	sleep 3
+	# initiate the re-connect & re-send
+	mdccli=$(do_facet mds2 $LCTL dl |
+		awk '/MDT0000-osp-MDT0001/ {print $4;}')
+	uuid=$(do_facet mds2 $LCTL get_param -n osp.$mdccli.mdt_conn_uuid)
+	echo "conn_uuid=$uuid"
+	do_facet mds2 $LCTL set_param "osp.$mdccli.import=connection=$uuid"
+
+	wait $mkdir_pid
+	(( $? == 0 )) || error "mkdir failed"
+
+	# the striped dir's master stripe must have landed on MDT1
+	diridx=$($LFS getstripe -m $remote_dir)
+	(( $diridx == 1 )) || error "$diridx != 1"
+	rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110m "update resent vs original RPC race"
+
# LU-2844 mdt prepare fail should not cause umount oops
test_111 ()
{
#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
do_facet $SINGLEMDS lctl set_param fail_loc=0x151
stop $SINGLEMDS || error "stop MDS failed"
- start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
+ start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS &&
error "start MDS should fail"
do_facet $SINGLEMDS lctl set_param fail_loc=0
- start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) ||
+ start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
error "start MDS failed"
}
run_test 111 "mdd setup fail should not cause umount oops"
sleep 55
stop $SINGLEMDS || error "stop MDS failed"
do_facet $SINGLEMDS $LCTL set_param fail_loc=0
- start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) ||
+ start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
error "start MDS failed"
zconf_mount_clients $CLIENTS $MOUNT
}
}
run_test 139 "corrupted catid won't cause crash"
+test_140a() {
+	# skip message must quote the version actually checked (2.12.58)
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	# remember the current setting so the stack_trap can restore it
+	local slr
+
+	slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	# so local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+	mount_mds_client
+
+	local cnt
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] && error "no clients with recovery disabled"
+
+	# enable recovery for local clients
+	# so no local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=1
+	mount_mds_client
+
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] || error "$cnt clients with recovery disabled"
+}
+run_test 140a "local mount is flagged properly"
+
+test_140b() {
+	# skip message must quote the version actually checked (2.12.58)
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	# remember the current setting so the stack_trap can restore it
+	local slr
+
+	slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+
+	# a local client mounted before the barrier must not be waited for
+	mount_mds_client
+	replay_barrier mds1
+	umount_mds_client
+	fail mds1
+	# Lustre: tfs-MDT0000: Recovery over after 0:03, of 2 clients 2 rec...
+	# parse the "M:SS" duration from the last recovery-completion line
+	local recovery=$(do_facet mds1 dmesg |
+		awk '/Recovery over after/ { print $6 }' | tail -1 |
+		awk -F: '{ print $1 * 60 + $2 }')
+	(( recovery < TIMEOUT * 2 + 5 )) ||
+		error "recovery took too long $recovery > $((TIMEOUT * 2 + 5))"
+}
+run_test 140b "local mount is excluded from recovery"
+
+test_141() {
+	local oldc
+	local newc
+
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	combined_mgs_mds || skip "needs combined MGS/MDT"
+	( local_mode || from_build_tree ) &&
+		skip "cannot run in local mode or from build tree"
+
+	# some get_param have a bug to handle dot in param name
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	oldc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+	# restart the combined MGS/MDS; MGC locks should survive failover
+	fail $SINGLEMDS
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	newc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+
+	# the MGC lock count must be unchanged across the restart
+	[ $oldc -eq $newc ] || error "mgc lost locks ($oldc != $newc)"
+	return 0
+}
+run_test 141 "do not lose locks on MGS restart"
+
+test_142() {
+	[ $MDS1_VERSION -lt $(version_code 2.11.56) ] &&
+		skip "Need MDS version at least 2.11.56"
+
+	#define OBD_FAIL_MDS_ORPHAN_DELETE 0x165
+	do_facet mds1 $LCTL set_param fail_loc=0x165
+	$MULTIOP $DIR/$tfile Ouc || error "multiop failed"
+
+	stop mds1
+	# check the restart like the sibling tests do, instead of silently
+	# proceeding to the wait below on a dead MDS
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+
+	# the orphan cleanup thread spawned at mount must finish and exit
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 142 "orphan name stub can be cleaned up in startup"
+
+test_143() {
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.13.00) ] &&
+		skip "Need MDS version at least 2.13.00"
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+
+	# split declaration and assignment so the helper's exit status
+	# is not masked by 'local'
+	local mntpt
+
+	mntpt=$(facet_mntpt $SINGLEMDS)
+	stop mds1
+	# mount the backend fs directly and plant an entry in PENDING that
+	# the orphan cleanup thread will fail to delete
+	mount_fstype $SINGLEMDS || error "mount as fstype $SINGLEMDS failed"
+	do_facet $SINGLEMDS touch $mntpt/PENDING/$tfile
+	unmount_fstype $SINGLEMDS
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+
+	# even with the delete failing, cleanup must not block recovery
+	wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 143 "orphan cleanup thread shouldn't be blocked even delete failed"
+
+test_145() {
+	[ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs"
+	[ $(facet_active_host mds2) = $(facet_active_host mds3) ] &&
+		skip "needs mds2 and mds3 on separate nodes"
+
+	replay_barrier mds1
+
+	touch $DIR/$tfile
+
+#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
+	echo block mds_connect from mds2
+	do_facet mds2 "$LCTL set_param fail_loc=0x507"
+
+#define OBD_FAIL_OUT_UPDATE_DROP 0x1707
+	echo block recovery updates from mds3
+	do_facet mds3 "$LCTL set_param fail_loc=0x1707"
+
+	local hard_timeout=\
+$(do_facet mds1 $LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_time_hard)
+
+	fail mds1 &
+
+	local get_soft_timeout_cmd=\
+"$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_time_soft 2>/dev/null"
+
+	# with mds2/mds3 blocked, recovery_time_soft is extended until it
+	# reaches recovery_time_hard; wait for that before unblocking
+	echo wait until mds1 recovery_time_soft is $hard_timeout
+	wait_update $(facet_host mds1) "$get_soft_timeout_cmd" \
+"$hard_timeout" $hard_timeout
+
+	echo unblock mds_connect from mds2
+	do_facet mds2 "$LCTL set_param fail_loc=0"
+
+	# fix typo in the progress message: "upblock" -> "unblock"
+	echo unblock recovery updates from mds3
+	do_facet mds3 "$LCTL set_param fail_loc=0"
+
+	wait
+	[ -f $DIR/$tfile ] || error "$DIR/$tfile does not exist"
+}
+run_test 145 "connect mdtlovs and process update logs after recovery expire"
+
+test_147() {
+	local obd_timeout=200
+	local old=$($LCTL get_param -n timeout)
+	local f=$DIR/$tfile
+	local connection_count
+	# declare stripe_index local instead of leaking a global
+	local stripe_index
+
+	$LFS setstripe -i 0 -c 1 $f
+	stripe_index=$($LFS getstripe -i $f)
+	if [ $stripe_index -ne 0 ]; then
+		$LFS getstripe $f
+		error "$f: stripe_index $stripe_index != 0" && return
+	fi
+
+	$LCTL set_param timeout=$obd_timeout
+	stack_trap "$LCTL set_param timeout=$old && client_reconnect" EXIT
+
+	# OBD_FAIL_OST_CONNECT_NET2
+	# lost reply to connect request
+	do_facet ost1 lctl set_param fail_loc=0x00000225 timeout=$obd_timeout
+	stack_trap "do_facet ost1 $LCTL set_param fail_loc=0 timeout=$old" EXIT
+
+	ost_evict_client
+	# force reconnect
+	$LFS df $MOUNT > /dev/null 2>&1 &
+	sleep $((obd_timeout * 3 / 4))
+
+	$LCTL get_param osc.$FSNAME-OST0000-osc-*.state
+	# count CONNECTING transitions logged since the last FULL state
+	connection_count=$($LCTL get_param osc.$FSNAME-OST0000-osc-*.state |
+		tac | sed "/FULL/,$ d" | grep CONNECTING | wc -l)
+
+	echo $connection_count
+	(($connection_count >= 6)) || error "Client reconnected too slow"
+}
+run_test 147 "Check client reconnect"
+
+test_148() {
+	local wce_param="obdfilter.$FSNAME-OST0000.writethrough_cache_enable"
+	local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+	local amc=$(at_max_get client)
+	local amo=$(at_max_get ost1)
+	local timeout
+
+	# disable adaptive timeouts so request_timeout is deterministic
+	at_max_set 0 client
+	at_max_set 0 ost1
+	timeout=$(request_timeout client)
+
+	[ "$(facet_fstype ost1)" = "ldiskfs" ] && {
+		# save old r/o cache settings
+		save_lustre_params ost1 $wce_param > $p
+
+		# disable r/o cache
+		do_facet ost1 "$LCTL set_param -n $wce_param=0"
+	}
+
+	$LFS setstripe -i 0 -c 1 $DIR/$tfile
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 oflag=direct
+	# keep a reference copy of the zero block for the cmp below
+	cp $DIR/$tfile $TMP/$tfile
+	#define OBD_FAIL_OST_BRW_PAUSE_BULK2 0x227
+	do_facet ost1 $LCTL set_param fail_loc=0x80000227
+	# fail_val pauses the bulk past the request timeout to force a
+	# resend; the second write to the same block races that resend
+	do_facet ost1 $LCTL set_param fail_val=$((timeout+2))
+	dd if=/dev/urandom of=$DIR/$tfile bs=4096 count=1 conv=notrunc,fdatasync
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=notrunc,fdatasync
+	sleep 2
+	cancel_lru_locks osc
+	# file must hold the final zero block, not stale resent urandom data
+	cmp -b $DIR/$tfile $TMP/$tfile || error "wrong data"
+
+	rm -f $DIR/$tfile $TMP/$tfile
+
+	at_max_set $amc client
+	at_max_set $amo ost1
+
+	[ "$(facet_fstype ost1)" = "ldiskfs" ] && {
+		# restore initial r/o cache settings
+		restore_lustre_params < $p
+	}
+
+	return 0
+}
+run_test 148 "data corruption through resend"
+
+test_149() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+
+	test_mkdir -i 0 -c $MDSCOUNT $DIR/$tdir || error "mkdir $tdir failed"
+
+	# make an orphan striped dir
+	$MULTIOP $DIR/$tdir D_c &
+	local PID=$!
+	sleep 0.3
+	rmdir $DIR/$tdir || error "can't rmdir"
+
+	# stop a slave MDT where one of the stripes is located
+	stop mds2 -f
+
+	# stopping should not cause orphan cleanup while another MDT
+	# is still down
+	stop mds1 -f
+
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+	# report the right facet on failure (was a copy-paste "mds1")
+	start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS || error "mds2 start fail"
+
+	kill -USR1 $PID
+	wait $PID
+
+	clients_up
+	return 0
+}
+run_test 149 "skip orphan removal at umount"
+
# standard suite epilogue: record elapsed time, clean up the test
# filesystem, and exit with the aggregated test status
complete $SECONDS
check_and_cleanup_lustre
exit_status