X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Frecovery-small.sh;h=c5166e917c234a2af1b2e00ac2f9278289afad3c;hp=891ac23294c38fb48d770bcbee429b749ee99448;hb=49acd1450fff0f710924b008ab85fdb3f5f68015;hpb=fba6abdb9818b01d02ac7663e4ac9881258c8ead

diff --git a/lustre/tests/recovery-small.sh b/lustre/tests/recovery-small.sh
index 891ac23..c5166e9 100755
--- a/lustre/tests/recovery-small.sh
+++ b/lustre/tests/recovery-small.sh
@@ -2,27 +2,18 @@
 
 set -e
 
-export MULTIOP=${MULTIOP:-multiop}
 PTLDEBUG=${PTLDEBUG:--1}
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+LUSTRE=${LUSTRE:-$(dirname $0)/..}
 . $LUSTRE/tests/test-framework.sh
 init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 init_logging
 
 ALWAYS_EXCEPT="$RECOVERY_SMALL_EXCEPT "
-# bug number for skipped test:
-ALWAYS_EXCEPT+=" "
-# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-
-require_dsh_mds || exit 0
-
-# also long tests: 19, 21a, 21e, 21f, 23, 27
-
-[ "$SLOW" = "no" ] && EXCEPT_SLOW=""
 
 build_test_filter
 
+require_dsh_mds || exit 0
+
 # Allow us to override the setup if we already have a mounted system by
 # setting SETUP=" " and CLEANUP=" "
 SETUP=${SETUP:-""}
@@ -1078,6 +1069,8 @@ test_26a() {      # was test_26 bug 5921 - evict dead exports by pinger
 
 	check_timeout || return 1
 
+	# make sure all imports are connected and not IDLE
+	do_facet client lfs df > /dev/null
 	# OBD_FAIL_PTLRPC_DROP_RPC 0x505
 	do_facet client lctl set_param fail_loc=0x505
 	local before=$(date +%s)
@@ -1088,7 +1081,7 @@ test_26a() {      # was test_26 bug 5921 - evict dead exports by pinger
 	# the loser might have to wait for the next ping.
 	sleep $((TIMEOUT * 2 + TIMEOUT * 3 / 4))
 	do_facet client lctl set_param fail_loc=0x0
-	do_facet client df > /dev/null
+	do_facet client lfs df > /dev/null
 
 	local oscs=$(lctl dl | awk '/-osc-/ {print $4}')
 	check_clients_evicted $before ${oscs[@]}
@@ -1126,10 +1119,9 @@ test_26b() {      # bug 10140 - evict dead exports by pinger
 	#             = 9 * PING_INTERVAL + PING_INTERVAL
 	#             = 10 PING_INTERVAL = 10 obd_timeout / 4 = 2.5 obd_timeout
 	# let's wait $((TIMEOUT * 3)) # bug 19887
-	local rc=0
-	wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) || \
-		error "Client was not evicted by ost" rc=1
-	wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) || \
+	wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 3)) ||
+		error "Client was not evicted by ost"
+	wait_client_evicted $SINGLEMDS $MDS_NEXP $((TIMEOUT * 3)) ||
 		error "Client was not evicted by mds"
 }
 run_test 26b "evict dead exports"
@@ -1233,7 +1225,7 @@ test_51() {
 	for i in $SEQ
 	do
 		#echo failover in $i sec
-		log "test_$testnum: failover in $i sec"
+		log "$TESTNAME: failover in $i sec"
 		sleep $i
 		facet_failover $SINGLEMDS
 	done
@@ -1534,8 +1526,8 @@ test_65() {
 	mount_client $DIR2
 
 	#grant lock1, export2
-	$SETSTRIPE -i -0 $DIR2/$tfile || return 1
-	$MULTIOP $DIR2/$tfile Ow || return 2
+	$LFS setstripe -i 0 $DIR2/$tfile || error "setstripe failed"
+	$MULTIOP $DIR2/$tfile Ow || error "multiop failed"
 
 	#define OBD_FAIL_LDLM_BL_EVICT 0x31e
 	do_facet ost $LCTL set_param fail_loc=0x31e
@@ -1932,7 +1924,7 @@ test_105()
 	# Since the client just mounted, its last_rcvd entry is not on disk.
 	# Send an RPC so exp_need_sync forces last_rcvd to commit this export
 	# so the client can reconnect during OST recovery (LU-924, LU-1582)
-	$SETSTRIPE -i 0 $DIR/$tfile
+	$LFS setstripe -i 0 $DIR/$tfile
 	dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=sync
 
 	# make sure MGS's state is Partial
@@ -2034,7 +2026,7 @@ run_test 107 "drop reint reply, then restart MDT"
 
 test_108() {
 	mkdir -p $DIR/$tdir
-	$SETSTRIPE -c 1 -i 0 $DIR/$tdir
+	$LFS setstripe -c 1 -i 0 $DIR/$tdir
 	dd if=/dev/zero of=$DIR/$tdir/$tfile bs=1M count=256 &
 	local dd_pid=$!
 
@@ -2097,7 +2089,7 @@ test_110c () {
 	drop_update_reply $mdtidx "$LFS mkdir -i $mdtidx -c2 $remote_dir" ||
 		error "lfs mkdir failed"
 
-	diridx=$($GETSTRIPE -m $remote_dir)
+	diridx=$($LFS getstripe -m $remote_dir)
 	[ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
 
 	rm -rf $DIR/$tdir || error "rmdir failed"
@@ -2266,13 +2258,15 @@ test_110k() {
 
 #define OBD_FAIL_FLD_QUERY_REQ 0x1103
 	do_facet mds2 lctl set_param fail_loc=0x1103
-	start mds2 $(mdsdevname 2) -o abort_recovery ||
+	local OPTS="$MDS_MOUNT_OPTS -o abort_recovery"
+	start mds2 $(mdsdevname 2) $OPTS ||
 		error "start MDS with abort_recovery should succeed"
 	do_facet mds2 lctl set_param fail_loc=0
 
 	# cleanup
 	stop mds2 || error "cleanup: stop mds2 failed"
-	start mds2 $(mdsdevname 2) || error "cleanup: start mds2 failed"
+	start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS ||
+		error "cleanup: start mds2 failed"
 	zconf_mount $(hostname) $MOUNT || error "cleanup: mount failed"
 	client_up || error "post-failover df failed"
 }
@@ -2287,10 +2281,10 @@ test_111 ()
 #define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
 	do_facet $SINGLEMDS lctl set_param fail_loc=0x151
 	stop $SINGLEMDS || error "stop MDS failed"
-	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS &&
 		error "start MDS should fail"
 	do_facet $SINGLEMDS lctl set_param fail_loc=0
-	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) ||
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
 		error "start MDS failed"
 }
 run_test 111 "mdd setup fail should not cause umount oops"
@@ -2672,7 +2666,7 @@ test_132() {
 
 	rm -f $DIR/$tfile
 	# get a lock on client so that export would reach the stale list
-	$SETSTRIPE -i 0 $DIR/$tfile || error "setstripe failed"
+	$LFS setstripe -i 0 $DIR/$tfile || error "setstripe failed"
 	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=fsync ||
 		error "dd failed"
 
@@ -2704,7 +2698,7 @@ test_131() {
 
 	rm -f $DIR/$tfile
 	# get a lock on client so that export would reach the stale list
-	$SETSTRIPE -i 0 $DIR/$tfile || error "setstripe failed"
+	$LFS setstripe -i 0 $DIR/$tfile || error "setstripe failed"
 	dd if=/dev/zero of=$DIR/$tfile count=1 || error "dd failed"
 
 	# another IO under the same lock
@@ -2876,6 +2870,295 @@ test_137() {
 }
 run_test 137 "late resend must be skipped if already applied"
 
+test_138() {
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	[[ "$MDS1_VERSION" -ge $(version_code 2.12.59) ]] ||
+		skip "Need server version at least 2.12.59"
+
+	zconf_umount_clients $CLIENTS $MOUNT
+
+#define OBD_FAIL_TGT_RECOVERY_CONNECT 0x724
+	# delay the first step of recovery, while the MDS is waiting for
+	# clients and failing to get the OSP llogs
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x724 fail_val=5
+
+	facet_failover $SINGLEMDS
+
+	# wait out failover and the recovery timer; the value is based on
+	# the target_recovery_overseer() wait_event timeout
+	sleep 55
+	stop $SINGLEMDS || error "stop MDS failed"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
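+	# with the fail_loc cleared, the MDS should come back cleanly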
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
+		error "start MDS failed"
+	zconf_mount_clients $CLIENTS $MOUNT
+}
+run_test 138 "Umount MDT during recovery"
+
+test_139() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	[ $MDS1_VERSION -lt $(version_code 2.13.50) ] &&
+		skip "Need MDS version at least 2.13.50"
+
+	mdt_dev=$(mdsdevname 1)
+
+	stop $SINGLEMDS || error "stop $SINGLEMDS failed"
+
+#define OBD_FAIL_OSP_INVALID_LOGID 0x2106
+	do_facet $SINGLEMDS $LCTL set_param fail_val=0x68 fail_loc=0x80002106
+	start $SINGLEMDS $mdt_dev $MDS_MOUNT_OPTS || error "Fail to start MDT"
+}
+run_test 139 "corrupted catid won't cause crash"
+
+test_140a() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	# so local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+	mount_mds_client
+
+	local cnt
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] && error "no clients with recovery disabled"
+
+	# enable recovery for local clients
+	# so no local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=1
+	mount_mds_client
+
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*MDT0000.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] || error "$cnt clients with recovery disabled"
+}
+run_test 140a "local mount is flagged properly"
+
+test_140b() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+
+	mount_mds_client
+	replay_barrier mds1
+	umount_mds_client
+	fail mds1
+	# Lustre: tfs-MDT0000: Recovery over after 0:03, of 2 clients 2 rec...
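+	# the sample console line above shows the expected log format; the
+	# awk pipeline below converts its mm:ss duration field into seconds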
+	local recovery=$(do_facet mds1 dmesg |
+		awk '/Recovery over after/ { print $6 }' | tail -1 |
+		awk -F: '{ print $1 * 60 + $2 }')
+	(( recovery < TIMEOUT * 2 + 5 )) ||
+		error "recovery took too long $recovery > $((TIMEOUT * 2 + 5))"
+}
+run_test 140b "local mount is excluded from recovery"
+
+test_141() {
+	local oldc
+	local newc
+
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	combined_mgs_mds || skip "needs combined MGS/MDT"
+	( local_mode || from_build_tree ) &&
+		skip "cannot run in local mode or from build tree"
+
+	# some get_param variants have a bug handling a dot in the param name
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	oldc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+	fail $SINGLEMDS
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	newc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+
+	[ $oldc -eq $newc ] || error "mgc lost locks ($oldc != $newc)"
+	return 0
+}
+run_test 141 "do not lose locks on MGS restart"
+
+test_142() {
+	[ $MDS1_VERSION -lt $(version_code 2.11.56) ] &&
+		skip "Need MDS version at least 2.11.56"
+
+	#define OBD_FAIL_MDS_ORPHAN_DELETE 0x165
+	do_facet mds1 $LCTL set_param fail_loc=0x165
+	$MULTIOP $DIR/$tfile Ouc || error "multiop failed"
+
+	stop mds1
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 142 "orphan name stub can be cleaned up in startup"
+
+test_143() {
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.13.00) ] &&
+		skip "Need MDS version at least 2.13.00"
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+
+	local mntpt=$(facet_mntpt $SINGLEMDS)
+	stop mds1
+	mount_fstype $SINGLEMDS || error "mount as fstype $SINGLEMDS failed"
+	do_facet $SINGLEMDS touch $mntpt/PENDING/$tfile
+	unmount_fstype $SINGLEMDS
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+
+	wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 143 "orphan cleanup thread shouldn't be blocked even delete failed"
+
+test_145() {
+	[ $MDSCOUNT -lt 3 ] && skip "needs >= 3 MDTs"
+	[ $(facet_active_host mds2) = $(facet_active_host mds3) ] &&
+		skip "needs mds2 and mds3 on separate nodes"
+
+	replay_barrier mds1
+
+	touch $DIR/$tfile
+
+#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
+	echo block mds_connect from mds2
+	do_facet mds2 "$LCTL set_param fail_loc=0x507"
+
+#define OBD_FAIL_OUT_UPDATE_DROP 0x1707
+	echo block recovery updates from mds3
+	do_facet mds3 "$LCTL set_param fail_loc=0x1707"
+
+	local hard_timeout=\
+$(do_facet mds1 $LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_time_hard)
+
+	fail mds1 &
+
+	local get_soft_timeout_cmd=\
+"$LCTL get_param -n mdt.$FSNAME-MDT0000.recovery_time_soft 2>/dev/null"
+
+	echo wait until mds1 recovery_time_soft is $hard_timeout
+	wait_update $(facet_host mds1) "$get_soft_timeout_cmd" \
+"$hard_timeout" $hard_timeout
+
+	echo unblock mds_connect from mds2
+	do_facet mds2 "$LCTL set_param fail_loc=0"
+
+	echo unblock recovery updates from mds3
+	do_facet mds3 "$LCTL set_param fail_loc=0"
+
+	wait
+	[ -f $DIR/$tfile ] || error "$DIR/$tfile does not exist"
+}
+run_test 145 "connect mdtlovs and process update logs after recovery expire"
+
+test_147() {
+	local obd_timeout=200
+	local old=$($LCTL get_param -n timeout)
+	local f=$DIR/$tfile
+	local connection_count
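+
+	# force the client to reconnect while its connect replies are being
+	# dropped, then count the CONNECTING attempts since the last FULL state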
+	$LFS setstripe -i 0 -c 1 $f
+	stripe_index=$($LFS getstripe -i $f)
+	if [ $stripe_index -ne 0 ]; then
+		$LFS getstripe $f
+		error "$f: stripe_index $stripe_index != 0" && return
+	fi
+
+	$LCTL set_param timeout=$obd_timeout
+	stack_trap "$LCTL set_param timeout=$old && client_reconnect" EXIT
+
+	# OBD_FAIL_OST_CONNECT_NET2
+	# lost reply to connect request
+	do_facet ost1 lctl set_param fail_loc=0x00000225 timeout=$obd_timeout
+	stack_trap "do_facet ost1 $LCTL set_param fail_loc=0 timeout=$old" EXIT
+
+
+	ost_evict_client
+	# force reconnect
+	$LFS df $MOUNT > /dev/null 2>&1 &
+	sleep $((obd_timeout * 3 / 4))
+
+	$LCTL get_param osc.$FSNAME-OST0000-osc-*.state
+	connection_count=$($LCTL get_param osc.$FSNAME-OST0000-osc-*.state |
+		tac | sed "/FULL/,$ d" | grep CONNECTING | wc -l)
+
+	echo $connection_count
+	(($connection_count >= 6)) || error "Client reconnected too slow"
+}
+run_test 147 "Check client reconnect"
+
+test_148() {
+	local wce_param="obdfilter.$FSNAME-OST0000.writethrough_cache_enable"
+	local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+	local amc=$(at_max_get client)
+	local amo=$(at_max_get ost1)
+	local timeout
+
+	at_max_set 0 client
+	at_max_set 0 ost1
+	timeout=$(request_timeout client)
+
+	[ "$(facet_fstype ost1)" = "ldiskfs" ] && {
+		# save the old writethrough cache setting
+		save_lustre_params ost1 $wce_param > $p
+
+		# disable the writethrough cache
+		do_facet ost1 "$LCTL set_param -n $wce_param=0"
+	}
+
+	$LFS setstripe -i 0 -c 1 $DIR/$tfile
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 oflag=direct
+	cp $DIR/$tfile $TMP/$tfile
+	#define OBD_FAIL_OST_BRW_PAUSE_BULK2 0x227
+	do_facet ost1 $LCTL set_param fail_loc=0x80000227
+	do_facet ost1 $LCTL set_param fail_val=$((timeout+2))
+	dd if=/dev/urandom of=$DIR/$tfile bs=4096 count=1 conv=notrunc,fdatasync
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=notrunc,fdatasync
+	sleep 2
+	cancel_lru_locks osc
+	cmp -b $DIR/$tfile $TMP/$tfile || error "wrong data"
+
+	rm -f $DIR/$tfile $TMP/$tfile
+
+	at_max_set $amc client
+	at_max_set $amo ost1
+
+	[ "$(facet_fstype ost1)" = "ldiskfs" ] && {
+		# restore the initial writethrough cache setting
+		restore_lustre_params < $p
+	}
+
+	return 0
+}
+run_test 148 "data corruption through resend"
+
 complete $SECONDS
 check_and_cleanup_lustre
 exit_status