# Prologue: abort on unhandled errors while the test environment is set up.
set -e
-export MULTIOP=${MULTIOP:-multiop}
PTLDEBUG=${PTLDEBUG:--1}
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+LUSTRE=${LUSTRE:-$(dirname $0)/..}
# Pull in the shared Lustre test harness and initialize env/logging.
. $LUSTRE/tests/test-framework.sh
init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
ALWAYS_EXCEPT="$RECOVERY_SMALL_EXCEPT "
-# bug number for skipped test:
-ALWAYS_EXCEPT+=" "
-# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-
-require_dsh_mds || exit 0
-
-# also long tests: 19, 21a, 21e, 21f, 23, 27
-
-[ "$SLOW" = "no" ] && EXCEPT_SLOW=""
+if $SHARED_KEY; then
+	# bug number for skipped test: LU-12896
+	ALWAYS_EXCEPT+=" 110k"
+	# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+fi
build_test_filter
+require_dsh_mds || exit 0
+
# Allow us to override the setup if we already have a mounted system by
# setting SETUP=" " and CLEANUP=" "
SETUP=${SETUP:-""}
# Trigger one MDS failover after each delay (in seconds) listed in $SEQ.
for i in $SEQ
do
#echo failover in $i sec
-	log "test_$testnum: failover in $i sec"
+	log "$TESTNAME: failover in $i sec"
	sleep $i
	facet_failover $SINGLEMDS
done
mount_client $DIR2
#grant lock1, export2
-	$SETSTRIPE -i -0 $DIR2/$tfile || return 1
-	$MULTIOP $DIR2/$tfile Ow || return 2
+	$LFS setstripe -i -0 $DIR2/$tfile || error "setstripe failed"
+	$MULTIOP $DIR2/$tfile Ow || error "multiop failed"
#define OBD_FAIL_LDLM_BL_EVICT 0x31e
	do_facet ost $LCTL set_param fail_loc=0x31e
	# NOTE(review): 0x80000000 presumably marks the fail_loc one-shot — confirm
	do_nodes $list $LCTL set_param fail_loc=0x80000136
#initiate the re-connect & re-send
	# Look up the MDC device for MDT0000 and force a reconnect to the
	# currently active connection UUID.
-	local mdccli=$($LCTL dl | awk '/-MDT0000-mdc-/ {print $4;}')
+	local mdtname="MDT0000"
+	local mdccli=$($LCTL dl | grep "${mdtname}-mdc" | awk '{print $4;}')
	local conn_uuid=$($LCTL get_param -n mdc.${mdccli}.conn_uuid)
	$LCTL set_param "mdc.${mdccli}.import=connection=${conn_uuid}"
	sleep 2
	# Since the client just mounted, its last_rcvd entry is not on disk.
	# Send an RPC so exp_need_sync forces last_rcvd to commit this export
	# so the client can reconnect during OST recovery (LU-924, LU-1582)
-	$SETSTRIPE -i 0 $DIR/$tfile
+	$LFS setstripe -i 0 $DIR/$tfile
	dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=sync
# make sure MGS's state is Partial
test_108() {
	mkdir -p $DIR/$tdir
-	$SETSTRIPE -c 1 -i 0 $DIR/$tdir
+	$LFS setstripe -c 1 -i 0 $DIR/$tdir
	# Background write keeps I/O in flight while recovery is exercised.
	dd if=/dev/zero of=$DIR/$tdir/$tfile bs=1M count=256 &
	local dd_pid=$!
	drop_update_reply $mdtidx "$LFS mkdir -i $mdtidx -c2 $remote_dir" ||
		error "lfs mkdir failed"
-	diridx=$($GETSTRIPE -m $remote_dir)
+	diridx=$($LFS getstripe -m $remote_dir)
	[ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
	rm -rf $DIR/$tdir || error "rmdir failed"
#define OBD_FAIL_FLD_QUERY_REQ 0x1103
	do_facet mds2 lctl set_param fail_loc=0x1103
	# abort_recovery start must still succeed even with FLD queries failing
-	start mds2 $(mdsdevname 2) -o abort_recovery ||
+	local OPTS="$MDS_MOUNT_OPTS -o abort_recovery"
+	start mds2 $(mdsdevname 2) $OPTS ||
		error "start MDS with abort_recovery should succeed"
	do_facet mds2 lctl set_param fail_loc=0
	# cleanup
	stop mds2 || error "cleanup: stop mds2 failed"
-	start mds2 $(mdsdevname 2) || error "cleanup: start mds2 failed"
+	start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS ||
+		error "cleanup: start mds2 failed"
	zconf_mount $(hostname) $MOUNT || error "cleanup: mount failed"
	client_up || error "post-failover df failed"
}
#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
	do_facet $SINGLEMDS lctl set_param fail_loc=0x151
	stop $SINGLEMDS || error "stop MDS failed"
	# With changelog init forced to fail, MDS startup itself must fail ...
-	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) &&
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS &&
		error "start MDS should fail"
	do_facet $SINGLEMDS lctl set_param fail_loc=0
	# ... and once the fail_loc is cleared, startup must succeed again.
-	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) ||
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
		error "start MDS failed"
}
run_test 111 "mdd setup fail should not cause umount oops"
	rm -f $DIR/$tfile
	# get a lock on client so that export would reach the stale list
-	$SETSTRIPE -i 0 $DIR/$tfile || error "setstripe failed"
+	$LFS setstripe -i 0 $DIR/$tfile || error "setstripe failed"
	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=fsync ||
		error "dd failed"
	rm -f $DIR/$tfile
	# get a lock on client so that export would reach the stale list
-	$SETSTRIPE -i 0 $DIR/$tfile || error "setstripe failed"
+	$LFS setstripe -i 0 $DIR/$tfile || error "setstripe failed"
	dd if=/dev/zero of=$DIR/$tfile count=1 || error "dd failed"
	# another IO under the same lock
}
run_test 137 "late resend must be skipped if already applied"
+# Verify that an MDT can be cleanly unmounted while it is still in recovery
+# (waiting for clients and failing to fetch OSP logs).
+test_138() {
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	[[ "$MDS1_VERSION" -ge $(version_code 2.12.59) ]] ||
+		skip "Need server version newer than 2.12.59"
+
+	zconf_umount_clients $CLIENTS $MOUNT
+
+#define OBD_FAIL_TGT_RECOVERY_CONNECT 0x724
+	#delay a first step of recovery when MDS waiting clients
+	#and failing to get osp logs
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0x724 fail_val=5
+
+	facet_failover $SINGLEMDS
+
+	#waiting failover and recovery timer
+	#the value is based on target_recovery_overseer() wait_event timeout
+	sleep 55
+	stop $SINGLEMDS || error "stop MDS failed"
+	do_facet $SINGLEMDS $LCTL set_param fail_loc=0
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) ||
+		error "start MDS failed"
+	zconf_mount_clients $CLIENTS $MOUNT
+}
+run_test 138 "Umount MDT during recovery"
+
+# Verify that starting an MDT with a corrupted catalog id (injected via
+# OBD_FAIL_OSP_INVALID_LOGID) does not crash the server.
+test_139() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	[ $MDS1_VERSION -lt $(version_code 2.13.50) ] &&
+		skip "Need MDS version at least 2.13.50"
+
+	mdt_dev=$(mdsdevname 1)
+
+	stop $SINGLEMDS || error "stop $SINGLEMDS failed"
+
+#define OBD_FAIL_OSP_INVALID_LOGID 0x2106
+	# 0x80000000 flag: one-shot injection; fail_val selects the corruption
+	do_facet $SINGLEMDS $LCTL set_param fail_val=0x68 fail_loc=0x80002106
+	start $SINGLEMDS $mdt_dev $MDS_MOUNT_OPTS || error "Fail to start MDT"
+}
+run_test 139 "corrupted catid won't cause crash"
+
+# Verify that MDS-local client mounts are flagged with "no_recovery" exactly
+# when mdt.*.local_recovery is disabled, and not flagged when it is enabled.
+test_140a() {
+	# NB: the version actually gated on is 2.12.58; the skip message must
+	# match the check (it previously claimed 2.13.50).
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	# remember the current setting so the stack_trap can restore it on exit
+	slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	# so local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+	mount_mds_client
+
+	local cnt
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] && error "no clients with recovery disabled"
+
+	# enable recovery for local clients
+	# so no local clients should be marked with no_recovery flag
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=1
+	mount_mds_client
+
+	cnt=$(do_facet mds1 $LCTL get_param "mdt.*.exports.*.export" |
+		grep export_flags.*no_recovery | wc -l)
+	echo "$cnt clients with recovery disabled"
+	umount_mds_client
+	[ $cnt -eq 0 ] || error "$cnt clients with recovery disabled"
+}
+run_test 140a "local mount is flagged properly"
+
+# Verify that a local (server-side) client mount marked no_recovery is
+# excluded from recovery, so recovery after failover finishes quickly.
+test_140b() {
+	# NB: the version actually gated on is 2.12.58; the skip message must
+	# match the check (it previously claimed 2.13.50).
+	[ $MDS1_VERSION -lt $(version_code 2.12.58) ] &&
+		skip "Need MDS version at least 2.12.58"
+
+	[ "$SHARED_KEY" = true ] &&
+		skip "server local client incompatible with SSK keys installed"
+
+	# remember the current setting so the stack_trap can restore it on exit
+	slr=$(do_facet mds1 \
+		$LCTL get_param -n mdt.$FSNAME-MDT0000.local_recovery)
+	stack_trap "do_facet mds1 $LCTL set_param \
+		mdt.*.local_recovery=$slr" EXIT
+
+	# disable recovery for local clients
+	do_facet mds1 $LCTL set_param mdt.*.local_recovery=0
+
+	mount_mds_client
+	replay_barrier mds1
+	umount_mds_client
+	fail mds1
+	# parse the recovery duration (seconds) from the console log
+	local recovery=$(do_facet mds1 dmesg |
+		awk -F: '/Recovery over after/ { print $4 }' |
+		cut -d, -f1 | tail -1)
+	# the condition fails when recovery >= 2*TIMEOUT, so say ">=" here
+	(( $recovery < $TIMEOUT*2 )) ||
+		error "recovery took too long $recovery >= $((TIMEOUT * 2))"
+}
+run_test 140b "local mount is excluded from recovery"
+
+# Verify that MGC ldlm locks on the server survive an MGS restart: the lock
+# count before and after fail $SINGLEMDS must be identical.
+test_141() {
+	local oldc
+	local newc
+
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	combined_mgs_mds || skip "needs combined MGS/MDT"
+	( local_mode || from_build_tree ) &&
+		skip "cannot run in local mode or from build tree"
+
+	# some get_param have a bug to handle dot in param name
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	oldc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+	fail $SINGLEMDS
+	do_rpc_nodes $(facet_active_host $SINGLEMDS) cancel_lru_locks MGC
+	newc=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		'ldlm.namespaces.MGC*.lock_count')
+
+	[ $oldc -eq $newc ] || error "mgc lost locks ($oldc != $newc)"
+	return 0
+}
+run_test 141 "do not lose locks on MGS restart"
+
+test_142() {
+ [ $MDS1_VERSION -lt $(version_code 2.11.56) ] &&
+ skip "Need MDS version at least 2.11.56"
+
+ #define OBD_FAIL_MDS_ORPHAN_DELETE 0x165
+ do_facet mds1 $LCTL set_param fail_loc=0x165
+ $MULTIOP $DIR/$tfile Ouc || error "multiop failed"
+
+ stop mds1
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+
+ wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+ error "MDD orphan cleanup thread not quit"
+}
+run_test 142 "orphan name stub can be cleaned up in startup"
+
+# Verify the orphan cleanup thread is not blocked at startup even when
+# deleting an entry under PENDING fails (a plain file is planted there via
+# a direct backend-fstype mount while the MDT is stopped).
+test_143() {
+	# NOTE(review): siblings use $MDS1_VERSION; this uses
+	# lustre_version_code $SINGLEMDS — confirm whether they can differ
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.13.00) ] &&
+		skip "Need MDS version at least 2.13.00"
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+
+	local mntpt=$(facet_mntpt $SINGLEMDS)
+	stop mds1
+	mount_fstype $SINGLEMDS || error "mount as fstype $SINGLEMDS failed"
+	do_facet $SINGLEMDS touch $mntpt/PENDING/$tfile
+	unmount_fstype $SINGLEMDS
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || error "mds1 start fail"
+
+	wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
+	wait_update_facet mds1 "pgrep orph_.*-MDD | wc -l" "0" ||
+		error "MDD orphan cleanup thread not quit"
+}
+run_test 143 "orphan cleanup thread shouldn't be blocked even delete failed"
+
# Epilogue: record elapsed time, clean up the filesystem, return test status.
complete $SECONDS
check_and_cleanup_lustre
exit_status