# bug number for skipped test:
# a tool to create lustre filesystem images
ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
+# Shared-secret-key (SSK) setups cannot run these tests yet; tracked
+# as LU-9795.
+if $SHARED_KEY; then
+# bug number for skipped tests: LU-9795 (all below)
+	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 0 31 32a 32d 35a"
+	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 53a 53b 54b 76a 76b"
+	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 76c 76d 78 103"
+fi
SRCDIR=$(dirname $0)
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
}
# t32_verify_quota <facet> <fsname> <mnt>
# Verify that quota enforcement still works on an upgraded image:
# enable mdt/ost quota for $fsname and exercise a write on $mnt as the
# test quota user $T32_QID. Returns 1 (skip) for pre-0.7.0 zfs.
# NOTE(review): this hunk renames $node -> $facet and do_node ->
# do_facet so the probe runs on the facet's active host, and restores
# the missing opening quote on the quota.mdt/quota.ost check values.
t32_verify_quota() {
-	local node=$1
+	local facet=$1
	local fsname=$2
	local mnt=$3
	local fstype=$(facet_fstype $SINGLEMDS)
	# verification in 32b. The object quota usage should be accurate after
	# zfs-0.7.0 is released.
	[ $fstype == "zfs" ] && {
-		local zfs_version=$(do_node $node cat /sys/module/zfs/version)
+		local zfs_version=$(do_facet $facet cat /sys/module/zfs/version)
		[ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
			echo "Skip quota verify for zfs: $zfs_version"
			return 1
		}
-	set_persistent_param_and_check $node \
+	set_persistent_param_and_check $facet \
		"osd-$fstype.$fsname-MDT0000.quota_slave.enabled" \
-		$fsname.quota.mdt" ug
+		"$fsname.quota.mdt" ug
-	set_persistent_param_and_check $node \
+	set_persistent_param_and_check $facet \
		"osd-$fstype.$fsname-OST0000.quota_slave.enabled" \
-		$fsname.quota.ost" ug
+		"$fsname.quota.ost" ug
	chmod 0777 $mnt
	runas -u $T32_QID -g $T32_QID dd if=/dev/zero of=$mnt/t32_qf_new \
shall_cleanup_lustre=true
$r $LCTL set_param debug="$PTLDEBUG"
+ # Leave re-enabling this to a separate patch for LU-11558
+ # t32_verify_quota $SINGLEMDS $fsname $tmp/mnt/lustre || {
+ # error_noexit "verify quota failed"
+ # return 1
+ #}
+
if $r test -f $tmp/list; then
#
	# There is no Test Framework API to copy files to or
}
nrpcs=$((nrpcs_orig + 5))
- set_persistent_param_and_check $HOSTNAME \
+ set_persistent_param_and_check client \
"mdc.$fsname-MDT0000*.max_rpcs_in_flight" \
"$fsname-MDT0000.mdc.max_rpcs_in_flight" $nrpcs || {
error_noexit "Changing \"max_rpcs_in_flight\""
run_test 49b "check PARAM_SYS_LDLM_TIMEOUT option of mkfs.lustre"
# lazystatfs <mountpoint>
# Check that both statfs() (via multiop 'f_') and "lfs df -l" complete
# without hanging while a target is down, i.e. lazy statfs works.
# Returns 0 on success, non-zero if either call failed or hung.
# NOTE(review): this hunk folds RC1/RC2 into a single RC and makes
# "lfs df still running after 5s" an explicit failure, and sleeps past
# the statfs cache window so stale cached results are not returned.
lazystatfs() {
+	# wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
+	sleep 2
	# Test both statfs and lfs df and fail if either one fails
	multiop_bg_pause $1 f_
-	RC1=$?
+	RC=$?
	PID=$!
	killall -USR1 multiop
-	[ $RC1 -ne 0 ] && log "lazystatfs multiop failed"
-	wait $PID || { RC1=$?; log "multiop return error "; }
+	[ $RC -ne 0 ] && log "lazystatfs multiop failed"
+	wait $PID || { RC=$?; log "multiop return error "; }
+	# wait long enough to exceed OBD_STATFS_CACHE_SECONDS = 1
+	sleep 2
	$LFS df -l &
	PID=$!
	sleep 5
-	kill -s 0 $PID
-	RC2=$?
-	if [ $RC2 -eq 0 ]; then
-		kill -s 9 $PID
-		log "lazystatfs df failed"
+	# if lfs df is still alive after 5s it hung on the down target
+	if kill -s 0 $PID; then
+		RC=1
+		kill -s 9 $PID
+		log "lazystatfs lfs df failed to complete in 5s"
	fi
-	RC=0
-	[[ $RC1 -ne 0 || $RC2 -eq 0 ]] && RC=1
	return $RC
}
# Wait for client to detect down OST
stop_ost || error "Unable to stop OST1"
- wait_osc_import_state mds ost DISCONN
+ wait_osc_import_state client ost DISCONN
+ $LCTL dl
+ log "OSCs should all be DISCONN"
lazystatfs $MOUNT || error "lazystatfs should not return EIO"
run_test 100 "check lshowmount lists MGS, MDT, OST and 0@lo"
# Race OST reconnection against a large createmany and verify every
# created file ends up with an object on the OST ('???' in ls output
# means a file with no OST object).
# NOTE(review): hunk gaps elide part of the reconnect loop body here;
# this hunk renames the misleading *_oid var to *_pid, moves the files
# into a dedicated $tdir, and removes them afterwards.
test_101() {
-	local createmany_oid
+	local createmany_pid
	local dev=$FSNAME-OST0000-osc-MDT0000
	setup
-	createmany -o $DIR1/$tfile-%d 50000 &
-	createmany_oid=$!
+	mkdir $DIR1/$tdir
+	createmany -o $DIR1/$tdir/$tfile-%d 50000 &
+	createmany_pid=$!
	# MDT->OST reconnection causes MDT<->OST last_id synchronisation
	# via osp_precreate_cleanup_orphans.
	for ((i = 0; i < 100; i++)); do
	done
	ls -asl $MOUNT | grep '???' &&
-		(kill -9 $createmany_oid &>/dev/null; \
-		error "File hasn't object on OST")
+		{ kill -9 $createmany_pid &>/dev/null;
+		error "File has no object on OST"; }
-	kill -s 0 $createmany_oid || break
+	kill -s 0 $createmany_pid || break
	done
-	wait $createmany_oid
+	wait $createmany_pid
+
+	# remove the 50000 test files so cleanup/unmount is not slowed down
+	unlinkmany $DIR1/$tdir/$tfile-%d 50000
	cleanup
}
run_test 101 "Race MDT->OST reconnection with create"
}
run_test 117 "lctl get_param return errors properly"
+test_120() { # LU-11130
+	# A rename that crosses MDTs must not leave a corrupt symlink on
+	# disk; run e2fsck -n afterwards to prove the image is clean.
+	[ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
+	[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+		skip "ldiskfs only test"
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] &&
+		skip "Need DNE2 capable MD target with LU-11130 fix"
+
+	setup
+
+	local mds1host=$(facet_active_host mds1)
+	local mds1dev=$(mdsdevname 1)
+
+	# parent dir on MDT1, child dir on MDT0 -> the mv below crosses
+	# metadata targets
+	$LFS mkdir -i 1 $DIR/$tdir
+	$LFS mkdir -i 0 $DIR/$tdir/mds1dir
+
+	ln -s foo $DIR/$tdir/bar
+	mv $DIR/$tdir/bar $DIR/$tdir/mds1dir/bar2 ||
+		error "cross-target rename failed"
+
+	stopall
+
+	# -n: read-only fsck; any reported inconsistency fails the test
+	run_e2fsck $mds1host $mds1dev "-n"
+}
+run_test 120 "cross-target rename should not create bad symlinks"
+
test_122() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
[[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
}
run_test 122 "Check OST sequence update"
+test_123() {
+ setupall
+ local yaml_file="$TMP/$tfile.yaml"
+ do_facet mgs rm "$yaml_file"
+ local cfgfiles=$(do_facet mgs "lctl --device MGS llog_catlist |"\
+ " sed 's/config_log://'")
+
+ # set jobid_var to a different value for test
+ local orig_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P jobid_var="testname"
+
+ for i in params $cfgfiles; do
+ do_facet mgs "lctl --device MGS llog_print ${i} >> $yaml_file"
+ done
+
+ echo "Unmounting FS"
+ stopall
+ echo "Writeconf"
+ writeconf_all
+ echo "Remounting"
+ mountmgs
+ mountmds
+ mountoss
+ mountcli
+
+ # Reapply the config from before
+ echo "Setting configuration parameters"
+ do_facet mgs "lctl set_param -F $yaml_file"
+
+ local set_val=$(do_facet mgs $LCTL get_param jobid_var)
+ do_facet mgs $LCTL set_param -P $orig_val
+
+ [ $set_val == "jobid_var=testname" ] ||
+ error "$set_val is not testname"
+
+ do_facet mgs rm "$yaml_file"
+}
+run_test 123 "clear and reset all parameters using set_param -F"
+
if ! combined_mgs_mds ; then
stop mgs
fi