ONLY=${ONLY:-"$*"}
# bug number for skipped test:
-# 15977
ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
fi
+# LU-2059
+ALWAYS_EXCEPT="$ALWAYS_EXCEPT 5d 19b 21b 27a"
+
+
SRCDIR=`dirname $0`
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
#
test_17() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
setup
check_mount || return 41
cleanup || return $?
lctl get_param -n devices
DEVS=$(lctl get_param -n devices | egrep -v MG | wc -l)
[ $DEVS -gt 0 ] && return 2
+ # start mds to drop writeconf setting
+ start_mds || return 3
+ stop_mds || return 4
unload_modules_conf || return $?
}
run_test 26 "MDT startup failure cleans LOV (should return errs)"
# set_and_check <facet> <test-cmd> <conf-param> [final]
# Permanently set a configuration parameter through the MGS, then wait
# until the live value on <facet> matches the requested one.
#   $1 - facet whose running value is checked
#   $2 - command that prints the current value on that facet
#   $3 - conf_param name to set
#   $4 - (optional) value to set; defaults to the current value + 5
# Fails the test via error() if either the set or the check fails.
set_and_check() {
	local myfacet=$1
	local TEST=$2
	local PARAM=$3
	local ORIG
	# split declaration from assignment so the command status isn't masked
	ORIG=$(do_facet $myfacet "$TEST")
	if [ $# -gt 3 ]; then
		local FINAL=$4
	else
		local -i FINAL
		FINAL=$(($ORIG + 5))
	fi
	echo "Setting $PARAM from $ORIG to $FINAL"
	# quote the message so error() receives a single argument
	do_facet mgs "$LCTL conf_param $PARAM='$FINAL'" ||
		error "conf_param failed"

	wait_update $(facet_host $myfacet) "$TEST" "$FINAL" ||
		error "check failed!"
}
-
# When the OST comes up before the MDS it cannot reach the MGS yet; a
# requeue thread should keep retrying until the MGS lock is acquired.
test_27a() {
	start_ost || return 1
	start_mds || return 2
	echo "Requeue thread should have started: "
	ps -e | grep ll_cfg_requeue
	# a permanent parameter change proves the MGS lock was reacquired
	set_conf_param_and_check ost1 \
	    "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
	    "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
	cleanup_nocli
}
run_test 27a "Reacquire MGS lock if OST started first"
local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }')
facet_failover $SINGLEMDS
- set_and_check $SINGLEMDS "lctl get_param -n mdt.$device.identity_acquire_expire" "$device.mdt.identity_acquire_expire" || return 3
- set_and_check client "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight" "$device.mdc.max_rpcs_in_flight" || return 4
+ set_conf_param_and_check $SINGLEMDS \
+ "lctl get_param -n mdt.$device.identity_acquire_expire" \
+ "$device.mdt.identity_acquire_expire" || return 3
+ set_conf_param_and_check client \
+ "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
+ "$device.mdc.max_rpcs_in_flight" || return 4
check_mount
cleanup
}
PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
ORIG=$($TEST)
FINAL=$(($ORIG + 1))
- set_and_check client "$TEST" "$PARAM" $FINAL || return 3
+ set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 3
FINAL=$(($FINAL + 1))
- set_and_check client "$TEST" "$PARAM" $FINAL || return 4
+ set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 4
umount_client $MOUNT || return 200
mount_client $MOUNT
RESULT=$($TEST)
else
echo "New config success: got $RESULT"
fi
- set_and_check client "$TEST" "$PARAM" $ORIG || return 5
+ set_conf_param_and_check client "$TEST" "$PARAM" $ORIG || return 5
cleanup
}
run_test 28 "permanent parameter setting"
ACTV=$(lctl get_param -n $PROC_ACT)
DEAC=$((1 - $ACTV))
- set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
+ set_conf_param_and_check client \
+ "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
# also check ost_server_uuid status
RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV)
if [ -z "$RESULT" ]; then
echo "Waiting $(($MAX - $WAIT)) secs for MDT deactivated"
done
- # quotacheck should not fail immediately after deactivate
- [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; }
-
# test new client starts deactivated
umount_client $MOUNT || return 200
mount_client $MOUNT
echo "New client success: got $RESULT"
fi
- # quotacheck should not fail after umount/mount operation
- [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; }
-
# make sure it reactivates
- set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
+ set_conf_param_and_check client \
+ "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
umount_client $MOUNT
stop_ost2
ORIG=$($TEST)
LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
for i in ${LIST[@]}; do
- set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
+ set_conf_param_and_check client "$TEST" \
+ "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
done
# make sure client restart still works
umount_client $MOUNT
echo "Using fake nid $NEW"
TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
- set_and_check client "$TEST" "$FSNAME-OST0000.failover.node" $NEW || error "didn't add failover nid $NEW"
+ set_conf_param_and_check client "$TEST" \
+ "$FSNAME-OST0000.failover.node" $NEW ||
+ error "didn't add failover nid $NEW"
NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
echo $NIDS
NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
}
test_32a() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
client_only && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
[ -z "$TUNEFS" ] && skip_env "No tunefs" && return 0
run_test 32a "Upgrade from 1.8 (not live)"
test_32b() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
client_only && skip "client only testing" && return 0
[ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
[ -z "$TUNEFS" ] && skip_env "No tunefs" && return
run_test 37 "verify set tunables works for symlink device"
test_38() { # bug 14222
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
setup
# like runtests
COUNT=10
run_test 42 "invalid config param should not prevent client from mounting"
test_43() {
- [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
- setup
- chmod ugo+x $DIR || error "chmod 0 failed"
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
- "$FSNAME.mdt.root_squash" \
- "0:0"
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
- "$FSNAME.mdt.nosquash_nids" \
- "NONE"
+ [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
+ setup
+ chmod ugo+x $DIR || error "chmod 0 failed"
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ "$FSNAME.mdt.root_squash" \
+ "0:0"
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ "$FSNAME.mdt.nosquash_nids" \
+ "NONE"
#
# create set of test files
chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
- #
- # check root_squash:
- # set root squash UID:GID to RUNAS_ID
- # root should be able to access only files owned by RUNAS_ID
- #
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
- "$FSNAME.mdt.root_squash" \
- "$RUNAS_ID:$RUNAS_ID"
+ #
+ # check root_squash:
+ # set root squash UID:GID to RUNAS_ID
+ # root should be able to access only files owned by RUNAS_ID
+ #
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ "$FSNAME.mdt.root_squash" \
+ "$RUNAS_ID:$RUNAS_ID"
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
error "$ST: root create permission is granted"
echo "$ST: root create permission is denied - ok"
- #
- # check nosquash_nids:
- # put client's NID into nosquash_nids list,
- # root should be able to access root file after that
- #
- local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
- NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
- NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
- "$FSNAME-MDTall.mdt.nosquash_nids" \
- "$NIDLIST"
+ #
+ # check nosquash_nids:
+ # put client's NID into nosquash_nids list,
+ # root should be able to access root file after that
+ #
+ local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
+ NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
+ NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ "$FSNAME-MDTall.mdt.nosquash_nids" \
+ "$NIDLIST"
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
}
test_52() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
start_mds
[ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
start_ost
local facet=$2
local parampat=$3
local opts=$4
+ local basethr=$5
local tmin
local tmin2
local tmax
local tstarted
local paramp
local msg="Insane $modname thread counts"
- local ncpts=$(check_cpt_number)
+ local ncpts=$(check_cpt_number $facet)
+ local nthrs
shift 4
setup
tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
+ nthrs=$(expr $tmax - $tmin)
+ if [ $nthrs -lt $ncpts ]; then
+ nthrs=0
+ else
+ nthrs=$ncpts
+ fi
+
+ [ $tmin -eq $tmax -a $tmin -eq $tstarted ] &&
+ skip_env "module parameter forced $facet thread count" &&
+ tmin=3 && tmax=$((3 * tmax))
# Check that we can change min/max
- do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + ncpts))"
- do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - ncpts))"
+ do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + nthrs))"
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - nthrs))"
tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
- lassert 25 "$msg" '(($tmin2 == ($tmin + $ncpts) && $tmax2 == ($tmax - $ncpts)))' || return $?
+ lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) && $tmax2 == ($tmax - $nthrs)))' || return $?
# Check that we can set min/max to the same value
tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
LOAD_MODULES_REMOTE=true
cleanup
local oldvalue
- setmodopts -a $modname "$opts" oldvalue
+ local newvalue="${opts}=$(expr $basethr \* $ncpts)"
+ setmodopts -a $modname "$newvalue" oldvalue
load_modules
setup
}
# OSS thread-count sanity; thread_sanity derives the module-parameter
# value internally as <base> (16) times the CPT count of the facet.
test_53a() {
	thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16'
}
run_test 53a "check OSS thread count params"
# MDT thread-count sanity; same scheme as test_53a but against the MDS,
# with base thread count 16 scaled by the facet's CPT count.
test_53b() {
	thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' '16'
}
run_test 53b "check MDT thread count params"
test_54a() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1) -p
[ $? -eq 0 ] || error "llverdev failed!"
reformat_and_config
run_test 54a "test llverdev and partial verify of device"
# Partial llverfs run over the mounted filesystem.
test_54b() {
	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
		skip "Only applicable to ldiskfs-based MDTs"
		return
	fi

	setup
	# -p: partial (fast) verification pass
	run_llverfs $MOUNT -p || error "llverfs failed!"
}
test_55() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
local mdsdev=$(mdsdevname 1)
local mdsvdev=$(mdsvdevname 1)
mount_client $MOUNT || error "Unable to mount client"
echo ok
$LFS osts
- [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; }
stopall
MDSJOURNALSIZE=$mds_journal_size_orig
reformat
}
test_58() { # bug 22658
- if [ $(facet_fstype mds) == zfs ]; then
- skip "Does not work with ZFS-based MDTs yet"
+ if [ $(facet_fstype mds) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
return
fi
setup_noconfig
run_test 61 "large xattr"
# MRP-118: a target whose on-disk journal has been removed must refuse
# to start instead of running without one.
test_62() {
	# quoted so an empty facet_fstype result cannot break the test
	if [ "$(facet_fstype $SINGLEMDS)" != ldiskfs ]; then
		skip "Only applicable to ldiskfs-based MDTs"
		return
	fi

	local mdsdev=$(mdsdevname 1)
	local ostdev=$(ostdevname 1)

	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] ||
		{ skip "Need MDS version at least 2.2.51"; return 0; }

	echo "disable journal for mds"
	# use $SINGLEMDS consistently: the device comes from mdsdevname 1,
	# so tune2fs must run on that same facet (not the legacy "mds" one)
	do_facet $SINGLEMDS tune2fs -O ^has_journal $mdsdev ||
		error "tune2fs failed"
	start_mds && error "MDT start should fail"
	echo "disable journal for ost"
	do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed"
	start_ost && error "OST start should fail"
	cleanup || return $?
	reformat_and_config
}
run_test 62 "start with disabled journal"
# Sanity-check the on-disk inode size: each slab page must hold at
# least 3 ldiskfs inodes, otherwise the inode size grew too large.
test_63() {
	local objs_per_slab

	if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
		skip "Only applicable to ldiskfs-based MDTs"
		return
	fi

	# objects-per-slab column (field 5) of the ldiskfs inode cache
	objs_per_slab=$(do_facet $SINGLEMDS \
		"awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
	if [ -z "$objs_per_slab" ]; then
		skip "ldiskfs module has not been loaded"
		return
	fi

	echo "$objs_per_slab ldisk inodes per page"
	[ "$objs_per_slab" -ge "3" ] ||
		error "ldisk inode size is too big, $objs_per_slab objs per page"
	return
}
run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+
# Tear down the standalone MGS when it is not co-located with the MDS.
combined_mgs_mds || stop mgs