ONLY=${ONLY:-"$*"}
-# bug number for skipped test:
-ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
+SRCDIR=$(dirname $0)
+PTLDEBUG=${PTLDEBUG:--1}
+LUSTRE=${LUSTRE:-$(dirname $0)/..}
+. $LUSTRE/tests/test-framework.sh
+init_test_env $@
+init_logging
+
+# 32newtarball is a tool to create lustre filesystem images, not a test
+ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT 32newtarball"
+
+# bug number for skipped test: LU-11915
+ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-# bug number for skipped test:
-# a tool to create lustre filesystem images
-ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
if $SHARED_KEY; then
-# bug number for skipped tests: LU-9795 (all below)
+ # bug number for skipped tests: LU-9795 (all below)
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 0 31 32a 32d 35a"
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 53a 53b 54b 76a 76b"
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 76c 76d 78 103"
fi
-SRCDIR=$(dirname $0)
-PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
+if ! combined_mgs_mds; then
+ # bug number for skipped test: LU-11991 LU-11990
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 32a 32b 32c 32d 32e 66"
+ # bug number for skipped test: LU-9897 LU-12032
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 84 123F"
+fi
-PTLDEBUG=${PTLDEBUG:--1}
-SAVE_PWD=$PWD
-LUSTRE=${LUSTRE:-$(dirname $0)/..}
-RLUSTRE=${RLUSTRE:-$LUSTRE}
-export MULTIOP=${MULTIOP:-multiop}
+# runtimes of the skipped slow tests, in minutes: 8 22 40 165
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69 106 111"
-. $LUSTRE/tests/test-framework.sh
-init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+build_test_filter
# use small MDS + OST size to speed formatting time
# do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
STORED_MDSSIZE=$MDSSIZE
STORED_OSTSIZE=$OSTSIZE
MDSSIZE=200000
-[ $(facet_fstype $SINGLEMDS) = "zfs" ] && MDSSIZE=400000
+[ "$mds1_FSTYPE" = zfs ] && MDSSIZE=400000
OSTSIZE=200000
-[ $(facet_fstype ost1) = "zfs" ] && OSTSIZE=400000
+[ "$ost1_FSTYPE" = zfs ] && OSTSIZE=400000
fs2mds_HOST=$mds_HOST
fs2ost_HOST=$ost_HOST
OSTDEV1_2=$fs2ost_DEV
OSTDEV2_2=$fs3ost_DEV
-# bug number for skipped test: LU-11915
-ALWAYS_EXCEPT="$ALWAYS_EXCEPT 110"
-
-if ! combined_mgs_mds; then
- # bug number for skipped test: LU-11991 LU-11990
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 32a 32b 32c 32d 32e 66"
- # bug number for skipped test: LU-9897 LU-12032
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 84 123F"
-fi
-
# pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
fi
-init_logging
-
#
require_dsh_mds || exit 0
require_dsh_ost || exit 0
-# 8 22 40 165 (min)
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="45 69 106 111"
-
assert_DIR
gen_config() {
}
mount_client() {
- local MOUNTPATH=$1
- echo "mount $FSNAME on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH || return 96
-}
+ local mountpath=$1
+ local mountopt="$2"
-remount_client() {
- local mountopt="remount,$1"
- local MOUNTPATH=$2
- echo "remount '$1' lustre on ${MOUNTPATH}....."
- zconf_mount $(hostname) $MOUNTPATH "$mountopt" || return 96
+ echo "mount $FSNAME ${mountopt:+with opts $mountopt} on $mountpath....."
+ zconf_mount $HOSTNAME $mountpath $mountopt || return 96
}
umount_client() {
cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $1 | head -n 1
}
-build_test_filter
-
if [ "$ONLY" == "setup" ]; then
setup
exit
test_5g() {
modprobe lustre
- [ $(lustre_version_code client) -lt $(version_code 2.9.53) ] &&
+ [ "$CLIENT_VERSION" -lt $(version_code 2.9.53) ] &&
skip "automount of debugfs missing before 2.9.53"
umount /sys/kernel/debug
$LCTL get_param -n devices | egrep -v "error" && \
}
run_test 5g "handle missing debugfs"
+test_5h() {
+ setup
+
+ stop mds1
+ #define OBD_FAIL_MDS_FS_SETUP 0x135
+ do_facet mds1 "$LCTL set_param fail_loc=0x80000135"
+ start_mdt 1 && error "start mdt should fail"
+ start_mdt 1 || error "start mdt failed"
+ client_up || error "client_up failed"
+ cleanup
+}
+run_test 5h "start mdt failure at mdt_fs_setup()"
+
+test_5i() {
+ setup
+
+ stop mds1
+ #define OBD_FAIL_QUOTA_INIT 0xA05
+ do_facet mds1 "$LCTL set_param fail_loc=0x80000A05"
+ start_mdt 1 && error "start mdt should fail"
+ start_mdt 1 || error "start mdt failed"
+ client_up || error "client_up failed"
+ cleanup
+}
+run_test 5i "start mdt failure at mdt_quota_init()"
+
test_6() {
setup
manual_umount_client
#
test_17() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
test_18() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
mount_client $MOUNT || error "mount_client $MOUNT failed"
check_mount || error "check_mount failed"
rm -f $DIR/$tfile || error "remove $DIR/$tfile failed."
- remount_client ro $MOUNT || error "remount_client with ro failed"
+ mount_client $MOUNT remount,ro || error "remount client with ro failed"
touch $DIR/$tfile && error "$DIR/$tfile created incorrectly"
[ -e $DIR/$tfile ] && error "$DIR/$tfile exists incorrectly"
- remount_client rw $MOUNT || error "remount_client with rw failed"
+ mount_client $MOUNT remount,rw || error "remount client with rw failed"
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
MCNT=$(grep -c $MOUNT' ' /etc/mtab)
[ "$MCNT" -ne 1 ] && error "$MOUNT in /etc/mtab $MCNT times"
test_28A() { # was test_28
setup_noconfig
- TEST="llite.$FSNAME-*.max_read_ahead_whole_mb"
- PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
- ORIG=$($LCTL get_param -n $TEST)
- FINAL=$(($ORIG + 1))
- set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL
- FINAL=$(($FINAL + 1))
- set_persistent_param_and_check client "$TEST" "$PARAM" $FINAL
+
+ local TEST="llite.$FSNAME-*.max_read_ahead_whole_mb"
+ local PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
+ local orig=$($LCTL get_param -n $TEST)
+ local max=$($LCTL get_param -n \
+ llite.$FSNAME-*.max_read_ahead_per_file_mb)
+
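+	# strip any fractional part; max_read_ahead_whole_mb cannot exceed
+	# max_read_ahead_per_file_mb, so keep orig low enough that the two
+	# +1 increments below still fit under max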
+ orig=${orig%%.[0-9]*}
+ max=${max%%.[0-9]*}
+ echo "ORIG:$orig MAX:$max"
+ [[ $max -le $orig ]] && orig=$((max - 3))
+ echo "ORIG:$orig MAX:$max"
+
+ local final=$((orig + 1))
+
+ set_persistent_param_and_check client "$TEST" "$PARAM" $final
+ final=$((final + 1))
+ set_persistent_param_and_check client "$TEST" "$PARAM" $final
umount_client $MOUNT || error "umount_client $MOUNT failed"
mount_client $MOUNT || error "mount_client $MOUNT failed"
- RESULT=$($LCTL get_param -n $TEST)
- if [ $RESULT -ne $FINAL ]; then
- error "New config not seen: wanted $FINAL got $RESULT"
+
+ local result=$($LCTL get_param -n $TEST)
+
+ if [ $result -ne $final ]; then
+ error "New config not seen: wanted $final got $result"
else
- echo "New config success: got $RESULT"
+ echo "New config success: got $result"
fi
- set_persistent_param_and_check client "$TEST" "$PARAM" $ORIG
+ set_persistent_param_and_check client "$TEST" "$PARAM" $orig
cleanup || error "cleanup failed with rc $?"
}
run_test 28A "permanent parameter setting"
test_28a() { # LU-4221
- [[ $(lustre_version_code ost1) -ge $(version_code 2.5.52) ]] ||
+ [[ "$OST1_VERSION" -ge $(version_code 2.5.52) ]] ||
skip "Need OST version at least 2.5.52"
- [ "$(facet_fstype ost1)" = "zfs" ] &&
+ [ "$ost1_FSTYPE" = zfs ] &&
skip "LU-4221: no such proc params for ZFS OSTs"
local name
# on the MDS servers which is tested with wait_osp_* below.
# For ost_server_uuid that only exist on client so filtering
# is safe.
- local PROC_ACT="osc.$FSNAME-OST0001-osc-[^M]*.active"
- local PROC_UUID="osc.$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
+ local PROC_ACT="os[cp].$FSNAME-OST0001-osc-[^M]*.active"
+ local PROC_UUID="os[cp].$FSNAME-OST0001-osc-[^M]*.ost_server_uuid"
ACTV=$($LCTL get_param -n $PROC_ACT)
DEAC=$((1 - $ACTV))
mount_client $MOUNT || error "mount_client $MOUNT failed"
FINAL=$($LCTL get_param -n $TEST)
echo "deleted (default) value=$FINAL, orig=$ORIG"
+ ORIG=${ORIG%%.[0-9]*}
+ FINAL=${FINAL%%.[0-9]*}
# assumes this parameter started at the default value
[ "$FINAL" -eq "$ORIG" ] || fail "Deleted value=$FINAL, orig=$ORIG"
local dst=.
local src=/etc/rc.d
local tmp=$TMP/t32_image_create
- local server_version=$(lustre_version_code $SINGLEMDS)
local remote_dir
local striped_dir
local pushd_dir
setupall
- [[ $server_version -ge $(version_code 2.3.50) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.3.50) ]] ||
$LFS quotacheck -ug /mnt/$FSNAME
$LFS setquota -u $T32_QID -b 0 -B $T32_BLIMIT -i 0 -I $T32_ILIMIT \
/mnt/$FSNAME
$LFS mkdir -i 1 $remote_dir
tar cf - -C $tmp/src . | tar xf - -C $remote_dir
- if [[ $server_version -ge $(version_code 2.7.0) ]]; then
+ if [[ "$MDS1_VERSION" -ge $(version_code 2.7.0) ]]; then
striped_dir=/mnt/$FSNAME/striped_dir_old
$LFS mkdir -i 1 -c 2 $striped_dir
tar cf - -C $tmp/src . | tar xf - -C $striped_dir
pushd_dir=/mnt/$FSNAME
if [[ $MDSCOUNT -ge 2 ]]; then
pushd_dir=$remote_dir
- if [[ $server_version -ge $(version_code 2.7.0) ]]; then
+ if [[ "$MDS1_VERSION" -ge $(version_code 2.7.0) ]]; then
pushd $striped_dir
ls -Rni --time-style=+%s >$tmp/img/list2
popd
$LCTL get_param -n version | head -n 1 |
sed -e 's/^lustre: *//' >$tmp/img/commit
- [[ $server_version -ge $(version_code 2.3.50) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.3.50) ]] ||
$LFS quotaon -ug /mnt/$FSNAME
$LFS quota -u $T32_QID -v /mnt/$FSNAME
$LFS quota -v -u $T32_QID /mnt/$FSNAME |
sed -e 's/\./_/g') # E.g., "1.8.7" -> "1_8"
dst=$(cd $dst; pwd)
pushd $tmp/img
- tar cjvf $dst/disk$version-$(facet_fstype $SINGLEMDS).tar.bz2 -S *
+ tar cjvf $dst/disk$version-"$mds1_FSTYPE".tar.bz2 -S *
popd
rm -r $tmp
! $r which "$TUNEFS" && skip_env "tunefs.lustre required on $node"
- local IMGTYPE=$(facet_fstype $SINGLEMDS)
+ local IMGTYPE="$mds1_FSTYPE"
tarballs=$($r find $RLUSTRE/tests -maxdepth 1 \
-name \'disk*-$IMGTYPE.tar.bz2\')
t32_test_cleanup() {
local tmp=$TMP/t32
local facet=$SINGLEMDS
- local fstype=$(facet_fstype $facet)
local rc=$?
if $shall_cleanup_lustre; then
$r rm -rf $tmp
rm -rf $tmp
- if [[ $fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
local poolname
local poolname_list="t32fs-mdt1 t32fs-ost1"
local node=$1
local all_removed=false
local i=0
- local fstype=$(facet_fstype $SINGLEMDS)
- [ $fstype == "zfs" ] && do_rpc_nodes $node "service zed stop"
+ [ "$mds1_FSTYPE" == zfs ] && do_rpc_nodes $node "service zed stop"
while ((i < 20)); do
echo "Unloading modules on $node: Attempt $i"
- do_rpc_nodes $node $LUSTRE_RMMOD $fstype &&
+ do_rpc_nodes $node $LUSTRE_RMMOD "$mds1_FSTYPE" &&
all_removed=true
do_rpc_nodes $node check_mem_leak || return 1
if $all_removed; then
do_rpc_nodes $node load_modules
return 0
fi
- if [ $fstype == "zfs" ]; then
+ if [ "$mds1_FSTYPE" == zfs ]; then
do_rpc_nodes $node "$ZPOOL status -v"
fi
sleep 5
local facet=$1
local fsname=$2
local mnt=$3
- local fstype=$(facet_fstype $SINGLEMDS)
local qval
local cmd
# lustre will estimate the object count usage. This fails quota
# verification in 32b. The object quota usage should be accurate after
# zfs-0.7.0 is released.
- [ $fstype == "zfs" ] && {
+ [ "$mds1_FSTYPE" == zfs ] && {
local zfs_version=$(do_facet $facet cat /sys/module/zfs/version)
[ $(version_code $zfs_version) -lt $(version_code 0.7.0) ] && {
}
set_persistent_param_and_check $facet \
- "osd-$fstype.$fsname-MDT0000.quota_slave.enabled" \
+ "osd-$mds1_FSTYPE.$fsname-MDT0000.quota_slave.enabled" \
"$fsname.quota.mdt" ug
set_persistent_param_and_check $facet \
- "osd-$fstype.$fsname-OST0000.quota_slave.enabled" \
+ "osd-$mds1_FSTYPE.$fsname-OST0000.quota_slave.enabled" \
"$fsname.quota.ost" ug
chmod 0777 $mnt
local nrpcs_orig
local nrpcs
local list
- local fstype=$(facet_fstype $SINGLEMDS)
local mdt_dev=$tmp/mdt
local mdt2_dev=$tmp/mdt2
local ost_dev=$tmp/ost
# - ost device img version < 2.3.64
# - ost server version >= 2.5
[ $(version_code $img_commit) -ge $(version_code 2.3.64) -o \
- $(lustre_version_code ost1) -lt $(version_code 2.5.0) ] &&
+ "$OST1_VERSION" -lt $(version_code 2.5.0) ] &&
ff_convert="no"
! $r test -f $mdt2_dev || mdt2_is_available=true
- if [[ $fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
# import pool first
local poolname
local poolname_list="t32fs-mdt1 t32fs-ost1"
if [ "$writeconf" ]; then
mopts=writeconf
- if [ $fstype == "ldiskfs" ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mopts="loop,$mopts"
$r $TUNEFS --quota $mdt_dev || {
$r losetup -a
fi
else
if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
- [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.3.59) ]] ||
skip "LU-2200: Cannot run over IB w/o lctl replace_nids "
"(Need MGS version at least 2.3.59)"
local ostnid=$(do_node $osthost $LCTL list_nids | head -1)
mopts=nosvc
- if [ $fstype == "ldiskfs" ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mopts="loop,$mopts"
fi
$r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt
fi
mopts=exclude=$fsname-OST0000
- if [ $fstype == "ldiskfs" ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mopts="loop,$mopts"
fi
fi
local fs2mdsvdev=$(mdsvdevname 1_2)
echo "mkfs new MDT on ${fs2mdsdev}...."
- if [ $(facet_fstype mds1) == ldiskfs ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mkfsoptions="--mkfsoptions=\\\"-J size=8\\\""
fi
return 1
}
- [[ $(facet_fstype mds1) != zfs ]] || import_zpool mds1
+ [[ "$mds1_FSTYPE" != zfs ]] || import_zpool mds1
$r $TUNEFS --dryrun $fs2mdsdev || {
error_noexit "tunefs.lustre before mounting the MDT"
}
if [ "$writeconf" ]; then
mopts=mgsnode=$nid,$writeconf
- if [ $fstype == "ldiskfs" ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mopts="loop,$mopts"
$r $TUNEFS --quota $ost_dev || {
$r losetup -a
fi
else
mopts=mgsnode=$nid
- if [ $fstype == "ldiskfs" ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mopts="loop,$mopts"
fi
fi
return 1
}
- if [ "$ff_convert" != "no" -a $(facet_fstype ost1) == "ldiskfs" ]; then
+ if [ "$ff_convert" != "no" -a "$ost1_FSTYPE" == ldiskfs ]; then
$r $LCTL lfsck_start -M $fsname-OST0000 || {
error_noexit "Start OI scrub on OST0"
return 1
fi
# migrate files/dirs to remote MDT, then move them back
- if [ $(lustre_version_code mds1) -ge $(version_code 2.7.50) -a \
+ if [ "$MDS1_VERSION" -ge $(version_code 2.7.50) -a \
$dne_upgrade != "no" ]; then
$r $LCTL set_param -n \
mdt.${fsname}*.enable_remote_dir=1 2>/dev/null
error_noexit "Unmounting the MDT2"
return 1
}
- if [[ $fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
$r "$ZPOOL export t32fs-mdt2"
fi
shall_cleanup_mdt1=false
error_noexit "Unmounting the MDT"
return 1
}
- if [[ $fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
$r "$ZPOOL export t32fs-mdt1"
fi
shall_cleanup_mdt=false
error_noexit "Unmounting the OST"
return 1
}
- if [[ $fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
$r "$ZPOOL export t32fs-ost1"
fi
shall_cleanup_ost=false
return 1
}
- if [[ $fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
local poolname=t32fs-mdt1
$r "modprobe zfs;
$ZPOOL list -H $poolname >/dev/null 2>&1 ||
}
mopts=exclude=$fsname-OST0000
- if [ $fstype == "ldiskfs" ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mopts="loop,$mopts"
fi
$r $MOUNT_CMD -o $mopts $mdt_dev $tmp/mnt/mdt || {
run_test 32d "convert ff test"
test_32e() {
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.56) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.10.56) ]] ||
skip "Need MDS version at least 2.10.56"
local tarballs
local fs2mdsvdev=$(mdsvdevname 1_2)
local fs2ostvdev=$(ostvdevname 1_2)
- if [ $(facet_fstype mds1) == ldiskfs ]; then
+ if [ "$mds1_FSTYPE" == ldiskfs ]; then
mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
fi
local opts=$MDS_MOUNT_OPTS
local rc=0
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
run_test 37 "verify set tunables works for symlink device"
test_38() { # bug 14222
- local fstype=$(facet_fstype $SINGLEMDS)
local mntpt=$(facet_mntpt $SINGLEMDS)
setup
run_test 40 "race during service thread startup"
test_41a() { #bug 14134
- if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ if [ "$mds1_FSTYPE" == ldiskfs ] &&
! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
skip "Loop devices does not work with nosvc option"
fi
run_test 41a "mount mds with --nosvc and --nomgs"
test_41b() {
- if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ if [ "$mds1_FSTYPE" == ldiskfs ] &&
! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
skip "Loop devices does not work with nosvc option"
fi
run_test 41b "mount mds with --nosvc and --nomgs on first mount"
test_41c() {
- local server_version=$(lustre_version_code $SINGLEMDS)
local oss_list=$(comma_list $(osts_nodes))
- [[ $server_version -ge $(version_code 2.6.52) ]] ||
- [[ $server_version -ge $(version_code 2.5.26) &&
- $server_version -lt $(version_code 2.5.50) ]] ||
- [[ $server_version -ge $(version_code 2.5.4) &&
- $server_version -lt $(version_code 2.5.11) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.6.52) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.5.26) &&
+ "$MDS1_VERSION" -lt $(version_code 2.5.50) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.5.4) &&
+ "$MDS1_VERSION" -lt $(version_code 2.5.11) ]] ||
skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"
# ensure mds1 ost1 have been created even if running sub-test standalone
local mds1dev=$(mdsdevname 1)
local mds1mnt=$(facet_mntpt mds1)
- local mds1fstype=$(facet_fstype mds1)
local mds1opts=$MDS_MOUNT_OPTS
- if [ $mds1fstype == ldiskfs ] &&
+ if [ "$mds1_FSTYPE" == ldiskfs ] &&
! do_facet mds1 test -b $mds1dev; then
mds1opts=$(csa_add "$mds1opts" -o loop)
fi
- if [[ $mds1fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
import_zpool mds1 || return ${PIPESTATUS[0]}
fi
local ost1dev=$(ostdevname 1)
local ost1mnt=$(facet_mntpt ost1)
- local ost1fstype=$(facet_fstype ost1)
local ost1opts=$OST_MOUNT_OPTS
- if [ $ost1fstype == ldiskfs ] &&
+ if [ "$ost1_FSTYPE" == ldiskfs ] &&
! do_facet ost1 test -b $ost1dev; then
ost1opts=$(csa_add "$ost1opts" -o loop)
fi
- if [[ $ost1fstype == zfs ]]; then
+ if [[ "$ost1_FSTYPE" == zfs ]]; then
import_zpool ost1 || return ${PIPESTATUS[0]}
fi
run_test 42 "allow client/server mount/unmount with invalid config param"
test_43a() {
- [[ $(lustre_version_code mgs) -ge $(version_code 2.5.58) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.5.58) ]] ||
skip "Need MDS version at least 2.5.58"
[ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
run_test 43a "check root_squash and nosquash_nids"
test_43b() { # LU-5690
- [[ $(lustre_version_code mgs) -ge $(version_code 2.7.62) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.7.62) ]] ||
skip "Need MGS version 2.7.62+"
if [[ -z "$fs2mds_DEV" ]]; then
# LOV EA, and so on. These EA will use some EA space that is shared by
# ACL entries. So here we only check some reasonable ACL entries count,
# instead of the max number that is calculated from the max_ea_size.
- if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.8.57) ];
- then
+ if [ "$MDS1_VERSION" -lt $(version_code 2.8.57) ]; then
count=28 # hard coded of RPC protocol
- elif [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
- count=4000 # max_num 4091 max_ea_size = ~65536
- elif ! large_xattr_enabled; then
- count=450 # max_num 497 max_ea_size = 4012
- else
+ elif large_xattr_enabled; then
count=4500 # max_num 8187 max_ea_size = 65452
- # not create too much (>5000) to save test time
+		# do not create too many (4500) to save test time
+ else
+ count=450 # max_num 497 max_ea_size = 4012
fi
echo "It is expected to hold at least $count ACL entries"
# prepare MDT/OST, make OSC inactive for OST1
[ "$OSTCOUNT" -lt "2" ] && skip_env "needs >=2 OSTs"
- [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+ [ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --param osc.active=0 `ostdevname 1`" ||
error "tunefs OST1 failed"
start_mds || error "Unable to start MDT"
"${FSNAME}-MDT0001.mdc.active" 1
wait_clients_import_state ${CLIENTS:-$HOSTNAME} mds2 FULL
- if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.60) ]
+ if [ "$MDS1_VERSION" -ge $(version_code 2.7.60) ]
then
wait_dne_interconnect
fi
}
test_52() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
if ! do_node $ost1node test -b $ost1_dev; then
loop="-o loop"
fi
- do_node $ost1node mount -t $(facet_fstype ost1) $loop $ost1_dev \
+ do_node $ost1node mount -t "$ost1_FSTYPE" $loop $ost1_dev \
$ost1mnt ||
error "Unable to mount ost1 as ldiskfs"
run_test 53b "check MDS thread count params"
test_54a() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
run_test 54a "test llverdev and partial verify of device"
test_54b() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
}
test_55() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
run_test 55 "check lov_objid size"
test_56a() {
- local server_version=$(lustre_version_code $SINGLEMDS)
local mds_journal_size_orig=$MDSJOURNALSIZE
local n
echo ok
$LFS osts
- if [[ $server_version -ge $(version_code 2.6.54) ]] ||
- [[ $server_version -ge $(version_code 2.5.4) &&
- $server_version -lt $(version_code 2.5.11) ]]; then
+ if [[ "$MDS1_VERSION" -ge $(version_code 2.6.54) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.5.4) &&
+ "$MDS1_VERSION" -lt $(version_code 2.5.11) ]]; then
wait_osc_import_state mds ost1 FULL
wait_osc_import_state mds ost2 FULL
$LFS setstripe --stripe-count=-1 $DIR/$tfile ||
local NID=$(do_facet ost1 "$LCTL get_param nis" |
tail -1 | awk '{print $1}')
writeconf_or_reformat
- [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+ [ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --failnode=$NID `ostdevname 1`" ||
error "tunefs failed"
start_mgsmds
local NID=$(do_facet ost1 "$LCTL get_param nis" |
tail -1 | awk '{print $1}')
writeconf_or_reformat
- [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+ [ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --servicenode=$NID `ostdevname 1`" ||
error "tunefs failed"
start_mgsmds
run_test 59 "writeconf mount option"
test_60() { # LU-471
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
run_test 60 "check mkfs.lustre --mkfsoptions -E -O options setting"
test_61() { # LU-80
- local lxattr=false
+ local lxattr=$(large_xattr_enabled)
- [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] ||
+ [ "$MDS1_VERSION" -ge $(version_code 2.1.53) ] ||
skip "Need MDS version at least 2.1.53"
- if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ if [ "$mds1_FSTYPE" == ldiskfs ] &&
! large_xattr_enabled; then
lxattr=true
local small_value="bar"
local name="trusted.big"
- log "save large xattr $name on $file"
+ log "save large xattr of $(max_xattr_size) bytes on $name on $file"
setfattr -n $name -v $large_value $file ||
error "saving $name on $file failed"
log "remove large xattr $name from $file"
setfattr -x $name $file || error "removing $name from $file failed"
- if $lxattr; then
+ if $lxattr && [ "$mds1_FSTYPE" == ldiskfs ]; then
stopall || error "stopping for e2fsck run"
for num in $(seq $MDSCOUNT); do
run_e2fsck $(facet_active_host mds$num) \
run_test 61 "large xattr"
test_62() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.2.51) ]] ||
skip "Need MDS version at least 2.2.51"
# MRP-118
run_test 62 "start with disabled journal"
test_63() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
test_65() { # LU-2237
# Currently, the test is only valid for ldiskfs backend
- [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
+ [ "$mds1_FSTYPE" != ldiskfs ] &&
skip "ldiskfs only test"
local devname=$(mdsdevname ${SINGLEMDS//mds/})
# remove the "last_rcvd" file
do_facet $SINGLEMDS "mkdir -p $brpt"
do_facet $SINGLEMDS \
- "mount -t $(facet_fstype $SINGLEMDS) $opts $devname $brpt"
+ "mount -t $mds1_FSTYPE $opts $devname $brpt"
do_facet $SINGLEMDS "rm -f ${brpt}/last_rcvd"
do_facet $SINGLEMDS "$UMOUNT $brpt"
run_test 65 "re-create the lost last_rcvd file when server mount"
test_66() {
- [[ $(lustre_version_code mgs) -ge $(version_code 2.3.59) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.3.59) ]] ||
skip "Need MGS version at least 2.3.59"
setup
local START
local END
- [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.53) ] ||
+ [ "$MDS1_VERSION" -ge $(version_code 2.4.53) ] ||
skip "Need MDS version at least 2.4.53"
umount_client $MOUNT || error "umount client failed"
# just "precreate" the missing objects. In the past it might try to recreate
# millions of objects after an OST was reformatted
test_69() {
- local server_version=$(lustre_version_code $SINGLEMDS)
-
- [[ $server_version -lt $(version_code 2.4.2) ]] &&
+ [[ "$MDS1_VERSION" -lt $(version_code 2.4.2) ]] &&
skip "Need MDS version at least 2.4.2"
- [[ $server_version -ge $(version_code 2.4.50) ]] &&
- [[ $server_version -lt $(version_code 2.5.0) ]] &&
+ [[ "$MDS1_VERSION" -ge $(version_code 2.4.50) ]] &&
+ [[ "$MDS1_VERSION" -lt $(version_code 2.5.0) ]] &&
skip "Need MDS version at least 2.5.0"
setup
local OSTNAME=$(ostname_from_index 0)
local mdtosc_proc1=$(get_mdtosc_proc_path mds1 $OSTNAME)
local last_id=$(do_facet mds1 $LCTL get_param -n \
- osc.$mdtosc_proc1.prealloc_last_id)
+ osp.$mdtosc_proc1.prealloc_last_id)
# Want to have OST LAST_ID over 5 * OST_MAX_PRECREATE to
# verify that the LAST_ID recovery is working properly. If
test_70e() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
- [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.62) ] ||
+ [ "$MDS1_VERSION" -ge $(version_code 2.7.62) ] ||
skip "Need MDS version at least 2.7.62"
reformat || error "reformat failed with $?"
run_test 71e "start OST0, MDT1, OST1, MDT0"
test_72() { #LU-2634
- [ "$(facet_fstype $SINGLEMDS)" != "ldiskfs" ] &&
+ [ "$mds1_FSTYPE" != ldiskfs ] &&
skip "ldiskfs only test"
local mdsdev=$(mdsdevname 1)
run_test 72 "test fast symlink with extents flag enabled"
test_73() { #LU-3006
- [ $(facet_fstype ost1) == zfs ] && import_zpool ost1
+ [ "$ost1_FSTYPE" == zfs ] && import_zpool ost1
do_facet ost1 "$TUNEFS --failnode=1.2.3.4@$NETTYPE $(ostdevname 1)" ||
error "1st tunefs failed"
start_mgsmds || error "start mds failed"
run_test 73 "failnode to update from mountdata properly"
test_75() { # LU-2374
- [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.1) ]] &&
+ [[ "$MDS1_VERSION" -lt $(version_code 2.4.1) ]] &&
skip "Need MDS version at least 2.4.1"
local index=0
run_test 75 "The order of --index should be irrelevant"
test_76a() {
- [[ $(lustre_version_code mgs) -ge $(version_code 2.4.52) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.4.52) ]] ||
skip "Need MDS version at least 2.4.52"
setup
run_test 76a "set permanent params with lctl across mounts"
test_76b() { # LU-4783
- [[ $(lustre_version_code mgs) -ge $(version_code 2.5.57) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.5.57) ]] ||
skip "Need MGS version at least 2.5.57"
stopall
setupall
run_test 76b "verify params log setup correctly"
test_76c() {
- [[ $(lustre_version_code mgs) -ge $(version_code 2.8.54) ]] ||
+ [[ "$MGS_VERSION" -ge $(version_code 2.8.54) ]] ||
skip "Need MDS version at least 2.4.52"
setupall
local MASK_PARAM="mdd.*.changelog_mask"
run_test 76d "verify llite.*.xattr_cache can be set by 'lctl set_param -P' correctly"
test_77() { # LU-3445
- local server_version=$(lustre_version_code $SINGLEMDS)
- [[ $server_version -ge $(version_code 2.8.55) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.8.55) ]] ||
skip "Need MDS version 2.8.55+ "
if [[ -z "$fs2ost_DEV" || -z "$fs2mds_DEV" ]]; then
run_test 77 "comma-separated MGS NIDs and failover node NIDs"
test_78() {
- [[ $(facet_fstype $SINGLEMDS) != ldiskfs ||
- $(facet_fstype ost1) != ldiskfs ]] &&
+ [[ "$mds1_FSTYPE" != ldiskfs ||
+ "$ost1_FSTYPE" != ldiskfs ]] &&
skip "ldiskfs only test"
# reformat the Lustre filesystem with a smaller size
run_test 78 "run resize2fs on MDT and OST filesystems"
test_79() { # LU-4227
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.5.59) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.5.59) ]] ||
skip "Need MDS version at least 2.5.59"
local mdsdev1=$(mdsdevname 1)
mgsnode_opt=$(echo $opts_mds2 |
awk '{ for ( i = 1; i < NF; i++ )
if ( $i ~ "--mgsnode" ) { print $i; break } }')
- [ -n $mgsnode_opt ] &&
+ [ -n "$mgsnode_opt" ] &&
opts_mds2=$(echo $opts_mds2 | sed -e "s/$mgsnode_opt//")
mgsnode_opt=$(echo $opts_ost1 |
awk '{ for ( i = 1; i < NF; i++ )
if ( $i ~ "--mgsnode" ) { print $i; break } }')
- [ -n $mgsnode_opt ] &&
+ [ -n "$mgsnode_opt" ] &&
opts_ost1=$(echo $opts_ost1 | sed -e "s/$mgsnode_opt//")
load_modules
# -MGS, format a mdt without --mgs option
# expected. This test uses OST_INDEX_LIST to format OSTs with a randomly
# assigned index and ensures we can mount such a formatted file system
test_81() { # LU-4665
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.6.54) ]] ||
skip "Need MDS version at least 2.6.54"
[[ $OSTCOUNT -ge 3 ]] || skip_env "needs >= 3 OSTs"
# Check max_easize.
local max_easize=$($LCTL get_param -n llite.*.max_easize)
- if [ $MDS1_VERSION -lt $(version_code 2.12.51) ]
- then
- [[ $max_easize -eq 128 ]] ||
- error "max_easize is $max_easize, should be 128 bytes"
+ # 65452 is XATTR_SIZE_MAX less ldiskfs ea overhead
+ if large_xattr_enabled; then
+ [[ $max_easize -ge 65452 ]] ||
+ error "max_easize is $max_easize, should be at least 65452 bytes"
else
# LU-11868
- # 4012 is 4096 - ldiskfs ea overhead
+ # 4012 is 4096 less ldiskfs ea overhead
[[ $max_easize -ge 4012 ]] ||
- error "max_easize is $max_easize, should be at least 4012 bytes"
-
- # 65452 is XATTR_SIZE_MAX - ldiskfs ea overhead
- if large_xattr_enabled;
- then
- [[ $max_easize -ge 65452 ]] ||
- error "max_easize is $max_easize, should be at least 65452 bytes"
- fi
+ error "max_easize is $max_easize, should be at least 4012 bytes"
fi
restore_ostindex
#
# 5. Lastly ensure this functionality fails with directories.
test_82a() { # LU-4665
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.6.54) ]] ||
skip "Need MDS version at least 2.6.54"
[[ $OSTCOUNT -ge 3 ]] || skip_env "needs >= 3 OSTs"
# Collect debug information - start of test
do_nodes $(comma_list $(mdts_nodes)) \
- $LCTL get_param osc.*.prealloc_*_id
+ $LCTL get_param osp.*.prealloc_*_id
mount_client $MOUNT || error "mount client $MOUNT failed"
wait_osts_up
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
stack_trap "do_nodes $(comma_list $(mdts_nodes)) \
- $LCTL get_param osc.*.prealloc_*_id" EXIT
+ $LCTL get_param osp.*.prealloc_*_id" EXIT
# 1. If the file does not exist, new file will be created
# with specified OSTs.
local dir=$DIR/$tdir/$tdir
mkdir $dir || error "mkdir $dir failed"
cmd="$LFS setstripe -o $ost_indices $dir"
- if [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.11.53) &&
- $(lustre_version_code client -gt $(version_code 2.11.53)) ]]; then
+ if [ "$MDS1_VERSION" -gt $(version_code 2.11.53) ] &&
+ [ "$CLIENT_VERSION" -gt $(version_code 2.11.53) ]; then
echo -e "\n$cmd"
eval $cmd || error "unable to specify OST indices on directory"
else
# Remove OSTs from a pool and destroy the pool.
destroy_pool $ost_pool || true
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
restore_ostindex
}
# the supplied OST index list points to OSTs not contained in the user
# supplied pool.
test_82b() { # LU-4665
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.54) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.6.54) ]] ||
skip "Need MDS version at least 2.6.54"
[[ $OSTCOUNT -ge 4 ]] || skip_env "needs >= 4 OSTs"
done
mount_client $MOUNT || error "mount client $MOUNT failed"
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
wait_osts_up
$LFS df $MOUNT || error "$LFS df $MOUNT failed"
run_test 82b "specify OSTs for file with --pool and --ost-list options"
test_83() {
- [[ $(lustre_version_code ost1) -ge $(version_code 2.6.91) ]] ||
+ [[ "$OST1_VERSION" -ge $(version_code 2.6.91) ]] ||
skip "Need OST version at least 2.6.91"
- if [ $(facet_fstype ost1) != ldiskfs ]; then
+ if [ "$ost1_FSTYPE" != ldiskfs ]; then
skip "ldiskfs only test"
fi
dev=$(ostdevname 1)
ostmnt=$(facet_mntpt ost1)
- fstype=$(facet_fstype ost1)
# Mount the OST as an ldiskfs filesystem.
- log "mount the OST $dev as a $fstype filesystem"
+ log "mount the OST $dev as a $ost1_FSTYPE filesystem"
add ost1 $(mkfs_opts ost1 $dev) $FSTYPE_OPT \
--reformat $dev > /dev/null ||
error "format ost1 error"
mnt_opts=$(csa_add "$OST_MOUNT_OPTS" -o loop)
fi
echo "mnt_opts $mnt_opts"
- do_facet ost1 mount -t $fstype $dev \
+ do_facet ost1 mount -t "$ost1_FSTYPE" $dev \
$ostmnt $mnt_opts
# Run llverfs on the mounted ldiskfs filesystem.
# It is needed to get ENOSPACE.
- log "run llverfs in partial mode on the OST $fstype $ostmnt"
+ log "run llverfs in partial mode on the OST $ost1_FSTYPE $ostmnt"
do_rpc_nodes $(facet_host ost1) run_llverfs $ostmnt -vpl \
- "no" || error "run_llverfs error on $fstype"
+ "no" || error "run_llverfs error on $ost1_FSTYPE"
# Unmount the OST.
log "unmount the OST $dev"
run_test 84 "check recovery_hard_time"
test_85() {
- [[ $(lustre_version_code ost1) -ge $(version_code 2.7.55) ]] ||
+ [[ "$OST1_VERSION" -ge $(version_code 2.7.55) ]] ||
skip "Need OST version at least 2.7.55"
##define OBD_FAIL_OSD_OST_EA_FID_SET 0x197
do_facet ost1 "lctl set_param fail_loc=0x197"
}
test_86() {
- local server_version=$(lustre_version_code $SINGLEMDS)
- [ "$(facet_fstype ost1)" = "zfs" ] &&
+ [ "$ost1_FSTYPE" = zfs ] &&
skip "LU-6442: no such mkfs params for ZFS OSTs"
- [[ $server_version -ge $(version_code 2.7.56) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.56) ]] ||
skip "Need server version newer than 2.7.55"
local OST_OPTS="$(mkfs_opts ost1 $(ostdevname 1)) \
run_test 86 "Replacing mkfs.lustre -G option"
test_87() { #LU-6544
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.51) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.9.51) ]] ||
skip "Need MDS version at least 2.9.51"
- [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+ [[ "$mds1_FSTYPE" != ldiskfs ]] &&
skip "ldiskfs only test"
[[ $OSTCOUNT -gt 59 ]] &&
skip "Ignore wide striping situation"
run_test 87 "check if MDT inode can hold EAs with N stripes properly"
test_88() {
- [ "$(facet_fstype mds1)" == "zfs" ] &&
+ [ "$mds1_FSTYPE" == zfs ] &&
skip "LU-6662: no implementation for ZFS"
load_modules
run_test 88 "check the default mount options can be overridden"
test_89() { # LU-7131
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.9.54) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.9.54) ]] ||
skip "Need MDT version at least 2.9.54"
local key=failover.node
stopall
- [ $(facet_fstype mds1) == zfs ] && import_zpool mds1
+ [ "$mds1_FSTYPE" == zfs ] && import_zpool mds1
# Check that parameters are added correctly
echo "tunefs --param $key=$val1"
do_facet mds "$TUNEFS --param $key=$val1 $mdsdev >/dev/null" ||
local nid
local found
- [[ $(lustre_version_code ost1) -ge $(version_code 2.7.63) ]] ||
+ [[ "$OST1_VERSION" -ge $(version_code 2.7.63) ]] ||
skip "Need OST version at least 2.7.63"
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.63) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.7.63) ]] ||
skip "Need MDT version at least 2.7.63"
start_mds || error "MDS start failed"
touch $ldevconfpath
fstype=$(facet_fstype mgs)
- if [ "$fstype" == "zfs" ]; then
+ if [ "$fstype" == zfs ]; then
fsldevformat="$fstype:"
else
fsldevformat=""
for num in $(seq $MDSCOUNT); do
fstype=$(facet_fstype mds$num)
- if [ "$fstype" == "zfs" ]; then
+ if [ "$fstype" == zfs ]; then
fsldevformat="$fstype:"
else
fsldevformat=""
for num in $(seq $OSTCOUNT); do
fstype=$(facet_fstype ost$num)
- if [ "$fstype" == "zfs" ]; then
+ if [ "$fstype" == zfs ]; then
fsldevformat="$fstype:"
else
fsldevformat=""
for ((x = 1; x <= 400; x++)); do
mountopt="$mountopt,user_xattr"
done
- remount_client $mountopt $MOUNT 2>&1 | grep "too long" ||
+ mount_client $MOUNT remount,$mountopt 2>&1 | grep "too long" ||
error "Buffer overflow check failed"
cleanup || error "cleanup failed"
}
test_99()
{
- [[ $(facet_fstype ost1) != ldiskfs ]] &&
+ [[ "$ost1_FSTYPE" != ldiskfs ]] &&
skip "ldiskfs only test"
- [[ $(lustre_version_code ost1) -ge $(version_code 2.8.57) ]] ||
+ [[ "$OST1_VERSION" -ge $(version_code 2.8.57) ]] ||
skip "Need OST version at least 2.8.57"
local ost_opts="$(mkfs_opts ost1 $(ostdevname 1)) \
run_test 101 "Race MDT->OST reconnection with create"
test_102() {
- [[ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.53) ]] ||
+ [[ "$MDS1_VERSION" -gt $(version_code 2.9.53) ]] ||
skip "Need server version greater than 2.9.53"
[[ "$(mdsdevname 1)" != "$(mgsdevname)" ]] &&
[[ "$(facet_host mds1)" = "$(facet_host mgs)" ]] &&
local mds1dev=$(mdsdevname 1)
local mds1mnt=$(facet_mntpt mds1)
- local mds1fstype=$(facet_fstype mds1)
local mds1opts=$MDS_MOUNT_OPTS
- if [ $mds1fstype == ldiskfs ] &&
+ if [ "$mds1_FSTYPE" == ldiskfs ] &&
! do_facet mds1 test -b $mds1dev; then
mds1opts=$(csa_add "$mds1opts" -o loop)
fi
- if [[ $mds1fstype == zfs ]]; then
+ if [[ "$mds1_FSTYPE" == zfs ]]; then
import_zpool mds1 || return ${PIPESTATUS[0]}
fi
echo "rename $FSNAME to $newname"
if ! combined_mgs_mds ; then
- local facet=$(mgsdevname)
+ local dev=$(mgsdevname)
do_facet mgs \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(7) Fail to rename MGS"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(7) Fail to rename MGS"
+ if [ "$(facet_fstype mgs)" = "zfs" ]; then
reimport_zpool mgs $newname-mgs
fi
fi
for num in $(seq $MDSCOUNT); do
- local facet=$(mdsdevname $num)
+ local dev=$(mdsdevname $num)
do_facet mds${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(8) Fail to rename MDT $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(8) Fail to rename MDT $num"
+ if [ "$(facet_fstype mds${num})" = "zfs" ]; then
reimport_zpool mds${num} $newname-mdt${num}
fi
done
for num in $(seq $OSTCOUNT); do
- local facet=$(ostdevname $num)
+ local dev=$(ostdevname $num)
do_facet ost${num} \
- "$TUNEFS --fsname=$newname --rename=$FSNAME -v $facet"||
- error "(9) Fail to rename OST $num"
- if [ "$(facet_fstype $facet)" = "zfs" ]; then
+ "$TUNEFS --fsname=$newname --rename=$FSNAME -v $dev" ||
+ error "(9) Fail to rename OST $num"
+ if [ "$(facet_fstype ost${num})" = "zfs" ]; then
reimport_zpool ost${num} $newname-ost${num}
fi
done
cp $LUSTRE/tests/test-framework.sh $DIR/$tdir ||
error "(2) Fail to copy test-framework.sh"
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
do_facet mgs $LCTL pool_new $FSNAME.pool1 ||
error "(3) Fail to create $FSNAME.pool1"
# name the pool name as the fsname
$LFS setstripe -p $FSNAME $DIR/$tdir/d0 ||
error "(6) Fail to setstripe on $DIR/$tdir/d0"
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
KEEP_ZPOOL=true
stopall
FSNAME="mylustre"
setupall
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
test_103_check_pool $save_fsname 7
if [ $OSTCOUNT -ge 2 ]; then
$LFS setstripe -p $save_fsname $DIR/$tdir/f0 ||
error "(16) Fail to setstripe on $DIR/$tdir/f0"
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
stopall
FSNAME="tfs"
setupall
- if ! combined_mgs_mds ; then
- mount_mgs_client
- fi
test_103_check_pool $save_fsname 17
- if ! combined_mgs_mds ; then
- umount_mgs_client
- fi
stopall
test_renamefs $save_fsname
}
run_test 103 "rename filesystem name"
-test_104() { # LU-6952
+test_104a() { # LU-6952
local mds_mountopts=$MDS_MOUNT_OPTS
local ost_mountopts=$OST_MOUNT_OPTS
local mds_mountfsopts=$MDS_MOUNT_FS_OPTS
OST_MOUNT_OPTS=$ost_mountopts
MDS_MOUNT_FS_OPTS=$mds_mountfsopts
}
-run_test 104 "Make sure user defined options are reflected in mount"
+run_test 104a "Make sure user defined options are reflected in mount"
+
+test_104b() { # LU-12859
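+	# when several flock options are passed, only the last one should
+	# take effect; a trailing noflock should disable flock entirely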
+ mount_client $MOUNT3 flock,localflock
+ stack_trap "umount_client $MOUNT3" EXIT
+ mount | grep "$MOUNT3 .*,flock" && error "flock is still set"
+ mount | grep "$MOUNT3 .*,localflock" || error "localflock is not set"
+ umount_client $MOUNT3
+ mount_client $MOUNT3 localflock,flock
+ mount | grep "$MOUNT3 .*,localflock" && error "localflock is still set"
+ mount | grep "$MOUNT3 .*,flock" || error "flock is not set"
+ umount_client $MOUNT3
+ mount_client $MOUNT3 localflock,flock,noflock
+ flock_is_enabled $MOUNT3 && error "some flock is still enabled" || true
+}
+run_test 104b "Mount uses last flock argument"
error_and_umount() {
umount $TMP/$tdir
run_test 106 "check osp llog processing when catalog is wrapped"
test_107() {
- [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.10.50) ]] ||
+ [[ "$MDS1_VERSION" -ge $(version_code 2.10.50) ]] ||
skip "Need MDS version > 2.10.50"
local cmd
$rcmd mkdir -p $tmp/{mnt,images} || error "failed to mkdir remotely"
for facet in $facets; do
- [ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+ [ "$mds1_FSTYPE" = zfs ] &&
$rcmd $ZPOOL -f export lustre-$facet > /dev/null 2>&1
$rcmd mkdir $tmp/mnt/$facet ||
error "failed to mkdir $tmp/mnt/$facet"
for facet in $facets; do
$rcmd umount -f $tmp/mnt/$facet ||
error "failed to umount $facet"
- if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+ if [ "$mds1_FSTYPE" = zfs ]; then
$rcmd $ZPOOL export -f lustre-$facet ||
error "failed to export lustre-$facet"
fi
test_108a() {
[ "$CLIENTONLY" ] && skip "Client-only testing"
- [ $(facet_fstype $SINGLEMDS) != "zfs" ] && skip "zfs only test"
- [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+ [ "$mds1_FSTYPE" != zfs ] && skip "zfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.10.58) ] &&
skip "Need server version at least 2.10.58"
stopall
test_108b() {
[ "$CLIENTONLY" ] && skip "Client-only testing"
- [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && skip "ldiskfs only test"
- [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.58) ] &&
+ [ "$mds1_FSTYPE" != ldiskfs ] && skip "ldiskfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.10.58) ] &&
skip "Need server version at least 2.10.58"
stopall
test_109a()
{
- [ "$(facet_fstype mgs)" == "zfs" ] &&
+ [ $MDS1_VERSION -lt $(version_code 2.10.59) ] &&
+ skip "Needs MDS version 2.10.59 or later."
+ [ "$(facet_fstype mgs)" == zfs ] &&
skip "LU-8727: no implementation for ZFS"
stopall
reformat
setup_noconfig
client_up || error "client_up failed"
- #pool commands requires a client on MGS for procfs interfaces
- if ! combined_mgs_mds ; then
- mount_mgs_client
- stack_trap umount_mgs_client EXIT
- fi
#
# set number of permanent parameters
#
test_109_set_params $FSNAME
- combined_mgs_mds || umount_mgs_client
umount_client $MOUNT || error "umount_client failed"
stop_ost || error "stop_ost failed"
stop_mds || error "stop_mds failed"
error "failed to clear client config"
setup_noconfig
- combined_mgs_mds || mount_mgs_client
#
# check that configurations are intact
#
destroy_test_pools || error "destroy test pools failed"
- combined_mgs_mds || umount_mgs_client
cleanup
}
run_test 109a "test lctl clear_conf fsname"
test_109b()
{
- [ "$(facet_fstype mgs)" == "zfs" ] &&
+ [ $MDS1_VERSION -lt $(version_code 2.10.59) ] &&
+ skip "Needs MDS version 2.10.59 or later."
+ [ "$(facet_fstype mgs)" == zfs ] &&
skip "LU-8727: no implementation for ZFS"
stopall
reformat
setup_noconfig
client_up || error "client_up failed"
- #pool commands requires a client on MGS for procfs interfaces
- if ! combined_mgs_mds ; then
- mount_mgs_client
- stack_trap umount_mgs_client EXIT
- fi
#
# set number of permanent parameters
#
test_109_set_params $FSNAME
- combined_mgs_mds || umount_mgs_client
umount_client $MOUNT || error "umount_client failed"
stop_ost || error "stop_ost failed"
stop_mds || error "stop_mds failed"
error "failed to clear client config"
setup_noconfig
- combined_mgs_mds || mount_mgs_client
#
# check that configurations are intact
#
#
destroy_test_pools || error "destroy test pools failed"
- combined_mgs_mds || umount_mgs_client
cleanup
}
run_test 109b "test lctl clear_conf one config"
test_110()
{
- [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+ [[ "$mds1_FSTYPE" != ldiskfs ]] &&
skip "Only applicable to ldiskfs-based MDTs"
do_facet $SINGLEMDS $DEBUGFS -w -R supported_features |grep large_dir ||
combined_mgs_mds || replace=" --replace "
local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
$replace --reformat $(mdsdevname 1) $(mdsvdevname 1)"
- if [[ $opts != *mkfsoptions* ]]; then
- opts+=" --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536\\\""
- else
- opts="${opts//--mkfsoptions=\\\"/ \
- --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536 }"
+ if [[ $opts != *large_dir* ]]; then
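+		# -O large_dir is not present yet, add it to the mkfs options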
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536\\\""
+ else
+ opts="${opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O large_dir -b 1024 -i 65536 }"
+ fi
fi
echo "MDT params: $opts"
load_modules
opts="$(mkfs_opts ost1 $(ostdevname 1)) \
$replace --reformat $(ostdevname 1) $(ostvdevname 1)"
- if [[ $opts != *mkfsoptions* ]]; then
- opts+=" --mkfsoptions=\\\"-O large_dir\\\" "
- else
- opts="${opts//--mkfsoptions=\\\"/ \
- --mkfsoptions=\\\"-O large_dir }"
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir\\\" "
+ else
+ opts="${opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O large_dir }"
+ fi
fi
echo "OST params: $opts"
add ost1 $opts || error "add ost1 failed with new params"
run_test 110 "Adding large_dir with 3-level htree"
test_111() {
- [[ $(facet_fstype $SINGLEMDS) != ldiskfs ]] &&
+ [[ "$mds1_FSTYPE" != ldiskfs ]] &&
skip "Only applicable to ldiskfs-based MDTs"
is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
combined_mgs_mds || replace=" --replace "
local opts="$(mkfs_opts mds1 $(mdsdevname 1)) \
$replace --reformat $(mdsdevname 1) $(mdsvdevname 1)"
- if [[ $opts != *mkfsoptions* ]]; then
- opts+=" --mkfsoptions=\\\"-O large_dir -i 1048576 \\\" "
- else
- opts="${opts//--mkfsoptions=\\\"/ \
- --mkfsoptions=\\\"-O large_dir -i 1048576 }"
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir -i 1048576 \\\" "
+ else
+ opts="${opts//--mkfsoptions=\\\"/ \
+ --mkfsoptions=\\\"-O large_dir -i 1048576 }"
+ fi
fi
echo "MDT params: $opts"
load_modules
opts="$(mkfs_opts ost1 $(ostdevname 1)) \
$replace --reformat $(ostdevname 1) $(ostvdevname 1)"
- if [[ $opts != *mkfsoptions* ]]; then
- opts+=" --mkfsoptions=\\\"-O large_dir \\\""
- else
- opts="${opts//--mkfsoptions=\\\"/ --mkfsoptions=\\\"-O large_dir }"
+ if [[ $opts != *large_dir* ]]; then
+ if [[ $opts != *mkfsoptions* ]]; then
+ opts+=" --mkfsoptions=\\\"-O large_dir \\\""
+ else
+ opts="${opts//--mkfsoptions=\\\"/ --mkfsoptions=\\\"-O large_dir }"
+ fi
fi
echo "OST params: $opts"
__touch_device ost 1
trap 0
stopall
rm -f $TMP/$tdir/lustre-mdt
- reformat_and_config
}
test_115() {
- if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ if [ "$mds1_FSTYPE" != ldiskfs ]; then
skip "Only applicable to ldiskfs-based MDTs"
fi
+ local dbfs_ver=$(do_facet $SINGLEMDS $DEBUGFS -V 2>&1)
+
+ echo "debugfs version: $dbfs_ver"
+ echo "$dbfs_ver" | egrep -w "1.44.3.wc1|1.44.5.wc1|1.45.2.wc1" &&
+ skip_env "This version of debugfs doesn't show inode number"
+
+ is_dm_flakey_dev $SINGLEMDS $(mdsdevname 1) &&
+ skip "This test can not be executed on flakey dev"
+
IMAGESIZE=$((3072 << 30)) # 3072 GiB
stopall
local mdsdev=$(do_facet $SINGLEMDS "losetup -f")
do_facet $SINGLEMDS "losetup $mdsdev $mdsimgname"
- local mds_opts="$(mkfs_opts mds1 ${mdsdev}) --device-size=$IMAGESIZE \
- --mkfsoptions='-O lazy_itable_init,ea_inode,^resize_inode,meta_bg \
- -i 1024'"
+ local mds_opts="$(mkfs_opts mds1 $(mdsdevname 1)) --device-size=$IMAGESIZE \
+ --mkfsoptions='-O ea_inode,^resize_inode,meta_bg \
+ -N 2247484000 -E lazy_itable_init'"
add mds1 $mds_opts --mgs --reformat $mdsdev ||
skip_env "format large MDT failed"
- add ost1 $(mkfs_opts ost1 $(ostdevname 1)) --index=$i \
- --reformat $(ostdevname 1) $(ostvdevname 1)
-
- start $SINGLEMDS ${mdsdev} $MDS_MOUNT_OPTS || error "start MDS failed"
+ opts="$(mkfs_opts ost1 $(ostdevname 1)) \
+ $replace --reformat $(ostdevname 1) $(ostvdevname 1)"
+ add ost1 $opts || error "add ost1 failed with new params"
+ start $SINGLEMDS $mdsdev $MDS_MOUNT_OPTS || error "start MDS failed"
start_ost || error "start OSS failed"
mount_client $MOUNT || error "mount client failed"
mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir fail"
- for goal in $(do_facet $SINGLEMDS "ls /sys/fs/ldiskfs/*/inode_goal"); do
- do_facet $SINGLEMDS "echo 2147483947 >> $goal; grep . $goal"
- done
-
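+	# writing to inode_goal forces ldiskfs to allocate the next inode at
+	# or above that number, so the test file's inode lands above 2^31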
+ goal="/sys/fs/ldiskfs/$(basename $mdsdev)/inode_goal"
+	echo goal: $goal
+ # 2147483648 is 0x80000000
+ do_facet $SINGLEMDS "echo 2147483648 >> $goal; grep . $goal"
touch $DIR/$tdir/$tfile
- # Add > 5k bytes to xattr
- for i in {1..30}; do
- ln $DIR/$tdir/$tfile $DIR/$tdir/$(printf "link%0250d" $i) ||
- error "Can't make link"
+	# xattrs 1 to 15 go to the xattr block; the 16th is stored in an ea_inode
+ for i in {1..16}; do
+ local nm="trusted.ea$i"
+ setfattr -n $nm -v $(printf "xattr%0250d" $i) $DIR/$tdir/$tfile
done
- sync; sleep 5; sync
-
+ # inode <2147483649> trusted.ea16 (255)
local inode_num=$(do_facet $SINGLEMDS \
- "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsimgname" |
- awk '/link =/ { print $4 }' |
+ "$DEBUGFS -c -R 'stat ROOT/$tdir/$tfile' $mdsdev" |
+ awk '/ea16/ { print $2 }' |
sed -e 's/>//' -e 's/<//' -e 's/\"//')
echo "inode num: $inode_num"
- [ $inode_num -ge 2147483947 ] || error "inode $inode_num too small"
+ [ $inode_num -ge 2147483648 ] || error "inode $inode_num too small"
do_facet $SINGLEMDS "losetup -d $mdsdev"
cleanup_115
}
run_test 115 "Access large xattr with inodes number over 2TB"
test_116() {
- [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] && skip "ldiskfs only test"
- [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.59) ] &&
+ [ "$mds1_FSTYPE" != ldiskfs ] && skip "ldiskfs only test"
+ [ "$MDS1_VERSION" -lt $(version_code 2.10.59) ] &&
skip "Need server version at least 2.10.59"
do_facet $SINGLEMDS which mkfs.xfs ||
skip_env "No mkfs.xfs installed"
test_120() { # LU-11130
[ "$MDSCOUNT" -lt 2 ] && skip "mdt count < 2"
- [ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
+ [ "$mds1_FSTYPE" != ldiskfs ] &&
skip "ldiskfs only test"
- [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] &&
+ [ "$MDS1_VERSION" -lt $(version_code 2.11.56) ] &&
skip "Need DNE2 capable MD target with LU-11130 fix"
setup
test_122() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
- [[ $(lustre_version_code ost1) -ge $(version_code 2.11.53) ]] ||
+ [[ "$OST1_VERSION" -ge $(version_code 2.11.53) ]] ||
skip "Need OST version at least 2.11.53"
reformat
fi
# test old positional parameters for a while still
- if [ $(lustre_version_code mgs) -le $(version_code 3.1.53) ]; then
+ if [ "$MGS_VERSION" -le $(version_code 3.1.53) ]; then
log=$FSNAME-client
orig=$(do_facet mgs $LCTL --device MGS llog_print $log |
tail -1 | awk '{ print $4 }' | tr -d , )
}
run_test 124 "check failover after replace_nids"
+get_max_sectors_kb() {
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+ do_facet ${facet} "[[ -e ${max_sectors_path} ]] &&
+ cat ${max_sectors_path}"
+}
+
+get_max_hw_sectors_kb() {
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local max_hw_path="/sys/block/${dev_base}/queue/max_hw_sectors_kb"
+
+ do_facet ${facet} "[[ -e ${max_hw_path} ]] && cat ${max_hw_path}"
+}
+
+set_max_sectors_kb() {
+ local facet="$1"
+ local device="$2"
+ local value="$3"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local max_sectors_path="/sys/block/${dev_base}/queue/max_sectors_kb"
+
+ do_facet ${facet} "[[ -e ${max_sectors_path} ]] &&
+ echo ${value} > ${max_sectors_path}"
+ rc=$?
+
+ [[ $rc -ne 0 ]] && echo "Failed to set ${max_sectors_path} to ${value}"
+
+ return $rc
+}
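+
+# Example usage (the facet and device names are illustrative only):
+#   cur=$(get_max_sectors_kb ost1 /dev/sdb)   # read the current limit in KiB
+#   set_max_sectors_kb ost1 /dev/sdb 16384    # cap request size at 16 MiB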
+
+# Return 0 if all slave devices have max_sectors_kb == max_hw_sectors_kb
+# Otherwise return > 0
+check_slaves_max_sectors_kb()
+{
+ local facet="$1"
+ local device="$2"
+ local dev_base=$(basename $(do_facet ${facet} "readlink -f ${device}"))
+ local slaves_dir=/sys/block/${dev_base}/slaves
+ local slave_devices=$(do_facet ${facet} "ls ${slaves_dir} 2>/dev/null")
+ [[ -z ${slave_devices} ]] && return 0
+
+ local slave max_sectors new_max_sectors max_hw_sectors path
+ local rc=0
+ for slave in ${slave_devices}; do
+ path="/dev/${slave}"
+ ! is_blkdev ${facet} ${path} && continue
+ max_sectors=$(get_max_sectors_kb ${facet} ${path})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${path})
+ new_max_sectors=${max_hw_sectors}
+ [[ ${new_max_sectors} -gt ${RQ_SIZE_LIMIT} ]] &&
+ new_max_sectors=${RQ_SIZE_LIMIT}
+
+ if [[ ${max_sectors} -ne ${new_max_sectors} ]]; then
+ echo "${path} ${max_sectors} ${new_max_sectors}"
+ ((rc++))
+ fi
+ check_slaves_max_sectors_kb ${facet} ${path}
+		((rc += $?))
+ done
+
+ return $rc
+}
+
+test_125()
+{
+ local facet_list="mgs mds1 ost1"
+ combined_mgs_mds && facet_list="mgs ost1"
+
+ local facet
+ for facet in ${facet_list}; do
+ [[ $(facet_fstype ${facet}) != ldiskfs ]] &&
+ skip "ldiskfs only test" &&
+ return 0
+ ! is_blkdev ${facet} $(facet_device ${facet}) &&
+ skip "requires all real devices" &&
+ return 0
+ done
+
+ local rc=0
+ # We don't increase IO request size limit past 16MB. See comments in
+ # lustre/utils/libmount_utils_ldiskfs.c:tune_max_sectors_kb()
+ RQ_SIZE_LIMIT=$((16 * 1024))
+ local device old_max_sectors new_max_sectors max_hw_sectors
+ for facet in ${facet_list}; do
+ device=$(facet_device ${facet})
+ old_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ max_hw_sectors=$(get_max_hw_sectors_kb ${facet} ${device})
+
+ # The expected value after l_tunedisk is executed
+ new_max_sectors=$old_max_sectors
+		[[ ${new_max_sectors} -gt ${RQ_SIZE_LIMIT} ]] &&
+			new_max_sectors=${RQ_SIZE_LIMIT}
+
+ # Ensure the current value of max_sectors_kb does not equal
+ # max_hw_sectors_kb, so we can tell whether l_tunedisk did
+ # anything
+ set_max_sectors_kb ${facet} ${device} $((new_max_sectors - 1))
+
+ # Value before l_tunedisk
+ local pre_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+ if [[ ${pre_max_sectors} -ne $((new_max_sectors - 1)) ]]; then
+ echo "unable to satsify test pre-condition:"
+ echo "${pre_max_sectors} != $((new_max_sectors - 1))"
+ ((rc++))
+ continue
+ fi
+
+ echo "Before: ${facet} ${device} ${pre_max_sectors} ${max_hw_sectors}"
+
+ do_facet ${facet} "libtool execute l_tunedisk ${device}"
+
+ # Value after l_tunedisk
+ local post_max_sectors=$(get_max_sectors_kb ${facet} ${device})
+
+ echo "After: ${facet} ${device} ${post_max_sectors} ${max_hw_sectors}"
+
+ if [[ ${facet} != ost1 ]]; then
+ if [[ ${post_max_sectors} -ne ${pre_max_sectors} ]]; then
+ echo "l_tunedisk modified max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ set_max_sectors_kb ${facet} ${device} ${old_max_sectors}
+ else
+ if [[ ${post_max_sectors} -eq ${pre_max_sectors} ]]; then
+ echo "l_tunedisk failed to modify max_sectors_kb of ${facet}"
+ ((rc++))
+ fi
+
+ check_slaves_max_sectors_kb ${facet} ${device} ||
+ ((rc++))
+ fi
+ done
+
+ return $rc
+}
+run_test 125 "check l_tunedisk only tunes OSTs and their slave devices"
+
if ! combined_mgs_mds ; then
stop mgs
fi