ONLY=${ONLY:-"$*"}
# bug number for skipped test:
-# 15977
ALWAYS_EXCEPT="$CONF_SANITY_EXCEPT"
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
+is_sles11() # LU-2181
+{
+	# Return 0 iff the local node runs SLES 11 SP2, where some tests
+	# are known to fail (LU-2181); return 1 otherwise.
+	if [ -r /etc/SuSE-release ]
+	then
+		local vers=$(grep VERSION /etc/SuSE-release | awk '{print $3}')
+		local patchlev=$(grep PATCHLEVEL /etc/SuSE-release \
+				| awk '{print $3}')
+		# Quote and string-compare so an empty or unexpected field
+		# cannot make "[" fail with "unary operator expected".
+		if [ "$vers" = "11" ] && [ "$patchlev" = "2" ]
+		then
+			return 0
+		fi
+	fi
+	return 1
+}
+
+if is_sles11; then # LU-2181
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 23a 34b"
+fi
+
if [ "$FAILURE_MODE" = "HARD" ]; then
CONFIG_EXCEPTIONS="24a " && \
echo "Except the tests: $CONFIG_EXCEPTIONS for FAILURE_MODE=$FAILURE_MODE, bug 23573" && \
ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
fi
+# bug number for skipped test: (none -- 32newtarball is a tool that creates
+# lustre filesystem images for the upgrade tests, not a regression test)
+ALWAYS_EXCEPT="32newtarball $ALWAYS_EXCEPT"
+
SRCDIR=`dirname $0`
PATH=$PWD/$SRCDIR:$SRCDIR:$SRCDIR/../utils:$PATH
fi
# pass "-E lazy_itable_init" to mke2fs to speed up the formatting time
-for facet in MGS MDS OST; do
- opts=${facet}_MKFS_OPTS
- if [[ ${!opts} != *lazy_itable_init* ]]; then
- eval SAVED_${facet}_MKFS_OPTS=\"${!opts}\"
- if [[ ${!opts} != *mkfsoptions* ]]; then
- eval ${facet}_MKFS_OPTS=\"${!opts} --mkfsoptions='\\\"-E lazy_itable_init\\\"'\"
- else
- val=${!opts//--mkfsoptions=\\\"/--mkfsoptions=\\\"-E lazy_itable_init }
- eval ${facet}_MKFS_OPTS='${val}'
- fi
- fi
-done
+if [[ "$LDISKFS_MKFS_OPTS" != *lazy_itable_init* ]]; then
+ LDISKFS_MKFS_OPTS=$(csa_add "$LDISKFS_MKFS_OPTS" -E lazy_itable_init)
+fi
init_logging
grep " $MOUNT " /etc/mtab && \
error false "unexpected entry in mtab before mount" && return 10
+ [ "$(facet_fstype ost1)" = "zfs" ] &&
+ skip "LU-2059: no local config for ZFS OSTs" && return
+
local rc=0
start_ost
start_mds
#
test_17() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
setup
check_mount || return 41
cleanup || return $?
run_test 17 "Verify failed mds_postsetup won't fail assertion (2936) (should return errs)"
test_18() {
- [ "$FSTYPE" != "ldiskfs" ] && skip "not needed for FSTYPE=$FSTYPE" && return
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
echo "mount mds with large journal..."
- local OLD_MDS_MKFS_OPTS=$MDS_MKFS_OPTS
- local opts="--mdt --fsname=$FSNAME --device-size=$myMDSSIZE --param sys.timeout=$TIMEOUT $MDSOPT"
-
- if combined_mgs_mds ; then
- MDS_MKFS_OPTS="--mgs $opts"
- else
- MDS_MKFS_OPTS="--mgsnode=$MGSNID $opts"
- fi
+ local OLD_MDSSIZE=$MDSSIZE
+ MDSSIZE=$myMDSSIZE
reformat_and_config
echo "mount lustre system..."
cleanup || return $?
- MDS_MKFS_OPTS=$OLD_MDS_MKFS_OPTS
- reformat_and_config
+ MDSSIZE=$OLD_MDSSIZE
+ reformat_and_config
}
run_test 18 "check mkfs creates large journals"
run_test 19a "start/stop MDS without OSTs"
test_19b() {
+ [ "$(facet_fstype ost1)" = "zfs" ] &&
+ skip "LU-2059: no local config for ZFS OSTs" && return
+
start_ost || return 1
stop_ost -f || return 2
}
run_test 21a "start mds before ost, stop ost first"
test_21b() {
+ [ "$(facet_fstype ost1)" = "zfs" ] &&
+ skip "LU-2059: no local config for ZFS OSTs" && return
+
start_ost
start_mds
wait_osc_import_state mds ost FULL
fs2mds_HOST=$mds_HOST
fs2ost_HOST=$ost_HOST
+MDSDEV1_2=$fs2mds_DEV
+OSTDEV1_2=$fs2ost_DEV
+OSTDEV2_2=$fs3ost_DEV
+
cleanup_24a() {
trap 0
echo "umount $MOUNT2 ..."
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
- local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
- local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
+ local fs2mdsdev=$(mdsdevname 1_2)
+ local fs2ostdev=$(ostdevname 1_2)
+ local fs2mdsvdev=$(mdsvdevname 1_2)
+ local fs2ostvdev=$(ostvdevname 1_2)
# test 8-char fsname as well
local FSNAME2=test1234
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --nomgs --mgsnode=$MGSNID --reformat $fs2mdsdev || exit 10
+ add fs2mds $(mkfs_opts mds1) --nomgs --mgsnode=$MGSNID \
+ --fsname=${FSNAME2} --reformat $fs2mdsdev $fs2mdsvdev || exit 10
- add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2ostdev || exit 10
+ add fs2ost $(mkfs_opts ost1) --fsname=${FSNAME2} --reformat \
+ $fs2ostdev $fs2ostvdev || exit 10
setup
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
skip_env "mixed loopback and real device not working" && return
fi
- local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
+ local fs2mdsdev=$(mdsdevname 1_2)
+ local fs2mdsvdev=$(mdsvdevname 1_2)
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME}2 --mgs --reformat $fs2mdsdev || exit 10
+ add fs2mds $(mkfs_opts mds1) --mgs --fsname=${FSNAME}2 --reformat \
+ $fs2mdsdev $fs2mdsvdev || exit 10
setup
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && return 2
cleanup || return 6
lctl get_param -n devices
DEVS=$(lctl get_param -n devices | egrep -v MG | wc -l)
[ $DEVS -gt 0 ] && return 2
+ # start mds to drop writeconf setting
+ start_mds || return 3
+ stop_mds || return 4
unload_modules_conf || return $?
}
run_test 26 "MDT startup failure cleans LOV (should return errs)"
-set_and_check() {
- local myfacet=$1
- local TEST=$2
- local PARAM=$3
- local ORIG=$(do_facet $myfacet "$TEST")
- if [ $# -gt 3 ]; then
- local FINAL=$4
- else
- local -i FINAL
- FINAL=$(($ORIG + 5))
- fi
- echo "Setting $PARAM from $ORIG to $FINAL"
- do_facet mgs "$LCTL conf_param $PARAM='$FINAL'" || error conf_param failed
-
- wait_update $(facet_host $myfacet) "$TEST" "$FINAL" || error check failed!
-}
-
test_27a() {
+ [ "$(facet_fstype ost1)" = "zfs" ] &&
+ skip "LU-2059: no local config for ZFS OSTs" && return
+
start_ost || return 1
start_mds || return 2
echo "Requeue thread should have started: "
ps -e | grep ll_cfg_requeue
- set_and_check ost1 "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
+ set_conf_param_and_check ost1 \
+ "lctl get_param -n obdfilter.$FSNAME-OST0000.client_cache_seconds" \
+ "$FSNAME-OST0000.ost.client_cache_seconds" || return 3
cleanup_nocli
}
run_test 27a "Reacquire MGS lock if OST started first"
local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }')
facet_failover $SINGLEMDS
- set_and_check $SINGLEMDS "lctl get_param -n mdt.$device.identity_acquire_expire" "$device.mdt.identity_acquire_expire" || return 3
- set_and_check client "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight" "$device.mdc.max_rpcs_in_flight" || return 4
+ set_conf_param_and_check $SINGLEMDS \
+ "lctl get_param -n mdt.$device.identity_acquire_expire" \
+ "$device.mdt.identity_acquire_expire" || return 3
+ set_conf_param_and_check client \
+ "lctl get_param -n mdc.$device-mdc-*.max_rpcs_in_flight"\
+ "$device.mdc.max_rpcs_in_flight" || return 4
check_mount
cleanup
}
PARAM="$FSNAME.llite.max_read_ahead_whole_mb"
ORIG=$($TEST)
FINAL=$(($ORIG + 1))
- set_and_check client "$TEST" "$PARAM" $FINAL || return 3
+ set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 3
FINAL=$(($FINAL + 1))
- set_and_check client "$TEST" "$PARAM" $FINAL || return 4
+ set_conf_param_and_check client "$TEST" "$PARAM" $FINAL || return 4
umount_client $MOUNT || return 200
mount_client $MOUNT
RESULT=$($TEST)
else
echo "New config success: got $RESULT"
fi
- set_and_check client "$TEST" "$PARAM" $ORIG || return 5
+ set_conf_param_and_check client "$TEST" "$PARAM" $ORIG || return 5
cleanup
}
run_test 28 "permanent parameter setting"
ACTV=$(lctl get_param -n $PROC_ACT)
DEAC=$((1 - $ACTV))
- set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
+ set_conf_param_and_check client \
+ "lctl get_param -n $PROC_ACT" "$PARAM" $DEAC || return 2
# also check ost_server_uuid status
RESULT=$(lctl get_param -n $PROC_UUID | grep DEACTIV)
if [ -z "$RESULT" ]; then
echo "Waiting $(($MAX - $WAIT)) secs for MDT deactivated"
done
- # quotacheck should not fail immediately after deactivate
- [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; }
-
# test new client starts deactivated
umount_client $MOUNT || return 200
mount_client $MOUNT
echo "New client success: got $RESULT"
fi
- # quotacheck should not fail after umount/mount operation
- [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; }
-
# make sure it reactivates
- set_and_check client "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
+ set_conf_param_and_check client \
+ "lctl get_param -n $PROC_ACT" "$PARAM" $ACTV || return 6
umount_client $MOUNT
stop_ost2
ORIG=$($TEST)
LIST=(1 2 3 4 5 4 3 2 1 2 3 4 5 4 3 2 1 2 3 4 5)
for i in ${LIST[@]}; do
- set_and_check client "$TEST" "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
+ set_conf_param_and_check client "$TEST" \
+ "$FSNAME.llite.max_read_ahead_whole_mb" $i || return 3
done
# make sure client restart still works
umount_client $MOUNT
echo "Using fake nid $NEW"
TEST="$LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids | sed -n 's/.*\($NEW\).*/\1/p'"
- set_and_check client "$TEST" "$FSNAME-OST0000.failover.node" $NEW || error "didn't add failover nid $NEW"
+ set_conf_param_and_check client "$TEST" \
+ "$FSNAME-OST0000.failover.node" $NEW ||
+ error "didn't add failover nid $NEW"
NIDS=$($LCTL get_param -n osc.$FSNAME-OST0000-osc-[^M]*.import | grep failover_nids)
echo $NIDS
NIDCOUNT=$(($(echo "$NIDS" | wc -w) - 1))
}
run_test 31 "Connect to non-existent node (shouldn't crash)"
-# Use these start32/stop32 fn instead of t-f start/stop fn,
-# for local devices, to skip global facet vars init
-stop32 () {
- local facet=$1
- shift
- echo "Stopping local ${MOUNT%/*}/${facet} (opts:$@)"
- umount -d $@ ${MOUNT%/*}/${facet}
- losetup -a
-}
-
-start32 () {
- local facet=$1
- shift
- local device=$1
- shift
- mkdir -p ${MOUNT%/*}/${facet}
-
- echo "Starting local ${facet}: $@ $device ${MOUNT%/*}/${facet}"
- mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}
- local RC=$?
- if [ $RC -ne 0 ]; then
- echo "mount -t lustre $@ ${device} ${MOUNT%/*}/${facet}"
- echo "Start of ${device} of local ${facet} failed ${RC}"
- fi
- losetup -a
- return $RC
-}
-
-cleanup_nocli32 () {
- stop32 mds1 -f
- stop32 ost1 -f
- wait_exit_ST client
-}
+#
+# This is not really a test but a tool to create new disk
+# image tarballs for the upgrade tests.
+#
+# Disk image tarballs should be created on single-node
+# clusters by running this test with default configurations
+# plus a few mandatory environment settings that are verified
+# at the beginning of the test.
+#
+test_32newtarball() {
+ local version
+ local dst=.
+ local src=/etc/rc.d
+ local tmp=$TMP/t32_image_create
+
+ if [ $FSNAME != t32fs -o $MDSCOUNT -ne 1 -o \
+ \( -z "$MDSDEV" -a -z "$MDSDEV1" \) -o $OSTCOUNT -ne 1 -o \
+ -z "$OSTDEV1" ]; then
+ error "Needs FSNAME=t32fs MDSCOUNT=1 MDSDEV1=<nonexistent_file>" \
+ "(or MDSDEV, in the case of b1_8) OSTCOUNT=1" \
+ "OSTDEV1=<nonexistent_file>"
+ fi
-cleanup_32() {
- trap 0
- echo "Cleanup test_32 umount $MOUNT ..."
- umount -f $MOUNT || true
- echo "Cleanup local mds ost1 ..."
- cleanup_nocli32
- combined_mgs_mds || start_mgs
- unload_modules_conf
-}
+ mkdir $tmp || {
+ echo "Found stale $tmp"
+ return 1
+ }
-test_32a() {
- client_only && skip "client only testing" && return 0
- [ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ -z "$TUNEFS" ] && skip_env "No tunefs" && return 0
+ mkdir $tmp/src
+ tar cf - -C $src . | tar xf - -C $tmp/src
- local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
- [ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0
- local tmpdir=$TMP/conf32a
- mkdir -p $tmpdir
- tar xjvf $DISK1_8 -C $tmpdir || \
- { skip_env "Cannot untar $DISK1_8" && return 0; }
+ formatall
- load_modules
- $LCTL set_param debug="$PTLDEBUG"
+ setupall
+ tar cf - -C $tmp/src . | tar xf - -C /mnt/$FSNAME
+ stopall
- $TUNEFS $tmpdir/mds || error "tunefs failed"
+ mkdir $tmp/img
- combined_mgs_mds || stop mgs
+ setupall
+ pushd /mnt/$FSNAME
+ ls -Rni --time-style=+%s >$tmp/img/list
+ find . ! -name .lustre -type f -exec sha1sum {} \; |
+ sort -k 2 >$tmp/img/sha1sums
+ popd
+ $LCTL get_param -n version | head -n 1 |
+ sed -e 's/^lustre: *//' >$tmp/img/commit
+ stopall
- # nids are wrong, so client wont work, but server should start
- start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
- trap cleanup_32 EXIT INT || return 3
+ pushd $tmp/src
+ find -type f -exec sha1sum {} \; | sort -k 2 >$tmp/sha1sums.src
+ popd
- local UUID=$($LCTL get_param -n mdt.lustre-MDT0000.uuid)
- echo MDS uuid $UUID
- [ "$UUID" == "lustre-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
+ if ! diff -u $tmp/sha1sums.src $tmp/img/sha1sums; then
+ echo "Data verification failed"
+ fi
- $TUNEFS --mgsnode=$HOSTNAME $tmpdir/ost1 || error "tunefs failed"
- start32 ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$($LCTL get_param -n obdfilter.lustre-OST0000.uuid)
- echo OST uuid $UUID
- [ "$UUID" == "lustre-OST0000_UUID" ] || error "UUID is wrong: $UUID"
+ uname -r >$tmp/img/kernel
+ uname -m >$tmp/img/arch
- local NID=$($LCTL list_nids | head -1)
+ mv ${MDSDEV1:-$MDSDEV} $tmp/img
+ mv $OSTDEV1 $tmp/img
- echo "OSC changes should succeed:"
- $LCTL conf_param lustre-OST0000.osc.max_dirty_mb=15 || return 7
- $LCTL conf_param lustre-OST0000.failover.node=$NID || return 8
- echo "ok."
+ version=$(sed -e 's/\(^[0-9]\+\.[0-9]\+\)\(.*$\)/\1/' $tmp/img/commit |
+ sed -e 's/\./_/g') # E.g., "1.8.7" -> "1_8"
+ dst=$(cd $dst; pwd)
+ pushd $tmp/img
+ tar cjvf $dst/disk$version-$(facet_fstype $SINGLEMDS).tar.bz2 -S *
+ popd
- echo "MDC changes should succeed:"
- $LCTL conf_param lustre-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
- $LCTL conf_param lustre-MDT0000.failover.node=$NID || return 10
- echo "ok."
+ rm -r $tmp
+}
+#run_test 32newtarball "Create a new test_32 disk image tarball for this version"
- echo "LOV changes should succeed:"
- $LCTL pool_new lustre.interop || return 11
- $LCTL conf_param lustre-MDT0000.lov.stripesize=4M || return 12
- echo "ok."
+#
+# The list of applicable tarballs is returned via the caller's
+# variable "tarballs".
+#
+t32_check() {
+ local node=$(facet_active_host $SINGLEMDS)
+ local r="do_node $node"
- cleanup_32
+ if [ "$CLIENTONLY" ]; then
+ skip "Client-only testing"
+ exit 0
+ fi
- # mount a second time to make sure we didnt leave upgrade flag on
- load_modules
- $TUNEFS --dryrun $tmpdir/mds || error "tunefs failed"
+ if ! $r which $TUNEFS; then
+ skip_env "tunefs.lustre required on $node"
+ exit 0
+ fi
- combined_mgs_mds || stop mgs
+ if [ -n "$($LCTL list_nids | grep -v '\(tcp\|lo\)[[:digit:]]*$')" ]; then
+ skip "LU-2200: Test cannot run over Infiniband"
+ exit 0
+ fi
- start32 mds1 $tmpdir/mds "-o loop,exclude=lustre-OST0000" && \
- trap cleanup_32 EXIT INT || return 12
+ local IMGTYPE=$(facet_fstype $SINGLEMDS)
- cleanup_32
+ tarballs=$($r find $RLUSTRE/tests -maxdepth 1 -name \'disk*-$IMGTYPE.tar.bz2\')
- rm -rf $tmpdir || true # true is only for TMP on NFS
+ if [ -z "$tarballs" ]; then
+ skip "No applicable tarballs found"
+ exit 0
+ fi
}
-run_test 32a "Upgrade from 1.8 (not live)"
-test_32b() {
- client_only && skip "client only testing" && return 0
- [ "$NETTYPE" = "tcp" ] || { skip "NETTYPE != tcp" && return 0; }
- [ -z "$TUNEFS" ] && skip_env "No tunefs" && return
+t32_test_cleanup() {
+	# Cleanup handler for t32_test(), invoked from its RETURN trap.
+	local node=$(facet_active_host $SINGLEMDS)
+	local r="do_node $node"
+	local tmp=$TMP/t32
+	# NOTE(review): $? here is the status of the preceding "local"
+	# command (always 0), not of the failed test step.  If the intent
+	# was to propagate the caller's status, this capture must be the
+	# very first statement of the function -- confirm.
+	local rc=$?
-	local DISK1_8=$LUSTRE/tests/disk1_8.tar.bz2
-	[ ! -r $DISK1_8 ] && skip_env "Cannot find $DISK1_8" && return 0
-	local tmpdir=$TMP/conf32b
-	mkdir -p $tmpdir
-	tar xjvf $DISK1_8 -C $tmpdir || \
-		{ skip_env "Cannot untar $DISK1_8" && return ; }
+	# The shall_cleanup_* flags are locals of t32_test(); they are
+	# visible here because the RETURN trap runs in that scope.
+	if $shall_cleanup_lustre; then
+		umount $tmp/mnt/lustre || rc=$?
+	fi
+	if $shall_cleanup_mdt; then
+		$r umount -d $tmp/mnt/mdt || rc=$?
+	fi
+	if $shall_cleanup_ost; then
+		$r umount -d $tmp/mnt/ost || rc=$?
+	fi
+	# Remove the scratch directory on both the server node and locally.
+	$r rm -rf $tmp || rc=$?
+	rm -rf $tmp || rc=$?
+	return $rc
+}
- load_modules
- $LCTL set_param debug="+config"
- local NEWNAME=lustre
+t32_bits_per_long() {
+	#
+	# Yes, this is not meant to be perfect.
+	#
+	# Map a machine architecture string (as reported by "uname -m")
+	# to its native word size in bits, written without a newline.
+	# NOTE(review): there is no default arm, so an unrecognized
+	# architecture produces no output at all, which leaves the
+	# caller's unquoted "[ ... != ... ]" comparison malformed --
+	# confirm that all disk images come from the listed arches.
+	case $1 in
+		ppc64|x86_64)
+			echo -n 64;;
+		i*86)
+			echo -n 32;;
+	esac
+}
- # writeconf will cause servers to register with their current nids
- $TUNEFS --writeconf --erase-params \
- --param mdt.identity_upcall=$L_GETIDENTITY \
- --fsname=$NEWNAME $tmpdir/mds || error "tunefs failed"
- combined_mgs_mds || stop mgs
+t32_reload_modules() {
+	# Unload all Lustre modules on $node, retrying every 5s for up to
+	# 20 attempts, then load the current modules again.  Returns 0 on
+	# success; 1 on a memory leak or if unloading never succeeds.
+	local node=$1
+	local all_removed=false
+	local i=0
+
+	while ((i < 20)); do
+		echo "Unloading modules on $node: Attempt $i"
+		# $LUSTRE_RMMOD takes the backing fstype to pick the right
+		# module set; a failure here just means "try again".
+		do_rpc_nodes $node $LUSTRE_RMMOD $(facet_fstype $SINGLEMDS) &&
+			all_removed=true
+		# A detected leak is fatal -- no point in retrying.
+		do_rpc_nodes $node check_mem_leak || return 1
+		if $all_removed; then
+			load_modules
+			return 0
+		fi
+		sleep 5
+		i=$((i + 1))
+	done
+	echo "Unloading modules on $node: Given up"
+	return 1
+}
- start32 mds1 $tmpdir/mds "-o loop" && \
- trap cleanup_32 EXIT INT || return 3
+t32_wait_til_devices_gone() {
+	# Poll $node until "lctl device_list" reports no devices, checking
+	# every 5 seconds for up to 20 attempts.  Returns 0 once all
+	# devices are gone, 1 if some remain after the timeout.  (The
+	# current caller ignores the status, so this stays compatible.)
+	local node=$1
+	local devices
+	local i=0
+
+	echo wait for devices to go
+	while ((i < 20)); do
+		devices=$(do_rpc_nodes $node $LCTL device_list | wc -l)
+		# was "echo $device": undefined variable, printed nothing
+		echo $devices
+		# success path: was "return 1", which made this function
+		# report failure exactly when the devices had gone away
+		((devices == 0)) && return 0
+		sleep 5
+		i=$((i + 1))
+	done
+	echo "waiting for devices on $node: Given up"
+	return 1
+}
+
+t32_test() {
+ local tarball=$1
+ local writeconf=$2
+ local shall_cleanup_mdt=false
+ local shall_cleanup_ost=false
+ local shall_cleanup_lustre=false
+ local node=$(facet_active_host $SINGLEMDS)
+ local r="do_node $node"
+ local tmp=$TMP/t32
+ local img_commit
+ local img_kernel
+ local img_arch
+ local fsname=t32fs
+ local nid=$($r $LCTL list_nids | head -1)
+ local mopts
+ local uuid
+ local nrpcs_orig
+ local nrpcs
+ local list
+
+ trap 'trap - RETURN; t32_test_cleanup' RETURN
+
+ mkdir -p $tmp/mnt/lustre
+ $r mkdir -p $tmp/mnt/{mdt,ost}
+ $r tar xjvf $tarball -S -C $tmp || {
+ error_noexit "Unpacking the disk image tarball"
+ return 1
+ }
+ img_commit=$($r cat $tmp/commit)
+ img_kernel=$($r cat $tmp/kernel)
+ img_arch=$($r cat $tmp/arch)
+ echo "Upgrading from $(basename $tarball), created with:"
+ echo " Commit: $img_commit"
+ echo " Kernel: $img_kernel"
+ echo " Arch: $img_arch"
+
+ $r $LCTL set_param debug="$PTLDEBUG"
+
+ $r $TUNEFS --dryrun $tmp/mdt || {
+ error_noexit "tunefs.lustre before mounting the MDT"
+ return 1
+ }
+ if [ "$writeconf" ]; then
+ mopts=loop,writeconf
+ else
+ mopts=loop,exclude=$fsname-OST0000
+ fi
- local UUID=$($LCTL get_param -n mdt.${NEWNAME}-MDT0000.uuid)
- echo MDS uuid $UUID
- [ "$UUID" == "${NEWNAME}-MDT0000_UUID" ] || error "UUID is wrong: $UUID"
+ t32_wait_til_devices_gone $node
- $TUNEFS --writeconf --erase-params \
- --mgsnode=$HOSTNAME --fsname=$NEWNAME $tmpdir/ost1 ||\
- error "tunefs failed"
- start32 ost1 $tmpdir/ost1 "-o loop" || return 5
- UUID=$($LCTL get_param -n obdfilter.${NEWNAME}-OST0000.uuid)
- echo OST uuid $UUID
- [ "$UUID" == "${NEWNAME}-OST0000_UUID" ] || error "UUID is wrong: $UUID"
+ $r mount -t lustre -o $mopts $tmp/mdt $tmp/mnt/mdt || {
+ error_noexit "Mounting the MDT"
+ return 1
+ }
+ shall_cleanup_mdt=true
- local NID=$($LCTL list_nids | head -1)
+ uuid=$($r $LCTL get_param -n mdt.$fsname-MDT0000.uuid) || {
+ error_noexit "Getting MDT UUID"
+ return 1
+ }
+ if [ "$uuid" != $fsname-MDT0000_UUID ]; then
+ error_noexit "Unexpected MDT UUID: \"$uuid\""
+ return 1
+ fi
- echo "OSC changes should succeed:"
- $LCTL conf_param ${NEWNAME}-OST0000.osc.max_dirty_mb=15 || return 7
- $LCTL conf_param ${NEWNAME}-OST0000.failover.node=$NID || return 8
- echo "ok."
+ $r $TUNEFS --dryrun $tmp/ost || {
+ error_noexit "tunefs.lustre before mounting the OST"
+ return 1
+ }
+ if [ "$writeconf" ]; then
+ mopts=loop,mgsnode=$nid,$writeconf
+ else
+ mopts=loop,mgsnode=$nid
+ fi
+ $r mount -t lustre -o $mopts $tmp/ost $tmp/mnt/ost || {
+ error_noexit "Mounting the OST"
+ return 1
+ }
+ shall_cleanup_ost=true
- echo "MDC changes should succeed:"
- $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=9 || return 9
- $LCTL conf_param ${NEWNAME}-MDT0000.failover.node=$NID || return 10
- echo "ok."
+ uuid=$($r $LCTL get_param -n obdfilter.$fsname-OST0000.uuid) || {
+ error_noexit "Getting OST UUID"
+ return 1
+ }
+ if [ "$uuid" != $fsname-OST0000_UUID ]; then
+ error_noexit "Unexpected OST UUID: \"$uuid\""
+ return 1
+ fi
- echo "LOV changes should succeed:"
- $LCTL pool_new ${NEWNAME}.interop || return 11
- $LCTL conf_param ${NEWNAME}-MDT0000.lov.stripesize=4M || return 12
- echo "ok."
+ $r $LCTL conf_param $fsname-OST0000.osc.max_dirty_mb=15 || {
+ error_noexit "Setting \"max_dirty_mb\""
+ return 1
+ }
+ $r $LCTL conf_param $fsname-OST0000.failover.node=$nid || {
+ error_noexit "Setting OST \"failover.node\""
+ return 1
+ }
+ $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=9 || {
+ error_noexit "Setting \"max_rpcs_in_flight\""
+ return 1
+ }
+ $r $LCTL conf_param $fsname-MDT0000.failover.node=$nid || {
+ error_noexit "Setting MDT \"failover.node\""
+ return 1
+ }
+ $r $LCTL pool_new $fsname.interop || {
+ error_noexit "Setting \"interop\""
+ return 1
+ }
+ $r $LCTL conf_param $fsname-MDT0000.lov.stripesize=4M || {
+ error_noexit "Setting \"lov.stripesize\""
+ return 1
+ }
+
+ if [ "$writeconf" ]; then
+ mount -t lustre $nid:/$fsname $tmp/mnt/lustre || {
+ error_noexit "Mounting the client"
+ return 1
+ }
+ shall_cleanup_lustre=true
+ $LCTL set_param debug="$PTLDEBUG"
+
+ if $r test -f $tmp/sha1sums; then
+ $r sort -k 2 $tmp/sha1sums >$tmp/sha1sums.orig
+ pushd $tmp/mnt/lustre
+ find ! -name .lustre -type f -exec sha1sum {} \; |
+ sort -k 2 >$tmp/sha1sums || {
+ error_noexit "sha1sum"
+ return 1
+ }
+ popd
+ if ! diff -ub $tmp/sha1sums.orig $tmp/sha1sums; then
+ error_noexit "sha1sum verification failed"
+ return 1
+ fi
+ else
+ echo "sha1sum verification skipped"
+ fi
- # MDT and OST should have registered with new nids, so we should have
- # a fully-functioning client
- echo "Check client and old fs contents"
+ if $r test -f $tmp/list; then
+ #
+ # There is not a Test Framework API to copy files to or
+ # from a remote node.
+ #
+ $r sort -k 6 $tmp/list >$tmp/list.orig
+ pushd $tmp/mnt/lustre
+ ls -Rni --time-style=+%s | sort -k 6 >$tmp/list || {
+ error_noexit "ls"
+ return 1
+ }
+ popd
+ #
+ # 32-bit and 64-bit clients use different algorithms to
+ # convert FIDs into inode numbers. Hence, remove the inode
+ # numbers from the lists, if the original list was created
+ # on an architecture with different number of bits per
+ # "long".
+ #
+ if [ $(t32_bits_per_long $(uname -m)) != \
+ $(t32_bits_per_long $img_arch) ]; then
+ echo "Different number of bits per \"long\" from the disk image"
+ for list in list.orig list; do
+ sed -i -e 's/^[0-9]\+[ \t]\+//' $tmp/$list
+ done
+ fi
+ if ! diff -ub $tmp/list.orig $tmp/list; then
+ error_noexit "list verification failed"
+ return 1
+ fi
+ else
+ echo "list verification skipped"
+ fi
- local device=`h2$NETTYPE $HOSTNAME`:/$NEWNAME
- echo "Starting local client: $HOSTNAME: $device $MOUNT"
- mount -t lustre $device $MOUNT || return 1
+ #
+ # When adding new data verification tests, please check for
+ # the presence of the required reference files first, like
+ # the "sha1sums" and "list" tests above, to avoid the need to
+ # regenerate every image for each test addition.
+ #
+
+ nrpcs_orig=$($LCTL get_param -n mdc.*.max_rpcs_in_flight) || {
+ error_noexit "Getting \"max_rpcs_in_flight\""
+ return 1
+ }
+ nrpcs=$((nrpcs_orig + 5))
+ $r $LCTL conf_param $fsname-MDT0000.mdc.max_rpcs_in_flight=$nrpcs || {
+ error_noexit "Changing \"max_rpcs_in_flight\""
+ return 1
+ }
+ wait_update $HOSTNAME "$LCTL get_param -n mdc.*.max_rpcs_in_flight" \
+ $nrpcs || {
+ error_noexit "Verifying \"max_rpcs_in_flight\""
+ return 1
+ }
+
+ umount $tmp/mnt/lustre || {
+ error_noexit "Unmounting the client"
+ return 1
+ }
+ shall_cleanup_lustre=false
+ else
+ $r umount -d $tmp/mnt/mdt || {
+ error_noexit "Unmounting the MDT"
+ return 1
+ }
+ shall_cleanup_mdt=false
+ $r umount -d $tmp/mnt/ost || {
+ error_noexit "Unmounting the OST"
+ return 1
+ }
+ shall_cleanup_ost=false
+
+ t32_reload_modules $node || {
+ error_noexit "Reloading modules"
+ return 1
+ }
+
+ # mount a second time to make sure we didnt leave upgrade flag on
+ $r $TUNEFS --dryrun $tmp/mdt || {
+ error_noexit "tunefs.lustre before remounting the MDT"
+ return 1
+ }
+ $r mount -t lustre -o loop,exclude=$fsname-OST0000 $tmp/mdt \
+ $tmp/mnt/mdt || {
+ error_noexit "Remounting the MDT"
+ return 1
+ }
+ shall_cleanup_mdt=true
+ fi
+}
- local old=$($LCTL get_param -n mdc.*.max_rpcs_in_flight)
- local new=$((old + 5))
- $LCTL conf_param ${NEWNAME}-MDT0000.mdc.max_rpcs_in_flight=$new
- wait_update $HOSTNAME "$LCTL get_param -n mdc.*.max_rpcs_in_flight" $new || return 11
+test_32a() {
+	# Non-live upgrade test: run t32_test once per disk image tarball.
+	# t32_check populates $tarballs in this scope (by-reference
+	# convention) and skips the test when none are applicable.
+	local tarballs
+	local tarball
+	local rc=0
-	[ "$(cksum $MOUNT/passwd | cut -d' ' -f 1,2)" == "94306271 1478" ] || return 12
-	echo "ok."
+	t32_check
+	for tarball in $tarballs; do
+		# Keep going on failure so every image gets exercised;
+		# remember the last non-zero status.
+		t32_test $tarball || rc=$?
+	done
+	return $rc
+}
+run_test 32a "Upgrade (not live)"
- cleanup_32
+test_32b() {
+	# Same as test_32a but passes "writeconf" so t32_test regenerates
+	# the configuration logs and mounts a client for data verification.
+	local tarballs
+	local tarball
+	local rc=0
-	rm -rf $tmpdir || true # true is only for TMP on NFS
+	t32_check
+	for tarball in $tarballs; do
+		# Keep going on failure; remember the last non-zero status.
+		t32_test $tarball writeconf || rc=$?
+	done
+	return $rc
 }
-run_test 32b "Upgrade from 1.8 with writeconf"
+run_test 32b "Upgrade with writeconf"
test_33a() { # bug 12333, was test_33
local rc=0
local FSNAME2=test-123
local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
+ local mkfsoptions
[ -n "$ost1_HOST" ] && fs2ost_HOST=$ost1_HOST
skip_env "mixed loopback and real device not working" && return
fi
- combined_mgs_mds || mkfs_opts="$mkfs_opts --nomgs"
+ local fs2mdsdev=$(mdsdevname 1_2)
+ local fs2ostdev=$(ostdevname 1_2)
+ local fs2mdsvdev=$(mdsvdevname 1_2)
+ local fs2ostvdev=$(ostvdevname 1_2)
+
+ if [ $(facet_fstype mds1) == ldiskfs ]; then
+ mkfsoptions="--mkfsoptions=\\\"-J size=8\\\"" # See bug 17931.
+ fi
- local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
- local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
- add fs2mds $MDS_MKFS_OPTS --mkfsoptions='\"-J size=8\"' --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
- add fs2ost $OST_MKFS_OPTS --fsname=${FSNAME2} --index=8191 --mgsnode=$MGSNID --reformat $fs2ostdev || exit 10
+ add fs2mds $(mkfs_opts mds1) --fsname=${FSNAME2} --reformat \
+ $mkfsoptions $fs2mdsdev $fs2mdsvdev || exit 10
+ add fs2ost $(mkfs_opts ost1) --mgsnode=$MGSNID --fsname=${FSNAME2} \
+ --index=8191 --reformat $fs2ostdev $fs2ostvdev || exit 10
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS && trap cleanup_24a EXIT INT
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
umount -d $MOUNT2
stop fs2ost -f
stop fs2mds -f
- rm -rf $MOUNT2 $fs2mdsdev $fs2ostdev
cleanup_nocli || rc=6
return $rc
}
do_facet client dd if=/dev/zero of=$MOUNT/24 bs=1024k count=1
# Drop lock cancelation reply during umount
- #define OBD_FAIL_LDLM_CANCEL 0x304
+ #define OBD_FAIL_LDLM_CANCEL_NET 0x304
do_facet client lctl set_param fail_loc=0x80000304
#lctl set_param debug=-1
umount_client $MOUNT
log "Set up a fake failnode for the MDS"
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "lctl get_param -n devices" | awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mgs $LCTL conf_param ${device}.failover.node=$FAKENID || return 4
+ do_facet mgs "$LCTL conf_param ${device}.failover.node=" \
+ "$(h2$NETTYPE $FAKENID)" || return 4
log "Wait for RECONNECT_INTERVAL seconds (10s)"
sleep 10
FAKENID="127.0.0.2"
local device=$(do_facet $SINGLEMDS "$LCTL get_param -n devices" | \
awk '($3 ~ "mdt" && $4 ~ "MDT") { print $4 }' | head -1)
- do_facet mgs "$LCTL conf_param ${device}.failover.node=$FAKENID" || \
- return 1
+ do_facet mgs "$LCTL conf_param ${device}.failover.node=" \
+ "$(h2$NETTYPE $FAKENID)" || return 1
local at_max_saved=0
# adaptive timeouts may prevent seeing the issue
skip_env "mixed loopback and real device not working" && return
fi
- local fs2mdsdev=${fs2mds_DEV:-${MDSDEV}_2}
- local fs2ostdev=${fs2ost_DEV:-$(ostdevname 1)_2}
- local fs3ostdev=${fs3ost_DEV:-$(ostdevname 2)_2}
- add fs2mds $MDS_MKFS_OPTS --fsname=${FSNAME2} --reformat $fs2mdsdev || exit 10
- # XXX after we support non 4K disk blocksize, change following --mkfsoptions with
- # other argument
- add fs2ost $OST_MKFS_OPTS --mkfsoptions='-b4096' --fsname=${FSNAME2} --mgsnode=$MGSNID --reformat $fs2ostdev || exit 10
- add fs3ost $OST_MKFS_OPTS --mkfsoptions='-b4096' --fsname=${FSNAME2} --mgsnode=$MGSNID --reformat $fs3ostdev || exit 10
+ local fs2mdsdev=$(mdsdevname 1_2)
+ local fs2ostdev=$(ostdevname 1_2)
+ local fs3ostdev=$(ostdevname 2_2)
+ local fs2mdsvdev=$(mdsvdevname 1_2)
+ local fs2ostvdev=$(ostvdevname 1_2)
+ local fs3ostvdev=$(ostvdevname 2_2)
+
+ add fs2mds $(mkfs_opts mds1) --fsname=${FSNAME2} --reformat \
+ $fs2mdsdev $fs2mdsvdev || exit 10
+ # XXX after we support non 4K disk blocksize in ldiskfs, specify a
+ # different one than the default value here.
+ add fs2ost $(mkfs_opts ost1) --mgsnode=$MGSNID --fsname=${FSNAME2} \
+ --reformat $fs2ostdev $fs2ostvdev || exit 10
+ add fs3ost $(mkfs_opts ost1) --mgsnode=$MGSNID --fsname=${FSNAME2} \
+ --reformat $fs3ostdev $fs3ostvdev || exit 10
start fs2mds $fs2mdsdev $MDS_MOUNT_OPTS
start fs2ost $fs2ostdev $OST_MOUNT_OPTS
stop fs3ost -f || return 200
stop fs2ost -f || return 201
stop fs2mds -f || return 202
- rm -rf $MOUNT2 $fs2mdsdev $fs2ostdev $fs3ostdev
unload_modules_conf || return 203
return $rc
}
local mntpt=$(facet_mntpt $SINGLEMDS)
local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
local mdsdev_sym="$TMP/sym_mdt.img"
+ local opts=$MDS_MOUNT_OPTS
+ local rc=0
+
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Currently only applicable to ldiskfs-based MDTs"
+ return
+ fi
echo "MDS : $mdsdev"
echo "SYMLINK : $mdsdev_sym"
echo "mount symlink device - $mdsdev_sym"
- local rc=0
- mount_op=$(do_facet $SINGLEMDS mount -v -t lustre $MDS_MOUNT_OPTS $mdsdev_sym $mntpt 2>&1 )
+ if ! do_facet $SINGLEMDS test -b $mdsdev; then
+ opts=$(csa_add "$opts" -o loop)
+ fi
+ mount_op=$(do_facet $SINGLEMDS mount -v -t lustre $opts \
+ $mdsdev_sym $mntpt 2>&1)
rc=${PIPESTATUS[0]}
echo mount_op=$mount_op
run_test 37 "verify set tunables works for symlink device"
test_38() { # bug 14222
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
setup
# like runtests
COUNT=10
run_test 40 "race during service thread startup"
test_41a() { #bug 14134
- echo $MDS_MOUNT_OPTS | grep "loop" && skip " loop devices does not work with nosvc option" && return
+ if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
+ skip "Loop devices does not work with nosvc option"
+ return
+ fi
local rc
local MDSDEV=$(mdsdevname ${SINGLEMDS//mds/})
run_test 41a "mount mds with --nosvc and --nomgs"
test_41b() {
- echo $MDS_MOUNT_OPTS | grep "loop" && skip " loop devices does not work with nosvc option" && return
+ if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ ! do_facet $SINGLEMDS test -b $(mdsdevname 1); then
+ skip "Loop devices does not work with nosvc option"
+ return
+ fi
! combined_mgs_mds && skip "needs combined mgs device" && return 0
run_test 42 "invalid config param should not prevent client from mounting"
test_43() {
- [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
- setup
- chmod ugo+x $DIR || error "chmod 0 failed"
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
- "$FSNAME.mdt.root_squash" \
- "0:0"
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
- "$FSNAME.mdt.nosquash_nids" \
- "NONE"
+ [ $UID -ne 0 -o $RUNAS_ID -eq 0 ] && skip_env "run as root"
+ setup
+ chmod ugo+x $DIR || error "chmod 0 failed"
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ "$FSNAME.mdt.root_squash" \
+ "0:0"
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ "$FSNAME.mdt.nosquash_nids" \
+ "NONE"
#
# create set of test files
chmod go-rwx $DIR/$tdir-rootdir || error "chmod 3 failed"
touch $DIR/$tdir-rootdir/tfile-1 || error "touch failed"
- #
- # check root_squash:
- # set root squash UID:GID to RUNAS_ID
- # root should be able to access only files owned by RUNAS_ID
- #
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
- "$FSNAME.mdt.root_squash" \
- "$RUNAS_ID:$RUNAS_ID"
+ #
+ # check root_squash:
+ # set root squash UID:GID to RUNAS_ID
+ # root should be able to access only files owned by RUNAS_ID
+ #
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.root_squash" \
+ "$FSNAME.mdt.root_squash" \
+ "$RUNAS_ID:$RUNAS_ID"
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-userfile)
dd if=$DIR/$tfile-userfile 1>/dev/null 2>/dev/null || \
error "$ST: root create permission is granted"
echo "$ST: root create permission is denied - ok"
- #
- # check nosquash_nids:
- # put client's NID into nosquash_nids list,
- # root should be able to access root file after that
- #
- local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
- NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
- NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
- set_and_check mds \
- "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
- "$FSNAME-MDTall.mdt.nosquash_nids" \
- "$NIDLIST"
+ #
+ # check nosquash_nids:
+ # put client's NID into nosquash_nids list,
+ # root should be able to access root file after that
+ #
+ local NIDLIST=$(lctl list_nids all | tr '\n' ' ')
+ NIDLIST="2@elan $NIDLIST 192.168.0.[2,10]@tcp"
+ NIDLIST=$(echo $NIDLIST | tr -s ' ' ' ')
+ set_conf_param_and_check mds \
+ "lctl get_param -n mdt.$FSNAME-MDT0000.nosquash_nids" \
+ "$FSNAME-MDTall.mdt.nosquash_nids" \
+ "$NIDLIST"
ST=$(stat -c "%n: owner uid %u (%A)" $DIR/$tfile-rootfile)
dd if=$DIR/$tfile-rootfile 1>/dev/null 2>/dev/null || \
# check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE
test_49() { # bug 17710
- local OLD_MDS_MKFS_OPTS=$MDS_MKFS_OPTS
- local OLD_OST_MKFS_OPTS=$OST_MKFS_OPTS
+ local timeout_orig=$TIMEOUT
+ local ldlm_timeout_orig=$LDLM_TIMEOUT
local LOCAL_TIMEOUT=20
-
- OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$LOCAL_TIMEOUT $MKFSOPT $OSTOPT"
+ LDLM_TIMEOUT=$LOCAL_TIMEOUT
+ TIMEOUT=$LOCAL_TIMEOUT
reformat
setup_noconfig
stop_ost || return 2
stop_mds || return 3
- OST_MKFS_OPTS="--ost --fsname=$FSNAME --device-size=$OSTSIZE --mgsnode=$MGSNID --param sys.timeout=$LOCAL_TIMEOUT --param sys.ldlm_timeout=$((LOCAL_TIMEOUT - 1)) $MKFSOPT $OSTOPT"
+ LDLM_TIMEOUT=$((LOCAL_TIMEOUT - 1))
reformat
setup_noconfig
cleanup || return $?
- MDS_MKFS_OPTS=$OLD_MDS_MKFS_OPTS
- OST_MKFS_OPTS=$OLD_OST_MKFS_OPTS
+ LDLM_TIMEOUT=$ldlm_timeout_orig
+ TIMEOUT=$timeout_orig
}
run_test 49 "check PARAM_SYS_LDLM_TIMEOUT option of MKFS.LUSTRE"
}
test_52() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
start_mds
[ $? -eq 0 ] || { error "Unable to start MDS"; return 1; }
start_ost
local ost1mnt=$(facet_mntpt ost1)
local ost1node=$(facet_active_host ost1)
local ost1tmp=$TMP/conf52
+ local loop
mkdir -p $DIR/$tdir
[ $? -eq 0 ] || { error "Unable to create tdir"; return 4; }
echo mount ost1 as ldiskfs
do_node $ost1node mkdir -p $ost1mnt
[ $? -eq 0 ] || { error "Unable to create $ost1mnt"; return 23; }
- do_node $ost1node mount -t $FSTYPE $ost1_dev $ost1mnt $OST_MOUNT_OPTS
+ if ! do_node $ost1node test -b $ost1_dev; then
+ loop="-o loop"
+ fi
+ do_node $ost1node mount -t $(facet_fstype ost1) $loop $ost1_dev \
+ $ost1mnt
[ $? -eq 0 ] || { error "Unable to mount ost1 as ldiskfs"; return 12; }
# backup objects
local facet=$2
local parampat=$3
local opts=$4
+ local basethr=$5
local tmin
local tmin2
local tmax
local tstarted
local paramp
local msg="Insane $modname thread counts"
+ local ncpts=$(check_cpt_number $facet)
+ local nthrs
shift 4
setup
tstarted=$(do_facet $facet "lctl get_param -n ${paramp}.threads_started" || echo 0)
lassert 23 "$msg (PDSH problems?)" '(($tstarted && $tmin && $tmax))' || return $?
lassert 24 "$msg" '(($tstarted >= $tmin && $tstarted <= $tmax ))' || return $?
+ nthrs=$(expr $tmax - $tmin)
+ if [ $nthrs -lt $ncpts ]; then
+ nthrs=0
+ else
+ nthrs=$ncpts
+ fi
+
+ [ $tmin -eq $tmax -a $tmin -eq $tstarted ] &&
+ skip_env "module parameter forced $facet thread count" &&
+ tmin=3 && tmax=$((3 * tmax))
# Check that we can change min/max
- do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + 1))"
- do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - 1))"
- tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
- tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
- lassert 25 "$msg" '(($tmin2 == ($tmin + 1) && $tmax2 == ($tmax -1)))' || return $?
+ do_facet $facet "lctl set_param ${paramp}.threads_min=$((tmin + nthrs))"
+ do_facet $facet "lctl set_param ${paramp}.threads_max=$((tmax - nthrs))"
+ tmin2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
+ tmax2=$(do_facet $facet "lctl get_param -n ${paramp}.threads_max" || echo 0)
+ lassert 25 "$msg" '(($tmin2 == ($tmin + $nthrs) && $tmax2 == ($tmax - $nthrs)))' || return $?
# Check that we can set min/max to the same value
tmin=$(do_facet $facet "lctl get_param -n ${paramp}.threads_min" || echo 0)
LOAD_MODULES_REMOTE=true
cleanup
local oldvalue
- setmodopts -a $modname "$opts" oldvalue
+ local newvalue="${opts}=$(expr $basethr \* $ncpts)"
+ setmodopts -a $modname "$newvalue" oldvalue
load_modules
setup
lassert 28 "$msg" '(($tstarted == $tmin && $tstarted == $tmax ))' || return $?
cleanup
- # Workaround a YALA bug where YALA expects that modules will remain
- # loaded on the servers
- LOAD_MODULES_REMOTE=false
load_modules
setup
cleanup
}
test_53a() {
- thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads=64'
+ thread_sanity OST ost1 'ost.*.ost' 'oss_num_threads' '16'
}
run_test 53a "check OSS thread count params"
test_53b() {
- thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads=64'
+ thread_sanity MDT $SINGLEMDS 'mdt.*.*.' 'mdt_num_threads' '16'
}
run_test 53b "check MDT thread count params"
test_54a() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
do_rpc_nodes $(facet_host ost1) run_llverdev $(ostdevname 1) -p
[ $? -eq 0 ] || error "llverdev failed!"
reformat_and_config
run_test 54a "test llverdev and partial verify of device"
test_54b() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
setup
run_llverfs $MOUNT -p
[ $? -eq 0 ] || error "llverfs failed!"
}
test_55() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
local mdsdev=$(mdsdevname 1)
- local ostdev=$(ostdevname 1)
- local saved_opts=$OST_MKFS_OPTS
+ local mdsvdev=$(mdsvdevname 1)
for i in 1023 2048
do
- OST_MKFS_OPTS="$saved_opts --index $i"
- reformat
-
+ add mds1 $(mkfs_opts mds1) --reformat $mdsdev $mdsvdev ||
+ exit 10
+ add ost1 $(mkfs_opts ost1) --index=$i --reformat \
+ $(ostdevname 1) $(ostvdevname 1)
setup_noconfig
stopall
-
setup_noconfig
sync
+
echo checking size of lov_objid for ost index $i
LOV_OBJID_SIZE=$(do_facet mds1 "$DEBUGFS -R 'stat lov_objid' $mdsdev 2>/dev/null" | grep ^User | awk '{print $6}')
if [ "$LOV_OBJID_SIZE" != $(lov_objid_size $i) ]; then
stopall
done
- OST_MKFS_OPTS=$saved_opts
reformat
}
run_test 55 "check lov_objid size"
test_56() {
- add mds1 $MDS_MKFS_OPTS --mkfsoptions='\"-J size=16\"' --reformat $(mdsdevname 1)
- add ost1 $OST_MKFS_OPTS --index=1000 --reformat $(ostdevname 1)
- add ost2 $OST_MKFS_OPTS --index=10000 --reformat $(ostdevname 2)
+ local mds_journal_size_orig=$MDSJOURNALSIZE
+
+ MDSJOURNALSIZE=16
+ add mds1 $(mkfs_opts mds1) --reformat $(mdsdevname 1) $(mdsvdevname 1)
+ add ost1 $(mkfs_opts ost1) --index=1000 --reformat \
+ $(ostdevname 1) $(ostvdevname 1)
+ add ost2 $(mkfs_opts ost2) --index=10000 --reformat \
+ $(ostdevname 2) $(ostvdevname 2)
start_mgsmds
start_ost
mount_client $MOUNT || error "Unable to mount client"
echo ok
$LFS osts
- [ -n "$ENABLE_QUOTA" ] && { $LFS quotacheck -ug $MOUNT || error "quotacheck has failed" ; }
stopall
+ MDSJOURNALSIZE=$mds_journal_size_orig
reformat
}
run_test 56 "check big indexes"
}
test_58() { # bug 22658
- [ "$FSTYPE" != "ldiskfs" ] && skip "not supported for $FSTYPE" && return
+ if [ $(facet_fstype mds) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
setup_noconfig
mkdir -p $DIR/$tdir
createmany -o $DIR/$tdir/$tfile-%d 100
run_test 59 "writeconf mount option"
test_60() { # LU-471
- add mds1 $MDS_MKFS_OPTS --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' --reformat $(mdsdevname 1)
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
+ add mds1 $(mkfs_opts mds1) \
+ --mkfsoptions='\" -E stride=64 -O ^uninit_bg\"' --reformat \
+ $(mdsdevname 1) $(mdsvdevname 1) || exit 10
dump=$(do_facet $SINGLEMDS dumpe2fs $(mdsdevname 1))
rc=${PIPESTATUS[0]}
run_test 60 "check mkfs.lustre --mkfsoptions -E -O options setting"
test_61() { # LU-80
- local reformat=false
-
- [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] ||
- { skip "Need MDS version at least 2.1.53"; return 0; }
-
- if ! large_xattr_enabled; then
- reformat=true
- local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
- add $SINGLEMDS $MDS_MKFS_OPTS --mkfsoptions='\"-O large_xattr\"' \
- --reformat $mds_dev || error "reformatting $mds_dev failed"
- fi
+ local reformat=false
+
+ [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.1.53) ] ||
+ { skip "Need MDS version at least 2.1.53"; return 0; }
+
+ if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ ! large_xattr_enabled; then
+ reformat=true
+ local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
+ LDISKFS_MKFS_OPTS+=" -O large_xattr"
+ add $SINGLEMDS $(mkfs_opts $SINGLEMDS) --reformat $mds_dev ||
+ error "reformatting $mds_dev failed"
+ fi
setup_noconfig || error "setting up the filesystem failed"
client_up || error "starting client failed"
rm -f $file
stopall
- $reformat && reformat
+ if $reformat; then
+ LDISKFS_MKFS_OPTS=${LDISKFS_MKFS_OPTS% -O large_xattr}
+ reformat
+ fi
}
run_test 61 "large xattr"
test_62() {
- # MRP-118
- local mdsdev=$(mdsdevname 1)
- local ostdev=$(ostdevname 1)
-
- echo "disable journal for mds"
- do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed"
- start_mds && error "MDT start should fail"
- echo "disable journal for ost"
- do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed"
- start_ost && error "OST start should fail"
- cleanup || return $?
- reformat_and_config
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
+ # MRP-118
+ local mdsdev=$(mdsdevname 1)
+ local ostdev=$(ostdevname 1)
+
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.2.51) ]] ||
+ { skip "Need MDS version at least 2.2.51"; return 0; }
+
+ echo "disable journal for mds"
+ do_facet mds tune2fs -O ^has_journal $mdsdev || error "tune2fs failed"
+ start_mds && error "MDT start should fail"
+ echo "disable journal for ost"
+ do_facet ost1 tune2fs -O ^has_journal $ostdev || error "tune2fs failed"
+ start_ost && error "OST start should fail"
+ cleanup || return $?
+ reformat_and_config
}
run_test 62 "start with disabled journal"
+test_63() {
+ if [ $(facet_fstype $SINGLEMDS) != ldiskfs ]; then
+ skip "Only applicable to ldiskfs-based MDTs"
+ return
+ fi
+
+ local inode_slab=$(do_facet $SINGLEMDS \
+ "awk '/ldiskfs_inode_cache/ { print \\\$5 }' /proc/slabinfo")
+ if [ -z "$inode_slab" ]; then
+ skip "ldiskfs module has not been loaded"
+ return
+ fi
+
+ echo "$inode_slab ldisk inodes per page"
+ [ "$inode_slab" -ge "3" ] ||
+ error "ldisk inode size is too big, $inode_slab objs per page"
+ return
+}
+run_test 63 "Verify each page can at least hold 3 ldisk inodes"
+
+test_64() {
+ start_mds
+ start_ost
+ start_ost2 || error "Unable to start second ost"
+ mount_client $MOUNT || error "Unable to mount client"
+ stop_ost2 || error "Unable to stop second ost"
+ echo "$LFS df"
+ $LFS df --lazy || error "lfs df failed"
+ cleanup || return $?
+}
+run_test 64 "check lfs df --lazy "
+
if ! combined_mgs_mds ; then
stop mgs
fi
cleanup_gss
-# restore the ${facet}_MKFS_OPTS variables
-for facet in MGS MDS OST; do
- opts=SAVED_${facet}_MKFS_OPTS
- if [[ -n ${!opts} ]]; then
- eval ${facet}_MKFS_OPTS=\"${!opts}\"
- fi
-done
-
-complete $(basename $0) $SECONDS
+complete $SECONDS
exit_status