export ZFS=${ZFS:-zfs}
export ZPOOL=${ZPOOL:-zpool}
export ZDB=${ZDB:-zdb}
- export PARTPROBE=${PARTPROBE:-partprobe}
#[ -d /r ] && export ROOT=${ROOT:-/r}
export TMP=${TMP:-$ROOT/tmp}
fi
export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
+ export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
# command line
grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+ load_module lfsck/lfsck
[ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
modprobe zfs
local size=0
case $fstype in
ldiskfs) size=50;; # largest seen is 44, leave some headroom
- zfs) size=256;;
+ zfs) size=400;; # largest seen is 384
esac
echo -n $size
}
#
-# Get the device of a facet.
-#
-# Echoes the facet's backing device name on stdout; an unrecognized
-# facet yields an empty string.
-facet_device() {
- local facet=$1
- local device
-
- case $facet in
- mgs) device=$(mgsdevname) ;;
- # mds*/ost*: facet_number extracts the index, e.g. "mds2" -> 2
- mds*) device=$(mdsdevname $(facet_number $facet)) ;;
- ost*) device=$(ostdevname $(facet_number $facet)) ;;
- # fs2*/fs3*: presumably the second-filesystem test facets -- confirm
- fs2mds) device=$(mdsdevname 1_2) ;;
- fs2ost) device=$(ostdevname 1_2) ;;
- fs3ost) device=$(ostdevname 2_2) ;;
- *) ;; # unknown facet: echo an empty string
- esac
-
- echo -n $device
-}
-
-#
-# Get the virtual device of a facet.
-#
-# Same facet mapping as facet_device(), but resolved via the *vdevname
-# helpers. NOTE(review): presumably the raw vdev backing a ZFS pool
-# rather than the dataset -- confirm against the helper definitions.
-facet_vdevice() {
- local facet=$1
- local device
-
- case $facet in
- mgs) device=$(mgsvdevname) ;;
- mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
- ost*) device=$(ostvdevname $(facet_number $facet)) ;;
- fs2mds) device=$(mdsvdevname 1_2) ;;
- fs2ost) device=$(ostvdevname 1_2) ;;
- fs3ost) device=$(ostvdevname 2_2) ;;
- *) ;; # unknown facet: echo an empty string
- esac
-
- echo -n $device
-}
-
-#
-# Re-read the partition table on failover partner host.
-# After a ZFS storage pool is created on a shared device, the partition table
-# on the device may change. However, the operating system on the failover
-# host may not notice the change automatically. Without the up-to-date partition
-# block devices, 'zpool import ..' cannot find the labels, whose positions are
-# relative to partition rather than disk beginnings.
-#
-# This function performs partprobe on the failover host to make it re-read the
-# partition table.
-#
-refresh_partition_table() {
- local facet=$1
- local device=$2
- local host
-
- host=$(facet_passive_host $facet)
- # no-op when the facet has no configured failover partner
- if [[ -n "$host" ]]; then
- do_node $host "$PARTPROBE $device"
- fi
-}
-
-#
-# Get ZFS storage pool name.
-#
-# Derived purely from the facet's device string: everything before the
-# first "/". A device containing no "/" is echoed back unchanged.
-zpool_name() {
- local facet=$1
- local device
- local poolname
-
- device=$(facet_device $facet)
- # poolname is string before "/"
- poolname="${device%%/*}"
-
- echo -n $poolname
-}
-
-#
-# Export ZFS storage pool.
-# Before exporting the pool, all datasets within the pool should be unmounted.
-#
-# Any extra arguments (e.g. -f) are passed through to 'zpool export'.
-export_zpool() {
- local facet=$1
- shift
- local opts="$@"
- local poolname
-
- poolname=$(zpool_name $facet)
-
- if [[ -n "$poolname" ]]; then
- # succeed silently when the pool is not currently imported
- do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
- $ZPOOL export $opts $poolname"
- fi
-}
-
-#
-# Import ZFS storage pool.
-# Force importing, even if the pool appears to be potentially active.
-#
-# The default options keep the import transient: with cachefile=none the
-# import is not recorded for automatic re-import at boot.
-import_zpool() {
- local facet=$1
- shift
- local opts=${@:-"-o cachefile=none"}
- local poolname
-
- poolname=$(zpool_name $facet)
-
- if [[ -n "$poolname" ]]; then
- do_facet $facet "$ZPOOL import -f $opts $poolname"
- fi
-}
-
-#
-# Set the "cachefile=none" property on ZFS storage pool so that the pool
-# is not automatically imported on system startup.
-#
-# In a failover environment, this will provide resource level fencing which
-# will ensure that the same ZFS storage pool will not be imported concurrently
-# on different nodes.
-#
-disable_zpool_cache() {
- local facet=$1
- local poolname
-
- poolname=$(zpool_name $facet)
-
- # no-op when the facet's device does not map to a pool name
- if [[ -n "$poolname" ]]; then
- do_facet $facet "$ZPOOL set cachefile=none $poolname"
- fi
-}
-
-#
# This and set_osd_param() shall be used to access OSD parameters
# that once existed under "obdfilter":
#
opts=$(csa_add "$opts" -o loop)
fi
- if [[ $(facet_fstype $facet) == zfs ]]; then
- # import ZFS storage pool
- import_zpool $facet || return ${PIPESTATUS[0]}
- fi
-
echo "Starting ${facet}: $opts ${!dev} $mntpt"
# for testing LU-482 error handling in mount_facets() and test_0a()
if [ -f $TMP/test-lu482-trigger ]; then
return $RC
}
+#
+# When a ZFS OSD is made read-only by replay_barrier(), its pool is "frozen".
+# Because stopping corresponding target may not clear this in-memory state, we
+# need to zap the pool from memory by exporting and reimporting the pool.
+#
+# Although the uberblocks are not updated when a pool is frozen, transactions
+# are still written to the disks. Modified blocks may be cached in memory when
+# tests try reading them back. The export-and-reimport process also evicts any
+# cached pool data from memory to provide the correct "data loss" semantics.
+#
+# No-op for non-ZFS facets.
+refresh_disk() {
+ local facet=$1
+ local fstype=$(facet_fstype $facet)
+ local _dev
+ local dev
+ local poolname
+
+ if [ "${fstype}" == "zfs" ]; then
+ _dev=$(facet_active $facet)_dev
+ dev=${!_dev} # expand _dev to its value, e.g. ${mds1_dev}
+ poolname="${dev%%/*}" # poolname is string before "/"
+
+ if [ "${poolname}" == "" ]; then
+ echo "invalid dataset name: $dev"
+ return
+ fi
+ # back up the cachefile so the reimport below can locate the
+ # pool from the same configuration it was exported with
+ do_facet $facet "cp /etc/zfs/zpool.cache /tmp/zpool.cache.back"
+ do_facet $facet "$ZPOOL export ${poolname}"
+ do_facet $facet "$ZPOOL import -f -c /tmp/zpool.cache.back \
+ ${poolname}"
+ fi
+}
+
stop() {
local running
local facet=$1
do_facet ${facet} umount -d $@ $mntpt
fi
- # umount should block, but we should wait for unrelated obd's
- # like the MGS or MGC to also stop.
- wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
-
- if [[ $(facet_fstype $facet) == zfs ]]; then
- # export ZFS storage pool
- export_zpool $facet
- fi
+ # umount should block, but we should wait for unrelated obd's
+ # like the MGS or MGC to also stop.
+ wait_exit_ST ${facet}
}
# save quota version (both administrative and operational quotas)
if [ "$FAILURE_MODE" = HARD ]; then
reboot_node $(facet_active_host $facet)
else
+ refresh_disk ${facet}
sleep 10
fi
}
do_facet $facet "sync; sync; sync"
df $MOUNT
- # make sure there will be no seq change
+ # make sure there will be no seq change
local clients=${CLIENTS:-$HOSTNAME}
local f=fsa-\\\$\(hostname\)
do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
local svc=${facet}_svc
do_facet $facet $LCTL --device ${!svc} notransno
- #
- # If a ZFS OSD is made read-only here, its pool is "freezed". This
- # in-memory state has to be cleared by either rebooting the host or
- # exporting and reimporting the pool.
- #
- # Although the uberblocks are not updated when a pool is freezed,
- # transactions are still written to the disks. Modified blocks may be
- # cached in memory when tests try reading them back. The
- # export-and-reimport process also evicts any cached pool data from
- # memory to provide the correct "data loss" semantics.
- #
- # In the test framework, the exporting and importing operations are
- # handled by stop() and mount_facet() separately, which are used
- # inside fail() and fail_abort().
- #
do_facet $facet $LCTL --device ${!svc} readonly
do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
$LCTL mark "local REPLAY BARRIER on ${!svc}"
# Fail facet $1 without replaying its log: stop it, refresh its on-disk
# state, flip the active node, then remount with -o abort_recovery.
fail_abort() {
local facet=$1
stop $facet
+ refresh_disk ${facet} # zfs: evict the frozen in-memory pool state
change_active $facet
wait_for_facet $facet
mount_facet $facet -o abort_recovery
}
#
# (Re)format the device for facet $1 with $MKFS; remaining arguments are
# passed through to $MKFS. The facet is force-stopped first and its
# $TMP/<facet>active marker removed.
#
add() {
- local facet=$1
- shift
- # make sure its not already running
- stop ${facet} -f
- rm -f $TMP/${facet}active
- [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
- do_facet ${facet} $MKFS $* || return ${PIPESTATUS[0]}
-
- if [[ $(facet_fstype $facet) == zfs ]]; then
- #
- # After formatting a ZFS target, "cachefile=none" property will
- # be set on the ZFS storage pool so that the pool is not
- # automatically imported on system startup. And then the pool
- # will be exported so as to leave the importing and exporting
- # operations handled by mount_facet() and stop() separately.
- #
- refresh_partition_table $facet $(facet_vdevice $facet)
- disable_zpool_cache $facet
- export_zpool $facet
- fi
+ local facet=$1
+ shift
+ # make sure it's not already running
+ stop ${facet} -f
+ rm -f $TMP/${facet}active
+ # a combined MGS/MDS is tracked via a separate mgsactive marker
+ [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
+ # NOTE(review): the "|| return ${PIPESTATUS[0]}" guard and the ZFS
+ # post-format handling (partprobe/cachefile/export) were dropped --
+ # confirm callers detect $MKFS failures and the ZFS logic moved
+ do_facet ${facet} $MKFS $*
}
ostdevname() {
set_default_debug_nodes $(comma_list $(nodes_list))
fi
- if [ -n "$OSD_TRACK_DECLARES_LBUG" ] ; then
- do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
- "$LCTL set_param osd-*.track_declares_assert=1" \
- > /dev/null
+ if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then
+ local facets="$(get_facets OST),$(get_facets MDS),mgs"
+ local nodes="$(facets_hosts ${facets})"
+ if [ -n "$nodes" ] ; then
+ do_nodes $nodes "$LCTL set_param \
+ osd-ldiskfs.track_declares_assert=1 || true"
+ fi
fi
init_gss
#Remove objects from OST
remove_ost_objects() {
- shift
- local ostdev=$1
- local group=$2
- shift 2
+ local facet=$1
+ local ostdev=$2
+ local group=$3
+ shift 3
local objids="$@"
- local facet=ost$((OSTIDX + 1))
local mntpt=$(facet_mntpt $facet)
local opts=$OST_MOUNT_OPTS
local i
local rc
echo "removing objects from $ostdev on $facet: $objids"
- if ! do_facet $facet test -b $ostdev; then
+ if ! test -b $ostdev; then
opts=$(csa_add "$opts" -o loop)
fi
mount -t $(facet_fstype $facet) $opts $ostdev $mntpt ||
echo "$TESTLOG_PREFIX.$TESTNAME.$logname.$(hostname -s).log"
}
-# mkdir directory on different MDTs
+# make directory on different MDTs
+#
+# test_mkdir [-p] PATH -- create a single directory. With multiple MDTs,
+# a new directory whose parent sits on MDT0 is striped onto MDT
+# (testnum % MDSCOUNT) via 'lfs setdirstripe'; otherwise plain mkdir.
test_mkdir() {
local option
local parent
local child
local path
- local dir
local rc=0
- if [ $# -eq 2 ]; then
- option=$1
- path=$2
- else
- path=$1
- fi
-
- child=${path##*/}
- parent=${path%/*}
+ # only "[-p] path" is accepted; anything else is a usage error
+ case $# in
+ 1) path=$1;;
+ 2) option=$1
+ path=$2;;
+ *) error "Only creating single directory is supported";;
+ esac
- if [ "$parent" == "$child" ]; then
- parent=$(pwd)
- fi
+ child=$(basename $path)
+ parent=$(dirname $path)
- if [ "$option" == "-p" -a -d ${parent}/${child} ]; then
+ # mimic 'mkdir -p': succeed silently if the directory already exists
+ if [ "$option" == "-p" -a -d $parent/$child ]; then
return $rc
fi
- # it needs to check whether there is further / in child
- dir=$(echo $child | awk -F '/' '{print $2}')
- if [ ! -z "$dir" ]; then
- local subparent=$(echo $child | awk -F '/' '{ print $1 }')
- parent=${parent}"/"${subparent}
- child=$dir
- fi
-
if [ ! -d ${parent} ]; then
if [ "$option" == "-p" ]; then
mkdir -p ${parent}
fi
if [ $MDSCOUNT -le 1 ]; then
- mkdir $option ${parent}/${child} || rc=$?
+ mkdir $option $parent/$child || rc=$?
else
local mdt_idx=$($LFS getstripe -M $parent)
+ # digits of $testnum pick the target MDT deterministically
+ local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
if [ "$mdt_idx" -ne 0 ]; then
- mkdir $option ${parent}/${child} || rc=$?
- return $rc
+ # parent already on a non-default MDT: inherit via mkdir
+ mkdir $option $parent/$child || rc=$?
+ else
+ mdt_idx=$((test_num % MDSCOUNT))
+ echo "mkdir $mdt_idx for $parent/$child"
+ $LFS setdirstripe -i $mdt_idx $parent/$child || rc=$?
fi
-
- local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
- local mdt_idx=$((test_num % MDSCOUNT))
- echo "mkdir $mdt_idx for ${parent}/${child}"
- $LFS setdirstripe -i $mdt_idx ${parent}/${child} || rc=$?
fi
return $rc
}