set -e
#set -x
+export LANG=en_US
export EJOURNAL=${EJOURNAL:-""}
export REFORMAT=${REFORMAT:-""}
export WRITECONF=${WRITECONF:-""}
export GSS_PIPEFS=false
export IDENTITY_UPCALL=default
export QUOTA_AUTO=1
-export JOBSTATS_AUTO=${JOBSTATS_AUTO:-1}
-export JOBID_VAR=${JOBID_VAR:-"procname_uid"}
+# specify environment variable containing batch job name for server statistics
+export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
# LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or
# LOAD_LLOOP is true. LOAD_LLOOP is false by default.
}
init_test_env() {
- export LUSTRE=`absolute_path $LUSTRE`
- export TESTSUITE=`basename $0 .sh`
- export TEST_FAILED=false
- export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
+ export LUSTRE=$(absolute_path $LUSTRE)
+ export TESTSUITE=$(basename $0 .sh)
+ export TEST_FAILED=false
+ export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
+ export RPC_MODE=${RPC_MODE:-false}
export MKE2FS=$MKE2FS
if [ -z "$MKE2FS" ]; then
fi
fi
+ export RESIZE2FS=$RESIZE2FS
+ if [ -z "$RESIZE2FS" ]; then
+ if which resizefs.ldiskfs >/dev/null 2>&1; then
+ export RESIZE2FS=resizefs.ldiskfs
+ else
+ export RESIZE2FS=resize2fs
+ fi
+ fi
+
export LFSCK_BIN=${LFSCK_BIN:-lfsck}
export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite
export FSCK_MAX_ERR=4 # File system errors left uncorrected
export ZFS=${ZFS:-zfs}
export ZPOOL=${ZPOOL:-zpool}
export ZDB=${ZDB:-zdb}
+ export PARTPROBE=${PARTPROBE:-partprobe}
#[ -d /r ] && export ROOT=${ROOT:-/r}
export TMP=${TMP:-$ROOT/tmp}
IDENTITY_UPCALL=false
;;
esac
- USE_OFD=${USE_OFD:-yes}
- [ "$USE_OFD" = "yes" ] && LOAD_MODULES_REMOTE=true
export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
export RLUSTRE=${RLUSTRE:-$LUSTRE}
export RPWD=${RPWD:-$PWD}
export I_MOUNTED=${I_MOUNTED:-"no"}
- if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \
- ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \
- ! -f `dirname $0`/../mdt/mdt.ko ]; then
- export CLIENTMODSONLY=yes
- fi
+ if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \
+ ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \
+ ! -f /lib/modules/$(uname -r)/extra/kernel/fs/lustre/mdt.ko -a \
+ ! -f $LUSTRE/mdt/mdt.ko ]; then
+ export CLIENTMODSONLY=yes
+ fi
- export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
+ export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
+ export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
# command line
shift $((OPTIND - 1))
ONLY=${ONLY:-$*}
- # print the durations of each test if "true"
- DDETAILS=${DDETAILS:-false}
- [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
- rm -f $TMP/*active
+ # print the durations of each test if "true"
+ DDETAILS=${DDETAILS:-false}
+ [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
+ if ! $RPC_MODE; then
+ rm -f $TMP/*active
+ fi
}
check_cpt_number() {
# split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3"
eval set -- $(tr "[:punct:]" " " <<< $*)
- echo -n $((($1 << 16) | ($2 << 8) | $3))
+ echo -n "$((($1 << 16) | ($2 << 8) | $3))"
}
export LINUX_VERSION=$(uname -r | sed -e "s/[-.]/ /3" -e "s/ .*//")
optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
eval set -- \$$optvar
if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
- # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
- set -- $(grep -P "^options\\s+${BASE}" $MODPROBECONF)
- # Get rid of "options $module"
- (($# > 0)) && shift 2
-
- # Ensure we have accept=all for lnet
- if [ $(basename $module) = lnet ]; then
- # OK, this is a bit wordy...
- local arg accept_all_present=false
- for arg in "$@"; do
- [ "$arg" = accept=all ] && accept_all_present=true
- done
- $accept_all_present || set -- "$@" accept=all
- fi
+ # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
+ local opt
+ opt=$(awk -v var="^options $BASE" '$0 ~ var \
+ {gsub("'"options $BASE"'",""); print}' $MODPROBECONF)
+ set -- $(echo -n $opt)
+
+ # Ensure we have accept=all for lnet
+ if [ $(basename $module) = lnet ]; then
+ # OK, this is a bit wordy...
+ local arg accept_all_present=false
+
+ for arg in "$@"; do
+ [ "$arg" = accept=all ] && \
+ accept_all_present=true
+ done
+ $accept_all_present || set -- "$@" accept=all
+ fi
+ export $optvar="$*"
fi
fi
}
load_modules_local() {
- [ $(facet_fstype ost1) == "zfs" ] && export USE_OFD=yes
-
if [ -n "$MODPROBE" ]; then
# use modprobe
echo "Using modprobe to load modules"
load_module ../lnet/lnet/lnet
LNETLND=${LNETLND:-"socklnd/ksocklnd"}
load_module ../lnet/klnds/$LNETLND
- load_module lvfs/lvfs
load_module obdclass/obdclass
load_module ptlrpc/ptlrpc
load_module ptlrpc/gss/ptlrpc_gss
grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+ load_module lfsck/lfsck
[ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
modprobe zfs
load_module osd-zfs/osd_zfs
fi
- load_module mgs/mgs
- load_module mdd/mdd
if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
- #
- # This block shall be moved up beside osd-zfs as soon
- # as osd-ldiskfs stops using mdd symbols.
- #
grep -q exportfs_decode_fh $SYMLIST ||
{ modprobe exportfs 2> /dev/null || true; }
- load_module ../ldiskfs/ldiskfs/ldiskfs
- load_module lvfs/fsfilt_ldiskfs
+ load_module ../ldiskfs/ldiskfs
load_module osd-ldiskfs/osd_ldiskfs
fi
+ load_module nodemap/nodemap
+ load_module mgs/mgs
+ load_module mdd/mdd
load_module mdt/mdt
load_module ost/ost
load_module lod/lod
return 0
}
+# Print the backend-specific log/journal size allowance for the given
+# facet (default: $SINGLEMDS); prints 0 for unrecognized backend fstypes.
+fs_log_size() {
+	local facet=${1:-$SINGLEMDS}
+	local fstype=$(facet_fstype $facet)
+	local size=0
+	case $fstype in
+		ldiskfs) size=50;; # largest seen is 44, leave some headroom
+		zfs) size=400;; # largest seen is 384
+	esac
+
+	echo -n $size
+}
+
check_gss_daemon_nodes() {
local list=$1
dname=$2
fi
}
+# Print the value of the ${facet}_svc variable, i.e. the service/target
+# name recorded for this facet elsewhere in the framework.
+facet_svc() {
+	local facet=$1
+	local var=${facet}_svc
+
+	echo -n ${!var}
+}
+
facet_type() {
	local facet=$1
+	# strip an optional "fsN" prefix and the first run of digits or
+	# underscores, then upcase: e.g. "fs2ost3" -> "OST", "mds1" -> "MDS"
-	echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9]\+//' |
+	echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9_]\+//' |
		tr '[:lower:]' '[:upper:]'
}
return
fi
+ if [[ $facet == mgs ]] && combined_mgs_mds; then
+ facet_fstype mds1
+ return
+ fi
+
return 1
}
}
#
-# This and set_obdfilter_param() shall be used to access OSD parameters
+# Get the device of a facet.
+#
+# Arg: facet name (mgs, mdsN, ostN, or the fs2/fs3 mixed-setup facets).
+# Prints the device name on stdout; unknown facets print nothing.
+#
+facet_device() {
+	local facet=$1
+	local device
+
+	case $facet in
+		mgs) device=$(mgsdevname) ;;
+		mds*) device=$(mdsdevname $(facet_number $facet)) ;;
+		ost*) device=$(ostdevname $(facet_number $facet)) ;;
+		fs2mds) device=$(mdsdevname 1_2) ;;
+		fs2ost) device=$(ostdevname 1_2) ;;
+		fs3ost) device=$(ostdevname 2_2) ;;
+		*) ;;
+	esac
+
+	echo -n $device
+}
+
+#
+# Get the virtual device of a facet.
+# The vdev is the physical device backing the facet's storage; the
+# *vdevname helpers print an empty string for ldiskfs facets, so this
+# is only meaningful for ZFS-backed facets.
+#
+facet_vdevice() {
+	local facet=$1
+	local device
+
+	case $facet in
+		mgs) device=$(mgsvdevname) ;;
+		mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
+		ost*) device=$(ostvdevname $(facet_number $facet)) ;;
+		fs2mds) device=$(mdsvdevname 1_2) ;;
+		fs2ost) device=$(ostvdevname 1_2) ;;
+		fs3ost) device=$(ostvdevname 2_2) ;;
+		*) ;;
+	esac
+
+	echo -n $device
+}
+
+#
+# Re-read the partition table on failover partner host.
+# After a ZFS storage pool is created on a shared device, the partition table
+# on the device may change. However, the operating system on the failover
+# host may not notice the change automatically. Without the up-to-date partition
+# block devices, 'zpool import ..' cannot find the labels, whose positions are
+# relative to partition rather than disk beginnings.
+#
+# This function performs partprobe on the failover host to make it re-read the
+# partition table.
+#
+# Args: $1 facet, $2 device path to probe.  No-op when the facet has no
+# passive failover partner.
+#
+refresh_partition_table() {
+	local facet=$1
+	local device=$2
+	local host
+
+	host=$(facet_passive_host $facet)
+	if [[ -n "$host" ]]; then
+		do_node $host "$PARTPROBE $device"
+	fi
+}
+
+#
+# Get ZFS storage pool name.
+# The pool name is the dataset component before the first "/"; a device
+# name containing no "/" is printed unchanged.
+#
+zpool_name() {
+	local facet=$1
+	local device
+	local poolname
+
+	device=$(facet_device $facet)
+	# poolname is string before "/"
+	poolname="${device%%/*}"
+
+	echo -n $poolname
+}
+
+#
+# Create ZFS storage pool.
+# Idempotent: creation is skipped if a pool with this name already exists.
+# Extra args (after the first three) replace the default
+# "-o cachefile=none" zpool-create options.
+#
+create_zpool() {
+	local facet=$1
+	local poolname=$2
+	local vdev=$3
+	shift 3
+	local opts=${@:-"-o cachefile=none"}
+
+	do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+		$ZPOOL create -f $opts $poolname $vdev"
+}
+
+#
+# Create ZFS file system.
+# Idempotent: creation is skipped if the dataset already exists.
+# Extra args (after the first two) replace the default
+# "-o mountpoint=legacy" zfs-create options.
+#
+create_zfs() {
+	local facet=$1
+	local dataset=$2
+	shift 2
+	local opts=${@:-"-o mountpoint=legacy"}
+
+	do_facet $facet "$ZFS list -H $dataset >/dev/null 2>&1 ||
+		$ZFS create $opts $dataset"
+}
+
+#
+# Export ZFS storage pool.
+# Before exporting the pool, all datasets within the pool should be unmounted.
+# Skipped when the pool does not exist or a dataset from the pool is still
+# mounted (per /proc/mounts).
+#
+export_zpool() {
+	local facet=$1
+	shift
+	local opts="$@"
+	local poolname
+
+	poolname=$(zpool_name $facet)
+
+	if [[ -n "$poolname" ]]; then
+		do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+			grep -q ^$poolname/ /proc/mounts ||
+			$ZPOOL export $opts $poolname"
+	fi
+}
+
+#
+# Destroy ZFS storage pool.
+# Destroy the given pool and free up any devices for other use. This command
+# tries to unmount any active datasets before destroying the pool.
+# -f Force any active datasets contained within the pool to be unmounted.
+#
+# Pool name defaults to the one derived from the facet's device; no-op
+# when the pool does not exist.
+#
+destroy_zpool() {
+	local facet=$1
+	local poolname=${2:-$(zpool_name $facet)}
+
+	if [[ -n "$poolname" ]]; then
+		do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
+			$ZPOOL destroy -f $poolname"
+	fi
+}
+
+#
+# Import ZFS storage pool.
+# Force importing, even if the pool appears to be potentially active.
+# Skipped if the pool is already imported; "-d" limits the device scan
+# to the directory containing the facet's vdev.
+#
+import_zpool() {
+	local facet=$1
+	shift
+	local opts=${@:-"-o cachefile=none"}
+	local poolname
+
+	poolname=$(zpool_name $facet)
+
+	if [[ -n "$poolname" ]]; then
+		opts+=" -d $(dirname $(facet_vdevice $facet))"
+		do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+			$ZPOOL import -f $opts $poolname"
+	fi
+}
+
+#
+# Set the "cachefile=none" property on ZFS storage pool so that the pool
+# is not automatically imported on system startup.
+#
+# In a failover environment, this will provide resource level fencing which
+# will ensure that the same ZFS storage pool will not be imported concurrently
+# on different nodes.
+#
+# No-op for facets whose device yields no pool name.
+#
+disable_zpool_cache() {
+	local facet=$1
+	local poolname
+
+	poolname=$(zpool_name $facet)
+
+	if [[ -n "$poolname" ]]; then
+		do_facet $facet "$ZPOOL set cachefile=none $poolname"
+	fi
+}
+
+#
+# This and set_osd_param() shall be used to access OSD parameters
# once existed under "obdfilter":
#
# mntdev
# read_cache_enable
# writethrough_cache_enable
#
-get_obdfilter_param() {
+get_osd_param() {
local nodes=$1
local device=${2:-$FSNAME-OST*}
local name=$3
osd-*.$device.$name 2>&1" | grep -v 'Found no match'
}
-set_obdfilter_param() {
+set_osd_param() {
local nodes=$1
local device=${2:-$FSNAME-OST*}
local name=$3
opts=$(csa_add "$opts" -o loop)
fi
+ if [[ $(facet_fstype $facet) == zfs ]]; then
+ # import ZFS storage pool
+ import_zpool $facet || return ${PIPESTATUS[0]}
+ fi
+
echo "Starting ${facet}: $opts ${!dev} $mntpt"
# for testing LU-482 error handling in mount_facets() and test_0a()
if [ -f $TMP/test-lu482-trigger ]; then
return $RC
}
-#
-# When a ZFS OSD is made read-only by replay_barrier(), its pool is "freezed".
-# Because stopping corresponding target may not clear this in-memory state, we
-# need to zap the pool from memory by exporting and reimporting the pool.
-#
-# Although the uberblocks are not updated when a pool is freezed, transactions
-# are still written to the disks. Modified blocks may be cached in memory when
-# tests try reading them back. The export-and-reimport process also evicts any
-# cached pool data from memory to provide the correct "data loss" semantics.
-#
-refresh_disk() {
- local facet=$1
- local fstype=$(facet_fstype $facet)
- local _dev
- local dev
- local poolname
-
- if [ "${fstype}" == "zfs" ]; then
- _dev=$(facet_active $facet)_dev
- dev=${!_dev} # expand _dev to its value, e.g. ${mds1_dev}
- poolname="${dev%%/*}" # poolname is string before "/"
-
- if [ "${poolname}" == "" ]; then
- echo "invalid dataset name: $dev"
- return
- fi
- do_facet $facet "cp /etc/zfs/zpool.cache /tmp/zpool.cache.back"
- do_facet $facet "$ZPOOL export ${poolname}"
- do_facet $facet "$ZPOOL import -f -c /tmp/zpool.cache.back \
- ${poolname}"
- fi
-}
-
stop() {
local running
local facet=$1
do_facet ${facet} umount -d $@ $mntpt
fi
- # umount should block, but we should wait for unrelated obd's
- # like the MGS or MGC to also stop.
- wait_exit_ST ${facet}
+ # umount should block, but we should wait for unrelated obd's
+ # like the MGS or MGC to also stop.
+ wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
+
+ if [[ $(facet_fstype $facet) == zfs ]]; then
+ # export ZFS storage pool
+ export_zpool $facet
+ fi
}
# save quota version (both administrative and operational quotas)
# XXX This function is kept for interoperability with old server (< 2.3.50),
# it should be removed whenever we drop the interoperability for such
# server.
-quota_type () {
-	local fsname=${1:-$FSNAME}
-	local rc=0
-	do_facet mgs lctl get_param mdd.${fsname}-MDT*.quota_type || rc=$?
-	do_nodes $(comma_list $(osts_nodes)) \
-		lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
-	return $rc
+# Print the quota_type parameters from the MDTs (via $SINGLEMDS) and from
+# all OSTs; returns the last failing lctl status, 0 if both queries work.
+quota_type() {
+	local fsname=${1:-$FSNAME}
+	local rc=0
+	do_facet $SINGLEMDS lctl get_param mdd.${fsname}-MDT*.quota_type ||
+		rc=$?
+	do_nodes $(comma_list $(osts_nodes)) \
+		lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
+	return $rc
}
# XXX This function is kept for interoperability with old server (< 2.3.50),
local quota_usrs=$QUOTA_USERS
# get_filesystem_size
- local disksz=$(lfs df $mntpt | grep "filesystem summary:" | awk '{print $3}')
+ local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
local blk_soft=$((disksz + 1024))
local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
- local Inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | awk '{print $3}')
+ local Inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
local i_soft=$Inodes
local i_hard=$((i_soft + i_soft / 20))
fi
}
+# Handle the case when there is a space in the lfs df
+# "filesystem summary" line the same as when there is no space.
+# This will allow fixing the "lfs df" summary line in the future.
+# (Rewrites "filesystem summary" into the single token
+# "filesystem_summary" so awk field numbers stay stable for callers.)
+lfs_df() {
+	$LFS df $* | sed -e 's/filesystem /filesystem_/'
+}
+
+# Get free inodes on the MDT specified by mdt index, free inodes on
+# the whole filesystem will be returned when index == -1.
+mdt_free_inodes() {
+	local index=$1
+	local free_inodes
+	local mdt_uuid
+
+	if [ $index -eq -1 ]; then
+		# lfs_df rewrites "filesystem summary" -> "filesystem_summary",
+		# so "summary" matches only the whole-filesystem line
+		mdt_uuid="summary"
+	else
+		mdt_uuid=$(mdtuuid_from_index $index)
+	fi
+
+	free_inodes=$(lfs_df -i $MOUNT | grep $mdt_uuid | awk '{print $4}')
+	echo $free_inodes
+}
+
setup_quota(){
if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
setup_quota_old $1
local quota_usrs=$QUOTA_USERS
# get_filesystem_size
- local disksz=$(lfs df $mntpt | grep "filesystem summary:" |
- awk '{print $3}')
+ local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
local blk_soft=$((disksz + 1024))
local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
- local inodes=$(lfs df -i $mntpt | grep "filesystem summary:" |
- awk '{print $3}')
+ local inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
local i_soft=$inodes
local i_hard=$((i_soft + i_soft / 20))
if [ "$FAILURE_MODE" = HARD ]; then
reboot_node $(facet_active_host $facet)
else
- refresh_disk ${facet}
sleep 10
fi
}
TESTNAME=$TESTNAME \
DBENCH_LIB=$DBENCH_LIB \
DBENCH_SRC=$DBENCH_SRC \
+CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
+LFS=$LFS \
run_${load}.sh" &
local ppid=$!
log "Started client load: ${load} on $client"
}
wait_update () {
-	local node=$1
-	local TEST=$2
-	local FINAL=$3
-	local MAX=${4:-90}
-
-	local RESULT
-	local WAIT=0
-	local sleep=1
-	local print=10
-	while [ true ]; do
-		RESULT=$(do_node $node "$TEST")
-		if [ "$RESULT" == "$FINAL" ]; then
-			[ -z "$RESULT" -o $WAIT -le $sleep ] ||
-				echo "Updated after ${WAIT}s: wanted '$FINAL' got '$RESULT'"
-			return 0
-		fi
-		[ $WAIT -ge $MAX ] && break
-		[ $((WAIT % print)) -eq 0 ] &&
-			echo "Waiting $((MAX - WAIT)) secs for update"
-		WAIT=$((WAIT + sleep))
-		sleep $sleep
-	done
-	echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
-	return 3
+	# Usage: wait_update [--verbose] node check expected [max_wait]
+	# Poll "check" on node once per second until its output equals
+	# "expected" (return 0) or max_wait (default 90s) expires (return 3).
+	local verbose=false
+	if [[ "$1" == "--verbose" ]]; then
+		shift
+		verbose=true
+	fi
+
+	local node=$1
+	local TEST=$2
+	local FINAL=$3
+	local MAX=${4:-90}
+	local RESULT
+	local PREV_RESULT
+	local WAIT=0
+	local sleep=1
+	local print=10
+
+	PREV_RESULT=$(do_node $node "$TEST")
+	while [ true ]; do
+		RESULT=$(do_node $node "$TEST")
+		if [[ "$RESULT" == "$FINAL" ]]; then
+			[[ -z "$RESULT" || $WAIT -le $sleep ]] ||
+				echo "Updated after ${WAIT}s: wanted '$FINAL'"\
+				"got '$RESULT'"
+			return 0
+		fi
+		# $verbose holds the string "true" or "false": run it as a
+		# command.  "[[ $verbose && ... ]]" would only test that the
+		# string is non-empty and so would always pass.
+		if $verbose && [[ "$RESULT" != "$PREV_RESULT" ]]; then
+			echo "Changed after ${WAIT}s: from '$PREV_RESULT'"\
+			"to '$RESULT'"
+			PREV_RESULT=$RESULT
+		fi
+		[[ $WAIT -ge $MAX ]] && break
+		[[ $((WAIT % print)) -eq 0 ]] &&
+			echo "Waiting $((MAX - WAIT)) secs for update"
+		WAIT=$((WAIT + sleep))
+		sleep $sleep
+	done
+	echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
+	return 3
}
wait_update_facet() {
}
sync_all_data() {
-	do_node $(osts_nodes) "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
+	# force-sync the MDTs first, then the OSTs; harmless "Found no
+	# match" noise from the OST pattern is filtered out
+	do_nodes $(comma_list $(mdts_nodes)) \
+		"lctl set_param -n osd*.*MDT*.force_sync 1"
+	do_nodes $(comma_list $(osts_nodes)) \
+		"lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
		grep -v 'Found no match'
}
}
wait_mds_ost_sync () {
-	# just because recovery is done doesn't mean we've finished
-	# orphan cleanup. Wait for llogs to get synchronized.
-	echo "Waiting for orphan cleanup..."
-	# MAX value includes time needed for MDS-OST reconnection
-	local MAX=$(( TIMEOUT * 2 ))
-	local WAIT=0
-	while [ $WAIT -lt $MAX ]; do
-		local -a sync=($(do_nodes $(comma_list $(osts_nodes)) \
-			"$LCTL get_param -n obdfilter.*.mds_sync"))
-		local con=1
-		local i
-		for ((i=0; i<${#sync[@]}; i++)); do
-			[ ${sync[$i]} -eq 0 ] && continue
-			# there is a not finished MDS-OST synchronization
-			con=0
-			break;
-		done
-		sleep 2 # increase waiting time and cover statfs cache
-		[ ${con} -eq 1 ] && return 0
-		echo "Waiting $WAIT secs for $facet mds-ost sync done."
-		WAIT=$((WAIT + 2))
-	done
-	echo "$facet recovery not done in $MAX sec. $STATUS"
-	return 1
+	# just because recovery is done doesn't mean we've finished
+	# orphan cleanup. Wait for llogs to get synchronized.
+	echo "Waiting for orphan cleanup..."
+	# MAX value includes time needed for MDS-OST reconnection
+	local MAX=$(( TIMEOUT * 2 ))
+	local WAIT_TIMEOUT=${1:-$MAX}
+	local WAIT=0
+	local new_wait=true
+	local list=$(comma_list $(mdts_nodes))
+	local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed"
+	if ! do_facet $SINGLEMDS \
+		"$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null"
+	then
+		# old way, use mds_sync
+		new_wait=false
+		list=$(comma_list $(osts_nodes))
+		cmd="$LCTL get_param -n obdfilter.*.mds_sync"
+	fi
+
+	echo "wait $WAIT_TIMEOUT secs maximumly for $list mds-ost sync done."
+	while [ $WAIT -lt $WAIT_TIMEOUT ]; do
+		local -a sync=($(do_nodes $list "$cmd"))
+		local con=1
+		local i
+		for ((i=0; i<${#sync[@]}; i++)); do
+			if $new_wait; then
+				# new param reports 1 when sync is complete
+				[ ${sync[$i]} -eq 1 ] && continue
+			else
+				# old param reports 0 when sync is complete
+				[ ${sync[$i]} -eq 0 ] && continue
+			fi
+			# there is a not finished MDS-OST synchronization
+			con=0
+			break;
+		done
+		sleep 2 # increase waiting time and cover statfs cache
+		[ ${con} -eq 1 ] && return 0
+		echo "Waiting $WAIT secs for $list $i mds-ost sync done."
+		WAIT=$((WAIT + 2))
+	done
+
+	# show which nodes are not finished.
+	do_nodes $list "$cmd"
+	echo "$facet recovery node $i not done in $WAIT_TIMEOUT sec. $STATUS"
+	return 1
}
wait_destroy_complete () {
}
facet_failover() {
-	local facet=$1
-	local sleep_time=$2
-	local host=$(facet_active_host $facet)
-
-	echo "Failing $facet on node $host"
-
-	local affected=$(affected_facets $facet)
+	local facets=$1
+	local sleep_time=$2
+	local -a affecteds
+	local facet
+	local total=0
+	local index=0
+	local skip
+
+	#Because it will only get up facets, we need get affected
+	#facets before shutdown
+	#For HARD Failure mode, it needs make sure facets on the same
+	#HOST will only be shutdown and reboot once
+	for facet in ${facets//,/ }; do
+		local affected_facet
+		skip=0
+		#check whether facet has been included in other affected facets
+		for ((index=0; index<$total; index++)); do
+			# NB: inside [[ ]] the glob pattern must be the
+			# right-hand operand of ==, i.e. list on the left,
+			# *,$facet,* pattern on the right
+			[[ ,${affecteds[index]}, == *,$facet,* ]] && skip=1
+		done
-	shutdown_facet $facet
+		if [ $skip -eq 0 ]; then
+			affecteds[$total]=$(affected_facets $facet)
+			total=$((total+1))
+		fi
+	done
-	echo affected facets: $affected
+	for ((index=0; index<$total; index++)); do
+		facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
+		local host=$(facet_active_host $facet)
+		echo "Failing ${affecteds[index]} on $host"
+		shutdown_facet $facet
+	done
-	[ -n "$sleep_time" ] && sleep $sleep_time
+	for ((index=0; index<$total; index++)); do
+		facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
+		echo reboot facets: ${affecteds[index]}
-	reboot_facet $facet
+		reboot_facet $facet
-	change_active $affected
+		change_active ${affecteds[index]}
-	wait_for_facet $affected
-	# start mgs first if it is affected
-	if ! combined_mgs_mds && list_member $affected mgs; then
-		mount_facet mgs || error "Restart of mgs failed"
-	fi
-	# FIXME; has to be changed to mount all facets concurrently
-	affected=$(exclude_items_from_list $affected mgs)
-	mount_facets $affected
+		wait_for_facet ${affecteds[index]}
+		# start mgs first if it is affected
+		if ! combined_mgs_mds &&
+			list_member ${affecteds[index]} mgs; then
+			mount_facet mgs || error "Restart of mgs failed"
+		fi
+		# FIXME; has to be changed to mount all facets concurrently
+		affected=$(exclude_items_from_list ${affecteds[index]} mgs)
+		echo mount facets: ${affecteds[index]}
+		mount_facets ${affecteds[index]}
+	done
}
obd_name() {
}
replay_barrier() {
-	local facet=$1
-	do_facet $facet "sync; sync; sync"
-	df $MOUNT
-
-	# make sure there will be no seq change
-	local clients=${CLIENTS:-$HOSTNAME}
-	local f=fsa-\\\$\(hostname\)
-	do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
-	do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi"
+	# quiesce the facet's target and all clients, then freeze the target
+	# (notransno + readonly) and record replay-barrier markers in the logs
+	local facet=$1
+	do_facet $facet "sync; sync; sync"
+	df $MOUNT
-	local svc=${facet}_svc
-	do_facet $facet $LCTL --device %${!svc} notransno
-	do_facet $facet $LCTL --device %${!svc} readonly
-	do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
-	$LCTL mark "local REPLAY BARRIER on ${!svc}"
+	# make sure there will be no seq change
+	local clients=${CLIENTS:-$HOSTNAME}
+	local f=fsa-\\\$\(hostname\)
+	do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
+	do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi"
+
+	local svc=${facet}_svc
+	do_facet $facet $LCTL --device ${!svc} notransno
+	#
+	# If a ZFS OSD is made read-only here, its pool is "freezed". This
+	# in-memory state has to be cleared by either rebooting the host or
+	# exporting and reimporting the pool.
+	#
+	# Although the uberblocks are not updated when a pool is freezed,
+	# transactions are still written to the disks. Modified blocks may be
+	# cached in memory when tests try reading them back. The
+	# export-and-reimport process also evicts any cached pool data from
+	# memory to provide the correct "data loss" semantics.
+	#
+	# In the test framework, the exporting and importing operations are
+	# handled by stop() and mount_facet() separately, which are used
+	# inside fail() and fail_abort().
+	#
+	do_facet $facet $LCTL --device ${!svc} readonly
+	do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
+	$LCTL mark "local REPLAY BARRIER on ${!svc}"
}
replay_barrier_nodf() {
-	local facet=$1 echo running=${running}
-	do_facet $facet "sync; sync; sync"
-	local svc=${facet}_svc
-	echo Replay barrier on ${!svc}
-	do_facet $facet $LCTL --device %${!svc} notransno
-	do_facet $facet $LCTL --device %${!svc} readonly
-	do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
-	$LCTL mark "local REPLAY BARRIER on ${!svc}"
+	# NB: the old line also declared stray locals "echo" and "running";
+	# only "facet" is meant to be declared here
+	local facet=$1
+	do_facet $facet "sync; sync; sync"
+	local svc=${facet}_svc
+	echo Replay barrier on ${!svc}
+	do_facet $facet $LCTL --device ${!svc} notransno
+	do_facet $facet $LCTL --device ${!svc} readonly
+	do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
+	$LCTL mark "local REPLAY BARRIER on ${!svc}"
}
replay_barrier_nosync() {
-	local facet=$1 echo running=${running}
-	local svc=${facet}_svc
-	echo Replay barrier on ${!svc}
-	do_facet $facet $LCTL --device %${!svc} notransno
-	do_facet $facet $LCTL --device %${!svc} readonly
-	do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
-	$LCTL mark "local REPLAY BARRIER on ${!svc}"
+	# same as replay_barrier_nodf but without syncing the target first;
+	# the old line declared stray locals "echo" and "running"
+	local facet=$1
+	local svc=${facet}_svc
+	echo Replay barrier on ${!svc}
+	do_facet $facet $LCTL --device ${!svc} notransno
+	do_facet $facet $LCTL --device ${!svc} readonly
+	do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
+	$LCTL mark "local REPLAY BARRIER on ${!svc}"
}
+
+#
+# Get Lustre client uuid for a given Lustre mount point.
+#
+# Maps the mount point (default $MOUNT) to its client instance name via
+# "lfs getname", then reads the llite.<name>.uuid parameter.
+#
+get_client_uuid() {
+	local mntpnt=${1:-$MOUNT}
+
+	local name=$($LFS getname $mntpnt | cut -d' ' -f1)
+	local uuid=$($LCTL get_param -n llite.$name.uuid)
+
+	echo -n $uuid
+}
mds_evict_client() {
-	UUID=`lctl get_param -n mdc.${mds1_svc}-mdc-*.uuid`
-	do_facet mds1 "lctl set_param -n mdt.${mds1_svc}.evict_client $UUID"
+	# evict the client mounted at $1 (default $MOUNT) from ${mds1_svc}
+	local mntpnt=${1:-$MOUNT}
+	local uuid=$(get_client_uuid $mntpnt)
+
+	do_facet $SINGLEMDS \
+		"$LCTL set_param -n mdt.${mds1_svc}.evict_client $uuid"
}
ost_evict_client() {
-	UUID=`lctl get_param -n devices| grep ${ost1_svc}-osc- | egrep -v 'MDT' | awk '{print $5}'`
-	do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.evict_client $UUID"
+	# evict the client mounted at $1 (default $MOUNT) from ${ost1_svc}
+	local mntpnt=${1:-$MOUNT}
+	local uuid=$(get_client_uuid $mntpnt)
+
+	do_facet ost1 \
+		"$LCTL set_param -n obdfilter.${ost1_svc}.evict_client $uuid"
}
fail() {
-	facet_failover $* || error "failover: $?"
-	clients_up || error "post-failover df: $?"
+	# fail over the facet(s) in $1, then wait for every client to see
+	# the import(s) back in FULL state before checking the mounts
+	local facets=$1
+	local clients=${CLIENTS:-$HOSTNAME}
+
+	facet_failover $* || error "failover: $?"
+	wait_clients_import_state "$clients" "$facets" FULL
+	clients_up || error "post-failover df: $?"
}
fail_nodf() {
fail_abort() {
	local facet=$1
	stop $facet
-	refresh_disk ${facet}
+	# ZFS pool export/import is handled inside stop() and mount_facet(),
+	# so no explicit disk refresh is needed here
	change_active $facet
	wait_for_facet $facet
	mount_facet $facet -o abort_recovery
}
h2name_or_ip() {
-	if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
-		echo $1"@$2"
-	fi
+	# map a hostname to a NID on net $2: the wildcard passes through,
+	# anything else becomes "host@net"
+	if [ "$1" = "'*'" ]; then echo \'*\'; else
+		echo $1"@$2"
+	fi
}
h2ptl() {
-	if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
-		ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | awk '{print $1}'`
-		if [ -z "$ID" ]; then
-			echo "Could not get a ptl id for $1..."
-			exit 1
-		fi
-		echo $ID"@ptl"
-	fi
+	# map a hostname to a ptl NID via xtprocadmin; exits on lookup failure
+	if [ "$1" = "'*'" ]; then echo \'*\'; else
+		ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | \
+			awk '{print $1}'`
+		if [ -z "$ID" ]; then
+			echo "Could not get a ptl id for $1..."
+			exit 1
+		fi
+		echo $ID"@ptl"
+	fi
}
declare -fx h2ptl
h2tcp() {
-	h2name_or_ip "$1" "tcp"
+	h2name_or_ip "$1" "tcp"
}
declare -fx h2tcp
h2elan() {
-	if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
-		if type __h2elan >/dev/null 2>&1; then
-			ID=$(__h2elan $1)
-		else
-			ID=`echo $1 | sed 's/[^0-9]*//g'`
-		fi
-		echo $ID"@elan"
-	fi
+	# map a hostname to an elan NID, via __h2elan when available,
+	# otherwise by stripping non-digits from the hostname
+	if [ "$1" = "'*'" ]; then echo \'*\'; else
+		if type __h2elan >/dev/null 2>&1; then
+			ID=$(__h2elan $1)
+		else
+			ID=`echo $1 | sed 's/[^0-9]*//g'`
+		fi
+		echo $ID"@elan"
+	fi
}
declare -fx h2elan
h2o2ib() {
-	h2name_or_ip "$1" "o2ib"
+	h2name_or_ip "$1" "o2ib"
}
declare -fx h2o2ib
}
facet_host() {
-	local facet=$1
+	# Print the host a facet runs on.  "client" maps to $HOSTNAME; for
+	# ost*/mdt*/mds*/mgs facets without an explicit ${facet}_HOST, the
+	# value is derived from the failover base, $ost_HOST or $mds_HOST.
+	local facet=$1
+	local varname
-	[ "$facet" == client ] && echo -n $HOSTNAME && return
-	varname=${facet}_HOST
-	if [ -z "${!varname}" ]; then
-		if [ "${facet:0:3}" == "ost" ]; then
-			eval ${facet}_HOST=${ost_HOST}
-		fi
-	fi
-	echo -n ${!varname}
+	[ "$facet" == client ] && echo -n $HOSTNAME && return
+	varname=${facet}_HOST
+	if [ -z "${!varname}" ]; then
+		if [ "${facet:0:3}" == "ost" ]; then
+			local fh=${facet%failover}_HOST
+			eval export ${facet}_HOST=${!fh}
+			if [ -z "${!varname}" ]; then
+				eval export ${facet}_HOST=${ost_HOST}
+			fi
+		elif [ "${facet:0:3}" == "mdt" -o \
+			"${facet:0:3}" == "mds" -o \
+			"${facet:0:3}" == "mgs" ]; then
+			eval export ${facet}_HOST=${mds_HOST}
+		fi
+	fi
+	echo -n ${!varname}
}
facet_failover_host() {
	local facet=$1
-	local var
+	# keep the local named "var": the assignments below use "var", so
+	# declaring a different name ("varname") would leak "var" into the
+	# global scope
+	local var
	var=${facet}failover_HOST
	if [ -n "${!var}" ]; then
		return
	fi
+	# NOTE(review): the exports below set ${facet}failover_host
+	# (lowercase), not the ${facet}failover_HOST checked above --
+	# confirm whether the cache is intentionally case-mismatched
+	if [ "${facet:0:3}" == "mdt" -o "${facet:0:3}" == "mds" -o \
+		"${facet:0:3}" == "mgs" ]; then
+
+		eval export ${facet}failover_host=${mds_HOST}
+		echo ${mds_HOST}
+		return
+	fi
+
	if [[ $facet == ost* ]]; then
-		var=ostfailover_HOST
-		if [ -n "${!var}" ]; then
-			echo ${!var}
-			return
-		fi
+		eval export ${facet}failover_host=${ost_HOST}
+		echo ${ost_HOST}
+		return
	fi
}
fi
}
+# Get the passive failover partner host of facet.
+# Prints nothing for "client", when no failover host is configured, or
+# when the failover host equals the primary host; otherwise prints
+# whichever of primary/failover is NOT currently active.
+facet_passive_host() {
+	local facet=$1
+	[[ $facet = client ]] && return
+
+	local host=${facet}_HOST
+	local failover_host=${facet}failover_HOST
+	local active_host=$(facet_active_host $facet)
+
+	[[ -z ${!failover_host} || ${!failover_host} = ${!host} ]] && return
+
+	if [[ $active_host = ${!host} ]]; then
+		echo -n ${!failover_host}
+	else
+		echo -n ${!host}
+	fi
+}
+
change_active() {
local facetlist=$1
local facet
echo -n " ${var}=\"$value\""
done
- echo -n " USE_OFD=$USE_OFD"
-
for facet in ${facets//,/ }; do
var=${facet}_FSTYPE
if [ -n "${!var}" ]; then
}
add() {
-	local facet=$1
-	shift
-	# make sure its not already running
-	stop ${facet} -f
-	rm -f $TMP/${facet}active
-	[[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
-	do_facet ${facet} $MKFS $*
+	# format the facet's device with $MKFS; any running instance is
+	# stopped first and stale "*active" state files are cleared
+	local facet=$1
+	shift
+	# make sure its not already running
+	stop ${facet} -f
+	rm -f $TMP/${facet}active
+	[[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
+	do_facet ${facet} $MKFS $* || return ${PIPESTATUS[0]}
+
+	if [[ $(facet_fstype $facet) == zfs ]]; then
+		#
+		# After formatting a ZFS target, "cachefile=none" property will
+		# be set on the ZFS storage pool so that the pool is not
+		# automatically imported on system startup. And then the pool
+		# will be exported so as to leave the importing and exporting
+		# operations handled by mount_facet() and stop() separately.
+		#
+		refresh_partition_table $facet $(facet_vdevice $facet)
+		disable_zpool_cache $facet
+		export_zpool $facet
+	fi
}
+# Device formatted as ost
+# Print the target device name for ost$num: $OSTDEVn (default
+# $OSTDEVBASE$num) for ldiskfs, or the ZFS dataset $OSTZFSDEVn
+# (default ${FSNAME}-ost${num}/ost${num}).
ostdevname() {
-	num=$1
-	DEVNAME=OSTDEV$num
+	local num=$1
+	local DEVNAME=OSTDEV$num
	local fstype=$(facet_fstype ost$num)
		#if $OSTDEVn isn't defined, default is $OSTDEVBASE + num
		eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
	zfs )
-		#dataset name is independent of vdev device names
-		eval DEVPTR=${FSNAME}-ost${num}/ost${num};;
+		#try $OSTZFSDEVn - independent of vdev
+		DEVNAME=OSTZFSDEV$num
+		eval DEVPTR=${!DEVNAME:=${FSNAME}-ost${num}/ost${num}};;
	* )
		error "unknown fstype!";;
	esac
	echo -n $DEVPTR
}
+# Physical device location of data
ostvdevname() {
- num=$1
- DEVNAME=OSTDEV$num
+ local num=$1
+ local DEVNAME
+ local VDEVPTR
local fstype=$(facet_fstype ost$num)
# vdevs are not supported by ldiskfs
eval VDEVPTR="";;
zfs )
- #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num
+ #if $OSTDEVn isn't defined, default is $OSTDEVBASE{n}
+ # Device formatted by ZFS
+ DEVNAME=OSTDEV$num
eval VDEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
* )
error "unknown fstype!";;
echo -n $VDEVPTR
}
+# Logical device formatted for Lustre
mdsdevname() {
- num=$1
- DEVNAME=MDSDEV$num
+ local num=$1
+ local DEVNAME=MDSDEV$num
local fstype=$(facet_fstype mds$num)
case $fstype in
ldiskfs )
- #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num
+ #if $MDSDEVn isn't defined, default is $MDSDEVBASE{n}
eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
zfs )
- #dataset name is independent of vdev device names
- eval DEVPTR=${FSNAME}-mdt${num}/mdt${num};;
+ # try $MDSZFSDEVn - independent of vdev
+ DEVNAME=MDSZFSDEV$num
+ eval DEVPTR=${!DEVNAME:=${FSNAME}-mdt${num}/mdt${num}};;
* )
error "unknown fstype!";;
esac
echo -n $DEVPTR
}
+# Physical location of data
mdsvdevname() {
- num=$1
- DEVNAME=MDSDEV$num
-
+ local VDEVPTR=""
+ local num=$1
local fstype=$(facet_fstype mds$num)
case $fstype in
# vdevs are not supported by ldiskfs
eval VDEVPTR="";;
zfs )
- #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num
+ # if $MDSDEVn isn't defined, default is $MDSDEVBASE{n}
+ # Device formatted by ZFS
+ local DEVNAME=MDSDEV$num
eval VDEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
* )
error "unknown fstype!";;
}
mgsdevname() {
- DEVNAME=MGSDEV
-
- local fstype=$(facet_fstype mds$num)
+ local DEVPTR
+ local fstype=$(facet_fstype mgs)
case $fstype in
- ldiskfs )
- #if $MGSDEV isn't defined, default is $MDSDEV1
- eval DEVPTR=${!DEVNAME:=${MDSDEV1}};;
- zfs )
- #dataset name is independent of vdev device names
- eval DEVPTR=${FSNAME}-mgs/mgs;;
- * )
- error "unknown fstype!";;
+ ldiskfs )
+ if [ $(facet_host mgs) = $(facet_host mds1) ] &&
+ ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsdevname 1) ] ); then
+ DEVPTR=$(mdsdevname 1)
+ else
+ DEVPTR=$MGSDEV
+ fi;;
+ zfs )
+ if [ $(facet_host mgs) = $(facet_host mds1) ] &&
+ ( [ -z "$MGSZFSDEV" ] &&
+ [ -z "$MGSDEV" -o "$MGSDEV" = $(mdsvdevname 1) ] ); then
+ DEVPTR=$(mdsdevname 1)
+ else
+ DEVPTR=${MGSZFSDEV:-${FSNAME}-mgs/mgs}
+ fi;;
+ * )
+ error "unknown fstype!";;
esac
- echo -n $DEVPTR
+ echo -n $DEVPTR
}
mgsvdevname() {
- DEVNAME=MGSDEV
+ local VDEVPTR=""
- local fstype=$(facet_fstype mds$num)
+ local fstype=$(facet_fstype mgs)
case $fstype in
- ldiskfs )
- # vdevs are not supported by ldiskfs
- eval VDEVPTR="";;
- zfs )
- #if $MGSDEV isn't defined, default is $MGSDEV1
- eval VDEVPTR=${!DEVNAME:=${MDSDEV1}};;
- * )
- error "unknown fstype!";;
+ ldiskfs )
+ # vdevs are not supported by ldiskfs
+ ;;
+ zfs )
+ if [ $(facet_host mgs) = $(facet_host mds1) ] &&
+ ( [ -z "$MGSDEV" ] &&
+ [ -z "$MGSZFSDEV" -o "$MGSZFSDEV" = $(mdsdevname 1) ]); then
+ VDEVPTR=$(mdsvdevname 1)
+ elif [ -n "$MGSDEV" ]; then
+ VDEVPTR=$MGSDEV
+ fi;;
+ * )
+ error "unknown fstype!";;
esac
echo -n $VDEVPTR
echo -n $mntpt
}
+# Mount the ldiskfs backing device of a facet on its local mount point.
+mount_ldiskfs() {
+ local facet=$1
+ local dev=$(facet_device $facet)
+ local mnt=$(facet_mntpt $facet)
+ local opts
+
+ # use a loop device when the target is a regular file, not a block device
+ if ! do_facet $facet test -b $dev; then
+ opts="-o loop"
+ fi
+ do_facet $facet mount -t ldiskfs $opts $dev $mnt
+}
+
+# Unmount the ldiskfs backing device of a facet.
+unmount_ldiskfs() {
+ local facet=$1
+ local dev=$(facet_device $facet)
+ local mnt=$(facet_mntpt $facet)
+
+ # -d also detaches the loop device if mount_ldiskfs() used one
+ do_facet $facet umount -d $mnt
+}
+
+# Turn an arbitrary string into a valid shell variable name by replacing
+# every character that is not alphanumeric (or a newline) with '_'.
+var_name() {
+ echo -n "$1" | tr -c '[:alnum:]\n' '_'
+}
+
+# Import the facet's zpool and mount its ZFS dataset on the facet's
+# mount point.  The dataset's original canmount/mountpoint properties
+# are saved in exported mz_* variables so unmount_zfs() can restore them.
+mount_zfs() {
+ local facet=$1
+ local ds=$(facet_device $facet)
+ local mnt=$(facet_mntpt $facet)
+ local canmnt
+ local mntpt
+
+ import_zpool $facet
+ canmnt=$(do_facet $facet $ZFS get -H -o value canmount $ds)
+ mntpt=$(do_facet $facet $ZFS get -H -o value mountpoint $ds)
+ do_facet $facet $ZFS set canmount=noauto $ds
+ #
+ # The "legacy" mount method is used here because "zfs unmount $mnt"
+ # calls stat(2) on $mnt/../*, which may include $MOUNT. If certain
+ # targets are not available at the time, the stat(2) on $MOUNT will
+ # hang.
+ #
+ do_facet $facet $ZFS set mountpoint=legacy $ds
+ do_facet $facet mount -t zfs $ds $mnt
+ eval export mz_$(var_name ${facet}_$ds)_canmount=$canmnt
+ eval export mz_$(var_name ${facet}_$ds)_mountpoint=$mntpt
+}
+
+# Unmount the facet's ZFS dataset, restore the canmount/mountpoint
+# properties saved by mount_zfs(), and export the zpool again.
+unmount_zfs() {
+ local facet=$1
+ local ds=$(facet_device $facet)
+ local mnt=$(facet_mntpt $facet)
+ local var_mntpt=mz_$(var_name ${facet}_$ds)_mountpoint
+ local var_canmnt=mz_$(var_name ${facet}_$ds)_canmount
+ local mntpt=${!var_mntpt}
+ local canmnt=${!var_canmnt}
+
+ unset $var_mntpt
+ unset $var_canmnt
+ do_facet $facet umount $mnt
+ do_facet $facet $ZFS set mountpoint=$mntpt $ds
+ do_facet $facet $ZFS set canmount=$canmnt $ds
+ export_zpool $facet
+}
+
+# Mount a facet's backing store by dispatching to mount_<fstype>().
+mount_fstype() {
+ local facet=$1
+ local fstype=$(facet_fstype $facet)
+
+ mount_$fstype $facet
+}
+
+# Unmount a facet's backing store by dispatching to unmount_<fstype>().
+unmount_fstype() {
+ local facet=$1
+ local fstype=$(facet_fstype $facet)
+
+ unmount_$fstype $facet
+}
+
########
## MountConf setup
}
combined_mgs_mds () {
- [[ $MDSDEV1 = $MGSDEV ]] && [[ $mds1_HOST = $mgs_HOST ]]
+ [[ "$(mdsdevname 1)" = "$(mgsdevname)" ]] &&
+ [[ "$(facet_host mds1)" = "$(facet_host mgs)" ]]
}
lower() {
mkfs_opts() {
local facet=$1
+ local dev=$2
+ local fsname=${3:-"$FSNAME"}
local type=$(facet_type $facet)
local index=$(($(facet_number $facet) - 1))
local fstype=$(facet_fstype $facet)
+ local host=$(facet_host $facet)
local opts
local fs_mkfs_opts
local var
return 1
fi
- if [ $type == MGS ] || ( [ $type == MDS ] && combined_mgs_mds ); then
+ if [ $type == MGS ] || ( [ $type == MDS ] &&
+ [ "$dev" == $(mgsdevname) ] &&
+ [ "$host" == "$(facet_host mgs)" ] ); then
opts="--mgs"
else
opts="--mgsnode=$MGSNID"
fi
if [ $type != MGS ]; then
- opts+=" --fsname=$FSNAME --$(lower ${type/MDS/MDT}) --index=$index"
+ opts+=" --fsname=$fsname --$(lower ${type/MDS/MDT}) \
+ --index=$index"
fi
var=${facet}failover_HOST
opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"}
if [ $fstype == ldiskfs ]; then
+ # Check for wide striping
+ if [ $OSTCOUNT -gt 160 ]; then
+ MDSJOURNALSIZE=${MDSJOURNALSIZE:-4096}
+ fs_mkfs_opts+="-O large_xattr"
+ fi
+
fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"}
+ if [ ! -z $EJOURNAL ]; then
+ fs_mkfs_opts+=${MDSJOURNALSIZE:+" device=$EJOURNAL"}
+ fi
fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"}
fi
fi
echo Formatting mgs, mds, osts
if ! combined_mgs_mds ; then
echo "Format mgs: $(mgsdevname)"
- add mgs $(mkfs_opts mgs) --reformat $(mgsdevname) \
- $(mgsvdevname) ${quiet:+>/dev/null} || exit 10
- fi
+ add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \
+ $(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} ||
+ exit 10
+ fi
- for num in `seq $MDSCOUNT`; do
- echo "Format mds$num: $(mdsdevname $num)"
- add mds$num $(mkfs_opts mds$num) --reformat \
- $(mdsdevname $num) $(mdsvdevname $num) \
+ for num in $(seq $MDSCOUNT); do
+ echo "Format mds$num: $(mdsdevname $num)"
+ add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \
+ --reformat $(mdsdevname $num) $(mdsvdevname $num) \
${quiet:+>/dev/null} || exit 10
- done
+ done
- for num in `seq $OSTCOUNT`; do
- echo "Format ost$num: $(ostdevname $num)"
- add ost$num $(mkfs_opts ost$num) --reformat \
- $(ostdevname $num) $(ostvdevname ${num}) \
+ for num in $(seq $OSTCOUNT); do
+ echo "Format ost$num: $(ostdevname $num)"
+ add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \
+ --reformat $(ostdevname $num) $(ostvdevname ${num}) \
${quiet:+>/dev/null} || exit 10
- done
+ done
}
mount_client() {
zconf_mount `hostname` $1 || error "mount failed"
}
-writeconf_facet () {
- local facet=$1
- local dev=$2
+writeconf_facet() {
+ local facet=$1
+ local dev=$2
- do_facet $facet "$TUNEFS --writeconf $dev"
+ stop ${facet} -f
+ rm -f $TMP/${facet}active
+ do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || return 1
+ return 0
}
writeconf_all () {
- for num in `seq $MDSCOUNT`; do
- DEVNAME=$(mdsdevname $num)
- writeconf_facet mds$num $DEVNAME
- done
+ local mdt_count=${1:-$MDSCOUNT}
+ local ost_count=${2:-$OSTCOUNT}
+ local rc=0
- for num in `seq $OSTCOUNT`; do
- DEVNAME=$(ostdevname $num)
- writeconf_facet ost$num $DEVNAME
- done
+ for num in $(seq $mdt_count); do
+ DEVNAME=$(mdsdevname $num)
+ writeconf_facet mds$num $DEVNAME || rc=$?
+ done
+
+ for num in $(seq $ost_count); do
+ DEVNAME=$(ostdevname $num)
+ writeconf_facet ost$num $DEVNAME || rc=$?
+ done
+ return $rc
}
setupall() {
local varname=${facet}failover_HOST
if [ -z "${!varname}" ]; then
- eval $varname=$(facet_host $facet)
+ eval export $varname=$(facet_host $facet)
fi
+ varname=${facet}_HOST
+ if [ -z "${!varname}" ]; then
+ eval export $varname=$(facet_host $facet)
+ fi
+
# ${facet}failover_dev is set in cfg file
varname=${facet}failover_dev
if [ -n "${!varname}" ] ; then
}
init_facets_vars () {
- local DEVNAME
+ local DEVNAME
- if ! remote_mds_nodsh; then
- for num in `seq $MDSCOUNT`; do
- DEVNAME=`mdsdevname $num`
- init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS
- done
- fi
+ if ! remote_mds_nodsh; then
+ for num in $(seq $MDSCOUNT); do
+ DEVNAME=`mdsdevname $num`
+ init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS
+ done
+ fi
combined_mgs_mds || init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS
- remote_ost_nodsh && return
-
- for num in `seq $OSTCOUNT`; do
- DEVNAME=`ostdevname $num`
- init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS
- done
+ if ! remote_ost_nodsh; then
+ for num in $(seq $OSTCOUNT); do
+ DEVNAME=$(ostdevname $num)
+ init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS
+ done
+ fi
}
osc_ensure_active () {
osc_ensure_active $SINGLEMDS $TIMEOUT
osc_ensure_active client $TIMEOUT
- local jobid_var
- if [ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ]; then
- jobid_var="none"
- elif [ $JOBSTATS_AUTO -ne 0 ]; then
- echo "enable jobstats, set job scheduler as $JOBID_VAR"
- jobid_var=$JOBID_VAR
- else
- jobid_var=`$LCTL get_param -n jobid_var`
- if [ $jobid_var != "disable" ]; then
- echo "disable jobstats as required"
- jobid_var="disable"
- else
- jobid_var="none"
- fi
- fi
+ if [ -n "$(lctl get_param -n mdc.*.connect_flags|grep jobstats)" ]; then
+ local current_jobid_var=$($LCTL get_param -n jobid_var)
+
+ if [ $JOBID_VAR = "existing" ]; then
+ echo "keeping jobstats as $current_jobid_var"
+ elif [ $current_jobid_var != $JOBID_VAR ]; then
+ echo "setting jobstats to $JOBID_VAR"
- if [ $jobid_var == $JOBID_VAR -o $jobid_var == "disable" ]; then
- do_facet mgs $LCTL conf_param $FSNAME.sys.jobid_var=$jobid_var
- wait_update $HOSTNAME "$LCTL get_param -n jobid_var" \
- $jobid_var || return 1
+ set_conf_param_and_check client \
+ "$LCTL get_param -n jobid_var" \
+ "$FSNAME.sys.jobid_var" $JOBID_VAR
+ fi
+ else
+ echo "jobstats not supported by server"
fi
if [ $QUOTA_AUTO -ne 0 ]; then
}
is_empty_dir() {
- [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0
- return 1
+ [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0
+ return 1
}
# empty lustre filesystem may have empty directories lost+found and .lustre
is_empty_fs() {
- [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
- -print | wc -l) = 1 ] || return 1
- [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found && return 0
- [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre && return 0
- return 1
+ # exclude .lustre & lost+found
+ [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
+ -print | wc -l) = 1 ] || return 1
+ [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found || return 1
+ if [ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.4.0) ]; then
+ # exclude .lustre/fid (LU-2780)
+ [ $(find $1/.lustre -maxdepth 1 -name fid -prune -o \
+ -print | wc -l) = 1 ] || return 1
+ else
+ [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre || return 1
+ fi
+ return 0
}
check_and_setup_lustre() {
set_default_debug_nodes $(comma_list $(nodes_list))
fi
+ if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then
+ local facets=""
+ [ "$(facet_fstype ost1)" = "ldiskfs" ] &&
+ facets="$(get_facets OST)"
+ [ "$(facet_fstype mds1)" = "ldiskfs" ] &&
+ facets="$facets,$(get_facets MDS)"
+ [ "$(facet_fstype mgs)" = "ldiskfs" ] &&
+ facets="$facets,mgs"
+ local nodes="$(facets_hosts ${facets})"
+ if [ -n "$nodes" ] ; then
+ do_nodes $nodes "$LCTL set_param \
+ osd-ldiskfs.track_declares_assert=1 || true"
+ fi
+ fi
+
init_gss
if $GSS; then
set_flavor_all $SEC
}
cleanup_mount () {
- local clients=${CLIENTS:-$HOSTNAME}
- local mntpt=$1
+ local clients=${CLIENTS:-$HOSTNAME}
+ local mntpt=$1
- zconf_umount_clients $clients $mntpt
+ zconf_umount_clients $clients $mntpt
}
cleanup_and_setup_lustre() {
local dev
if [ "$type" == ost ]; then
- devs=$(get_obdfilter_param $node "" mntdev)
+ devs=$(get_osd_param $node "" mntdev)
else
- devs=$(do_node $node \
- "lctl get_param -n osd-*.$FSNAME-M*.mntdev")
+ devs=$(do_node $node $LCTL get_param -n osd-*.$FSNAME-M*.mntdev)
fi
for dev in $devs; do
case $dev in
done
}
-# Get all of the server target devices.
-get_svr_devs() {
- local i
+# Get all of the server target devices.
+get_svr_devs() {
+ local node
+ local i
+
+ # Master MDS parameters used by lfsck
+ MDTNODE=$(facet_active_host $SINGLEMDS)
+ MDTDEV=$(echo $(get_mnt_devs $MDTNODE mdt) | awk '{print $1}')
+
+ # MDT devices
+ i=0
+ for node in $(mdts_nodes); do
+ MDTDEVS[i]=$(get_mnt_devs $node mdt)
+ i=$((i + 1))
+ done
+
+ # OST devices
+ i=0
+ for node in $(osts_nodes); do
+ OSTDEVS[i]=$(get_mnt_devs $node ost)
+ i=$((i + 1))
+ done
+}
+
+# Run e2fsck on MDT or OST device.
+run_e2fsck() {
+ local node=$1
+ local target_dev=$2
+ local extra_opts=$3
+ local cmd="$E2FSCK -d -v -t -t -f $extra_opts $target_dev"
+ local log=$TMP/e2fsck.log
+ local rc=0
+
+ echo $cmd
+ do_node $node $cmd 2>&1 | tee $log
+ rc=${PIPESTATUS[0]}
+ if [ -n "$(grep "DNE mode isn't supported" $log)" ]; then
+ rm -f $log
+ if [ $MDSCOUNT -gt 1 ]; then
+ skip "DNE mode isn't supported!"
+ cleanupall
+ exit_status
+ else
+ error "It's not DNE mode."
+ fi
+ fi
+ rm -f $log
- # MDT device
- MDTDEV=$(get_mnt_devs $(mdts_nodes) mdt)
+ [ $rc -le $FSCK_MAX_ERR ] ||
+ error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
- # OST devices
- i=0
- for node in $(osts_nodes); do
- OSTDEVS[i]=$(get_mnt_devs $node ost)
- i=$((i + 1))
- done
+ return 0
}
-# Run e2fsck on MDT or OST device.
-run_e2fsck() {
- local node=$1
- local target_dev=$2
- local extra_opts=$3
+#
+# Run resize2fs on MDT or OST device.
+#
+run_resize2fs() {
+ local facet=$1
+ local device=$2
+ local size=$3
+ shift 3
+ local opts="$@"
- df > /dev/null # update statfs data on disk
- local cmd="$E2FSCK -d -v -t -t -f $extra_opts $target_dev"
- echo $cmd
- local rc=0
- do_node $node $cmd || rc=$?
- [ $rc -le $FSCK_MAX_ERR ] || \
- error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
- return 0
+ do_facet $facet "$RESIZE2FS $opts $device $size"
}
# verify a directory is shared among nodes.
check_shared_dir() {
local dir=$1
+ local list=${2:-$(comma_list $(nodes_list))}
[ -z "$dir" ] && return 1
- do_rpc_nodes "$(comma_list $(nodes_list))" check_logdir $dir
- check_write_access $dir || return 1
+ do_rpc_nodes "$list" check_logdir $dir
+ check_write_access $dir "$list" || return 1
return 0
}
# Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
generate_db() {
- local i
- local ostidx
- local dev
+ local i
+ local ostidx
+ local dev
+ local node
[[ $(lustre_version_code $SINGLEMDS) -ne $(version_code 2.2.0) ]] ||
{ skip "Lustre 2.2.0 lacks the patch for LU-1255"; exit 0; }
- check_shared_dir $SHARED_DIRECTORY ||
- error "$SHARED_DIRECTORY isn't a shared directory"
-
- export MDSDB=$SHARED_DIRECTORY/mdsdb
- export OSTDB=$SHARED_DIRECTORY/ostdb
+ check_shared_dir $SHARED_DIRECTORY ||
+ error "$SHARED_DIRECTORY isn't a shared directory"
- [ $MDSCOUNT -eq 1 ] || error "CMD is not supported"
+ export MDSDB=$SHARED_DIRECTORY/mdsdb
+ export OSTDB=$SHARED_DIRECTORY/ostdb
- run_e2fsck $(mdts_nodes) $MDTDEV "-n --mdsdb $MDSDB"
+ # DNE is not supported, so when running e2fsck on a DNE filesystem,
+ # we only pass master MDS parameters.
+ run_e2fsck $MDTNODE $MDTDEV "-n --mdsdb $MDSDB"
i=0
ostidx=0
done
}
+# Run lfsck on server node if lfsck can't be found on client (LU-2571)
+# $1: node to run lfsck on
+# Mounts Lustre on $1 first if it is not already mounted there and
+# unmounts it again afterwards.  Errors out if lfsck's exit status
+# exceeds $FSCK_MAX_ERR; otherwise returns lfsck's exit status.
+run_lfsck_remote() {
+ local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
+ local client=$1
+ local mounted=true
+ local rc=0
+
+ #Check if lustre is already mounted
+ do_rpc_nodes $client is_mounted $MOUNT || mounted=false
+ if ! $mounted; then
+ zconf_mount $client $MOUNT ||
+ error "failed to mount Lustre on $client"
+ fi
+ #Run lfsck
+ echo $cmd
+ do_node $client $cmd || rc=$?
+ #Umount if necessary
+ if ! $mounted; then
+ zconf_umount $client $MOUNT ||
+ error "failed to unmount Lustre on $client"
+ fi
+
+ [ $rc -le $FSCK_MAX_ERR ] ||
+ error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
+ echo "lfsck finished with rc=$rc"
+
+ return $rc
+}
+
run_lfsck() {
- local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
- echo $cmd
- local rc=0
- eval $cmd || rc=$?
- [ $rc -le $FSCK_MAX_ERR ] || \
- error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
- echo "lfsck finished with rc=$rc"
+ local facets="client $SINGLEMDS"
+ local found=false
+ local facet
+ local node
+ local rc=0
- rm -rvf $MDSDB* $OSTDB* || true
- return 0
+ for facet in $facets; do
+ node=$(facet_active_host $facet)
+ if check_progs_installed $node $LFSCK_BIN; then
+ found=true
+ break
+ fi
+ done
+ ! $found && error "None of \"$facets\" supports lfsck"
+
+ run_lfsck_remote $node || rc=$?
+
+ rm -rvf $MDSDB* $OSTDB* || true
+ return $rc
}
check_and_cleanup_lustre() {
case $type in
MGS ) list="$list $name";;
- MDS|OST ) local count=${type}COUNT
+ MDS|OST|AGT ) local count=${type}COUNT
for ((i=1; i<=${!count}; i++)) do
list="$list ${name}$i"
done;;
return $RC
}
+# Make MDT mds$1 drop one update reply via fail_loc 0x1701
+# (OBD_FAIL_UPDATE_OBJ_NET_REP), run the remaining arguments as a
+# command on the client, then clear the fail_loc.
+# Returns the client command's exit status.
+drop_update_reply() {
+# OBD_FAIL_UPDATE_OBJ_NET_REP
+ local index=$1
+ shift 1
+ RC=0
+ do_facet mds${index} lctl set_param fail_loc=0x1701
+ do_facet client "$@" || RC=$?
+ do_facet mds${index} lctl set_param fail_loc=0
+ return $RC
+}
+
pause_bulk() {
#define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214
- RC=0
- do_facet ost1 lctl set_param fail_loc=0x214
- do_facet client "$1" || RC=$?
- do_facet client "sync"
- do_facet ost1 lctl set_param fail_loc=0
- return $RC
+ RC=0
+
+ local timeout=${2:-0}
+ # default is (obd_timeout / 4) if unspecified
+ echo "timeout is $timeout/$2"
+ do_facet ost1 lctl set_param fail_val=$timeout fail_loc=0x80000214
+ do_facet client "$1" || RC=$?
+ do_facet client "sync"
+ do_facet ost1 lctl set_param fail_loc=0
+ return $RC
}
drop_ldlm_cancel() {
}
set_nodes_failloc () {
- do_nodes $(comma_list $1) lctl set_param fail_loc=$2
+ do_nodes $(comma_list $1) lctl set_param fail_val=0 fail_loc=$2
}
cancel_lru_locks() {
##################################
error_noexit() {
- local TYPE=${TYPE:-"FAIL"}
+ local TYPE=${TYPE:-"FAIL"}
- local dump=true
- # do not dump logs if $1=false
- if [ "x$1" = "xfalse" ]; then
- shift
- dump=false
- fi
+ local dump=true
+ # do not dump logs if $1=false
+ if [ "x$1" = "xfalse" ]; then
+ shift
+ dump=false
+ fi
- log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
- log_trace_dump
- mkdir -p $LOGDIR
- # We need to dump the logs on all nodes
- if $dump; then
- gather_logs $(comma_list $(nodes_list))
- fi
+ log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
+ log_trace_dump
+
+ mkdir -p $LOGDIR
+ # We need to dump the logs on all nodes
+ if $dump; then
+ gather_logs $(comma_list $(nodes_list))
+ fi
debugrestore
[ "$TESTSUITELOG" ] &&
echo "$TESTSUITE: $TYPE: $TESTNAME $@" >> $TESTSUITELOG
- echo "$@" > $LOGDIR/err
+ if [ -z "$*" ]; then
+ echo "error() without useful message, please fix" > $LOGDIR/err
+ else
+ if [[ `echo $TYPE | grep ^IGNORE` ]]; then
+ echo "$@" > $LOGDIR/ignore
+ else
+ echo "$@" > $LOGDIR/err
+ fi
+ fi
}
exit_status () {
}
error() {
- error_noexit "$@"
- exit 1
+ error_noexit "$@"
+ exit 1
}
error_exit() {
- error "$@"
+ error "$@"
}
# use only if we are ignoring failures for this test, bugno required.
# (like ALWAYS_EXCEPT, but run the test and ignore the results.)
-# e.g. error_ignore 5494 "your message"
+# e.g. error_ignore bz5494 "your message" or
+# error_ignore LU-5494 "your message"
error_ignore() {
- local TYPE="IGNORE (bz$1)"
- shift
- error_noexit "$@"
+ local TYPE="IGNORE ($1)"
+ shift
+ error_noexit "$@"
}
error_and_remount() {
}
skip_env () {
- $FAIL_ON_SKIP_ENV && error false $@ || skip $@
+ $FAIL_ON_SKIP_ENV && error false $@ || skip $@
}
skip() {
build_test_filter() {
EXCEPT="$EXCEPT $(testslist_filter)"
- [ "$ONLY" ] && log "only running test `echo $ONLY`"
- for O in $ONLY; do
- eval ONLY_${O}=true
- done
+ for O in $ONLY; do
+ if [[ $O = [0-9]*-[0-9]* ]]; then
+ for num in $(seq $(echo $O | tr '-' ' ')); do
+ eval ONLY_$num=true
+ done
+ else
+ eval ONLY_${O}=true
+ fi
+ done
+
[ "$EXCEPT$ALWAYS_EXCEPT" ] && \
log "excepting tests: `echo $EXCEPT $ALWAYS_EXCEPT`"
[ "$EXCEPT_SLOW" ] && \
return $?
fi
LAST_SKIPPED="y"
- echo -n "."
return 0
fi
}
#
-# Run a single test function and cleanup after it.
+# Run a single test function and cleanup after it.
#
# This function should be run in a subshell so the test func can
# exit() without stopping the whole script.
#
run_one() {
- local testnum=$1
- local message=$2
- tfile=f.${TESTSUITE}.${testnum}
- export tdir=d0.${TESTSUITE}/d${base}
- export TESTNAME=test_$testnum
- local SAVE_UMASK=`umask`
- umask 0022
-
- banner "test $testnum: $message"
- test_${testnum} || error "test_$testnum failed with $?"
- cd $SAVE_PWD
- reset_fail_loc
- check_grant ${testnum} || error "check_grant $testnum failed with $?"
- check_catastrophe || error "LBUG/LASSERT detected"
- ps auxww | grep -v grep | grep -q multiop && error "multiop still running"
- unset TESTNAME
- unset tdir
- umask $SAVE_UMASK
- return 0
+ local testnum=$1
+ local message=$2
+ export tfile=f${testnum}.${TESTSUITE}
+ export tdir=d${testnum}.${TESTSUITE}
+ export TESTNAME=test_$testnum
+ local SAVE_UMASK=`umask`
+ umask 0022
+
+ banner "test $testnum: $message"
+ test_${testnum} || error "test_$testnum failed with $?"
+ cd $SAVE_PWD
+ reset_fail_loc
+ check_grant ${testnum} || error "check_grant $testnum failed with $?"
+ check_catastrophe || error "LBUG/LASSERT detected"
+ if [ "$PARALLEL" != "yes" ]; then
+ ps auxww | grep -v grep | grep -q multiop &&
+ error "multiop still running"
+ fi
+ unset TESTNAME
+ unset tdir
+ unset tfile
+ umask $SAVE_UMASK
+ return 0
}
#
local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log
local test_log=$LOGDIR/$name
rm -rf $LOGDIR/err
+ rm -rf $LOGDIR/ignore
rm -rf $LOGDIR/skip
local SAVE_UMASK=`umask`
umask 0022
(run_one $1 "$2") 2>&1 | tee -i $test_log
local RC=${PIPESTATUS[0]}
- [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \
+ [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] &&
echo "test_$1 returned $RC" | tee $LOGDIR/err
duration=$((`date +%s` - $BEFORE))
if [[ -f $LOGDIR/err ]]; then
TEST_ERROR=$(cat $LOGDIR/err)
+ elif [[ -f $LOGDIR/ignore ]]; then
+ TEST_ERROR=$(cat $LOGDIR/ignore)
elif [[ -f $LOGDIR/skip ]]; then
TEST_ERROR=$(cat $LOGDIR/skip)
fi
log_sub_test_end $TEST_STATUS $duration "$RC" "$TEST_ERROR"
+ if [[ "$TEST_STATUS" != "SKIP" ]] && [[ -f $TF_SKIP ]]; then
+ rm -f $TF_SKIP
+ fi
+
if [ -f $LOGDIR/err ]; then
$FAIL_ON_ERROR && exit $RC
fi
$LFS mdts $2 | sed -ne "/^$1: /s/.* \(.*\) .*$/\1/p"
}
+# Description:
+# Return unique identifier for given hostname
+host_id() {
+ local host_name=$1
+ # md5 of the name; used to build per-host cache variable names
+ echo $host_name | md5sum | cut -d' ' -f1
+}
+
+# Description:
+# Returns list of ip addresses for each interface
+# (IPv4 only: matches the "inet " lines of "ip addr" output)
+local_addr_list() {
+ ip addr | awk '/inet\ / {print $2}' | awk -F\/ '{print $1}'
+}
+
+# Return success if $1 is one of this node's own IP addresses.
+is_local_addr() {
+ local addr=$1
+ # Cache address list to avoid multiple executions of local_addr_list
+ LOCAL_ADDR_LIST=${LOCAL_ADDR_LIST:-$(local_addr_list)}
+ local i
+ for i in $LOCAL_ADDR_LIST ; do
+ [[ "$i" == "$addr" ]] && return 0
+ done
+ return 1
+}
+
+# Return success if host_name resolves to an address of the local node.
+# The answer is cached in IS_LOCAL_<host_id> so resolveip is run only
+# once per distinct hostname.
+local_node() {
+ local host_name=$1
+ local is_local="IS_LOCAL_$(host_id $host_name)"
+ if [ -z "${!is_local-}" ] ; then
+ eval $is_local=0
+ local host_ip=$($LUSTRE/tests/resolveip $host_name)
+ is_local_addr "$host_ip" && eval $is_local=1
+ fi
+ [[ "${!is_local}" == "1" ]]
+}
+
remote_node () {
- local node=$1
- [ "$node" != "$(hostname)" ]
+ local node=$1
+ local_node $node && return 1
+ return 0
}
remote_mds ()
$(single_local_node $(comma_list $(nodes_list)))
}
-mdts_nodes () {
- local MDSNODES
- local NODES_sort
- for num in `seq $MDSCOUNT`; do
- MDSNODES="$MDSNODES $(facet_host mds$num)"
- done
- NODES_sort=$(for i in $MDSNODES; do echo $i; done | sort -u)
-
- echo $NODES_sort
-}
-
remote_servers () {
remote_ost && remote_mds
}
+# Get the active nodes for facets.
facets_nodes () {
- local facets=$1
- local nodes
- local NODES_sort
+ local facets=$1
+ local facet
+ local nodes
+ local nodes_sort
+ local i
- for facet in ${facets//,/ }; do
- if [ "$FAILURE_MODE" = HARD ]; then
- nodes="$nodes $(facet_active_host $facet)"
- else
- nodes="$nodes $(facet_host $facet)"
- fi
- done
- NODES_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ for facet in ${facets//,/ }; do
+ nodes="$nodes $(facet_active_host $facet)"
+ done
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
- echo $NODES_sort
+# Get all of the active MDS nodes.
+mdts_nodes () {
+ echo -n $(facets_nodes $(get_facets MDS))
}
+# Get all of the active OSS nodes.
osts_nodes () {
- local facets=$(get_facets OST)
- local nodes=$(facets_nodes $facets)
+ echo -n $(facets_nodes $(get_facets OST))
+}
- echo $nodes
+# Get all of the active AGT (HSM agent) nodes.
+agts_nodes () {
+ echo -n $(facets_nodes $(get_facets AGT))
}
+# Get all of the client nodes and active server nodes.
nodes_list () {
- # FIXME. We need a list of clients
- local myNODES=$HOSTNAME
- local myNODES_sort
+ local nodes=$HOSTNAME
+ local nodes_sort
+ local i
- # CLIENTS (if specified) contains the local client
- [ -n "$CLIENTS" ] && myNODES=${CLIENTS//,/ }
-
- if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
- myNODES="$myNODES $(facets_nodes $(get_facets))"
- fi
+ # CLIENTS (if specified) contains the local client
+ [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
- myNODES_sort=$(for i in $myNODES; do echo $i; done | sort -u)
+ if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+ nodes="$nodes $(facets_nodes $(get_facets))"
+ fi
- echo $myNODES_sort
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
}
+# Get all of the remote client nodes and remote active server nodes.
remote_nodes_list () {
- echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+ echo -n $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+}
+
+# Get all of the MDS nodes, including active and passive nodes.
+all_mdts_nodes () {
+ local host
+ local failover_host
+ local nodes
+ local nodes_sort
+ local i
+
+ # collect both the primary and the failover host of every MDT
+ for i in $(seq $MDSCOUNT); do
+ host=mds${i}_HOST
+ failover_host=mds${i}failover_HOST
+ nodes="$nodes ${!host} ${!failover_host}"
+ done
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the OSS nodes, including active and passive nodes.
+all_osts_nodes () {
+ local host
+ local failover_host
+ local nodes
+ local nodes_sort
+ local i
+
+ # collect both the primary and the failover host of every OST
+ for i in $(seq $OSTCOUNT); do
+ host=ost${i}_HOST
+ failover_host=ost${i}failover_HOST
+ nodes="$nodes ${!host} ${!failover_host}"
+ done
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the server nodes, including active and passive nodes.
+all_server_nodes () {
+ local nodes
+ local nodes_sort
+ local i
+
+ # MGS (primary and failover) plus every MDS and OSS node
+ nodes="$mgs_HOST $mgsfailover_HOST $(all_mdts_nodes) $(all_osts_nodes)"
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
+
+# Get all of the client and server nodes, including active and passive nodes.
+all_nodes () {
+ local nodes=$HOSTNAME
+ local nodes_sort
+ local i
+
+ # CLIENTS (if specified) contains the local client
+ [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
+
+ # only add server nodes when remote shells are usable
+ if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+ nodes="$nodes $(all_server_nodes)"
+ fi
+
+ nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+ echo -n $nodes_sort
+}
init_clients_lists () {
}
get_stripe () {
- local file=$1/stripe
- touch $file
- $LFS getstripe -v $file || error
- rm -f $file
+ local file=$1/stripe
+
+ touch $file
+ $LFS getstripe -v $file || error "getstripe $file failed"
+ rm -f $file
}
setstripe_nfsserver () {
$LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
}
-# save_lustre_params(node, parameter_mask)
-# generate a stream of formatted strings (<node> <param name>=<param value>)
+# save_lustre_params(comma separated facet list, parameter_mask)
+# generate a stream of formatted strings (<facet> <param name>=<param value>)
save_lustre_params() {
+ local facets=$1
+ local facet
+ local nodes
+ local node
+
+ for facet in ${facets//,/ }; do
+ node=$(facet_active_host $facet)
+ # skip facets whose active host was already dumped; the string
+ # being tested must be the LEFT operand of = inside [[ ]] so the
+ # globs in the right-hand pattern are honored
+ [[ " $nodes " = *\ $node\ * ]] && continue
+ nodes="$nodes $node"
+
+ do_node $node "$LCTL get_param $2 |
+ while read s; do echo $facet \\\$s; done"
+ done
}
# restore lustre parameters from input stream, produces by save_lustre_params
restore_lustre_params() {
- local node
- local name
- local val
- while IFS=" =" read node name val; do
- do_node ${node//:/} "lctl set_param -n $name $val"
- done
+ local facet
+ local name
+ local val
+
+ while IFS=" =" read facet name val; do
+ do_facet $facet "$LCTL set_param -n $name $val"
+ done
}
check_catastrophe() {
[ -z "$rnodes" ] && return 0
- do_nodes "$rnodes" "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0);
+ local data
+ data=$(do_nodes "$rnodes" "rc=\\\$([ -f $C ] &&
+ echo \\\$(< $C) || echo 0);
if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
- exit \\\$rc;"
+ exit \\\$rc")
+ local rc=$?
+ if [ -n "$data" ]; then
+ echo $data
+ return $rc
+ fi
+ return 0
}
# CMD: determine mds index where directory inode presents
}
mdsrate_cleanup () {
- if [ -d $4 ]; then
- mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6
- rmdir $4
- fi
+ if [ -d $4 ]; then
+ mpi_run ${MACHINEFILE_OPTION} $2 -np $1 ${MDSRATE} --unlink \
+ --nfiles $3 --dir $4 --filefmt $5 $6
+ rmdir $4
+ fi
}
delayed_recovery_enabled () {
########################
-convert_facet2label() {
+convert_facet2label() {
local facet=$1
if [ x$facet = xost ]; then
if [ -n ${!varsvc} ]; then
echo ${!varsvc}
- else
+ else
error "No lablel for $facet!"
fi
}
get_clientosc_proc_path() {
- echo "${1}-osc-[^M]*"
+ echo "${1}-osc-*"
}
get_lustre_version () {
local CONN_STATE
local i=0
- CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+ CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
while [ "${CONN_STATE}" != "${expected}" ]; do
if [ "${expected}" == "DISCONN" ]; then
# for disconn we can check after proc entry is removed
error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \
return 1
sleep 1
- CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+ # Add uniq for multi-mount case
+ CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
i=$(($i + 1))
done
done
}
+# Like wait_import_state, but a no-op when neither $MOUNT nor $MOUNT2 is
+# mounted (without a client mount there is nothing to wait for).
+wait_import_state_mount() {
+ if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
+ return 0
+ fi
+
+ wait_import_state $*
+}
+
# One client request could be timed out because server was not ready
# when request was sent by client.
# The request timeout calculation details :
echo $(( init_connect_timeout + at_min ))
}
-wait_osc_import_state() {
- local facet=$1
- local ost_facet=$2
- local expected=$3
- local ost=$(get_osc_import_name $facet $ost_facet)
-
- local param="osc.${ost}.ost_server_uuid"
-
- # 1. wait the deadline of client 1st request (it could be skipped)
- # 2. wait the deadline of client 2nd request
- local maxtime=$(( 2 * $(request_timeout $facet)))
-
- if ! do_rpc_nodes "$(facet_host $facet)" \
- _wait_import_state $expected $param $maxtime; then
+_wait_osc_import_state() {
+ # Wait until the OSC import for $ost_facet on facet $1 reaches the
+ # $expected state (e.g. FULL, DISCONN), checking via do_rpc_nodes on
+ # the facet's active host. Returns non-zero on timeout.
+ # $1 - facet owning the import, $2 - OST facet, $3 - expected state
+ local facet=$1
+ local ost_facet=$2
+ local expected=$3
+ local ost=$(get_osc_import_name $facet $ost_facet)
+ local param="osc.${ost}.ost_server_uuid"
+ local params=$param
+ local i=0
+
+ # 1. wait the deadline of client 1st request (it could be skipped)
+ # 2. wait the deadline of client 2nd request
+ local maxtime=$(( 2 * $(request_timeout $facet)))
+
+ if [[ $facet == client* ]]; then
+ # During setup the osc may not be set up yet, so wait until
+ # list_param returns a valid value. Also, if there are
+ # multiple osc entries, list all of them before waiting.
+ params=$($LCTL list_param $param 2>/dev/null || true)
+ while [ -z "$params" ]; do
+ if [ $i -ge $maxtime ]; then
+ echo "can't get $param in $maxtime secs"
+ return 1
+ fi
+ sleep 1
+ i=$((i + 1))
+ params=$($LCTL list_param $param 2>/dev/null || true)
+ done
+ fi
+ if ! do_rpc_nodes "$(facet_active_host $facet)" \
+ wait_import_state $expected "$params" $maxtime; then
error "import is not in ${expected} state"
return 1
fi
return 0
}
+wait_osc_import_state() {
+ # Front end for _wait_osc_import_state(): for the pseudo-facet
+ # "mds" wait on every MDS (mds1..mds$MDSCOUNT), otherwise wait on
+ # the single given facet.
+ local facet=$1
+ local ost_facet=$2
+ local expected=$3
+ local num
+
+ if [[ $facet = mds ]]; then
+ for num in $(seq $MDSCOUNT); do
+ _wait_osc_import_state mds$num "$ost_facet" "$expected"
+ done
+ else
+ _wait_osc_import_state "$facet" "$ost_facet" "$expected"
+ fi
+}
+
get_clientmdc_proc_path() {
+ # Print the lctl wildcard matching this client's MDC device(s) for
+ # MDT "$1", e.g. "fsname-MDT0000-mdc-*".
echo "${1}-mdc-*"
}
[ -z "$list" ] && return 0
# Add paths to lustre tests for 32 and 64 bit systems.
- local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
+ local LIBPATH="/usr/lib/lustre/tests:/usr/lib64/lustre/tests:"
+ local TESTPATH="$RLUSTRE/tests:"
+ local RPATH="PATH=${TESTPATH}${LIBPATH}${PATH}:/sbin:/bin:/usr/sbin:"
do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ "
}
local params=$(expand_list $params $proc_path)
done
- if ! do_rpc_nodes "$list" wait_import_state $expected $params; then
+ if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params; then
error "import is not in ${expected} state"
return 1
fi
}
check_write_access() {
+ # Verify that directory $1 was writable from every node in $2
+ # (comma-separated list; defaults to all known nodes) by checking
+ # that each node's check_file exists, removing the files as we go.
+ # Returns 1 if any file is missing or cannot be removed.
- local dir=$1
- local node
- local file
+ local dir=$1
+ local list=${2:-$(comma_list $(nodes_list))}
+ local node
+ local file
- for node in $(nodes_list); do
- file=$dir/check_file.$(short_hostname $node)
- if [[ ! -f "$file" ]]; then
- # Logdir not accessible/writable from this node.
- return 1
- fi
- rm -f $file || return 1
- done
- return 0
+ for node in ${list//,/ }; do
+ file=$dir/check_file.$(short_nodename $node)
+ if [[ ! -f "$file" ]]; then
+ # Logdir not accessible/writable from this node.
+ return 1
+ fi
+ rm -f $file || return 1
+ done
+ return 0
}
init_logging() {
llverfs $partial_arg $llverfs_opts $dir
}
+#Remove objects from OST
+remove_ost_objects() {
+ # Mount the OST backing store directly and unlink the given objects.
+ # $1 - OST facet, $2 - backing device, $3 - object group (directory
+ # under O/), remaining args - object ids to remove.
+ # NOTE(review): mount/umount run locally rather than via do_facet —
+ # presumably callers only use this when the device is attached to
+ # the local node; confirm against callers.
+ local facet=$1
+ local ostdev=$2
+ local group=$3
+ shift 3
+ local objids="$@"
+ local mntpt=$(facet_mntpt $facet)
+ local opts=$OST_MOUNT_OPTS
+ local i
+ local rc
+
+ echo "removing objects from $ostdev on $facet: $objids"
+ if ! test -b $ostdev; then
+ opts=$(csa_add "$opts" -o loop)
+ fi
+ mount -t $(facet_fstype $facet) $opts $ostdev $mntpt ||
+ return $?
+ rc=0
+ for i in $objids; do
+ # objects are hashed into subdirectories O/<group>/d<id % 32>/
+ rm $mntpt/O/$group/d$((i % 32))/$i || { rc=$?; break; }
+ done
+ umount -f $mntpt || return $?
+ return $rc
+}
+
+#Remove files from MDT
remove_mdt_files() {
local facet=$1
local mdtdev=$2
echo "removing files from $mdtdev on $facet: $files"
if [ $(facet_fstype $facet) == ldiskfs ] &&
- ! do_facet $facet test -b ${!dev}; then
+ ! do_facet $facet test -b $mdtdev; then
opts=$(csa_add "$opts" -o loop)
fi
mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
return $?
- rc=0;
+ rc=0
for f in $files; do
rm $mntpt/ROOT/$f || { rc=$?; break; }
done
echo "duplicating files on $mdtdev on $facet: $files"
mkdir -p $mntpt || return $?
if [ $(facet_fstype $facet) == ldiskfs ] &&
- ! do_facet $facet test -b ${!dev}; then
+ ! do_facet $facet test -b $mdtdev; then
opts=$(csa_add "$opts" -o loop)
fi
mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
$LCTL get_param -n osc.*.kbytesavail | sort -n | head -n1
}
+#
+# Get the available size (KB) of a given obd target.
+# Arguments: $1 - facet, $2 - obd device name.
+# Prints the size to stdout; prints nothing for the "client" facet,
+# since kbytesavail is only meaningful on server targets.
+#
+get_obd_size() {
+ local facet=$1
+ local obd=$2
+ local size
+
+ [[ $facet != client ]] || return 0
+
+ size=$(do_facet $facet $LCTL get_param -n *.$obd.kbytesavail | head -n1)
+ echo -n $size
+}
+
+#
+# Get the page size (bytes) on a given facet node.
+# Falls back to 4096 when getconf fails or prints nothing.
+#
+get_page_size() {
+ local facet=$1
+ local size
+
+ size=$(do_facet $facet getconf PAGE_SIZE)
+ [[ ${PIPESTATUS[0]} = 0 && -n "$size" ]] || size=4096
+ echo -n $size
+}
+
+#
+# Get the block count of the filesystem.
+# Arguments: $1 - facet, $2 - backing device.
+# NOTE(review): parses dumpe2fs output, so this only works for
+# ldiskfs targets — confirm callers never pass a zfs device.
+#
+get_block_count() {
+ local facet=$1
+ local device=$2
+ local count
+
+ count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+ awk '/^Block count:/ {print $3}')
+ echo -n $count
+}
+
# Get the block size of the filesystem.
get_block_size() {
local facet=$1
local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
- do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 | grep -q large_xattr"
+ do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 |
+ grep -E -q '(ea_inode|large_xattr)'"
return ${PIPESTATUS[0]}
}
}
reformat_external_journal() {
+ # Recreate the external journal device ($EJOURNAL, if set) on the
+ # given facet before its target is reformatted. $1 - facet name.
+ local facet=$1
+
if [ ! -z ${EJOURNAL} ]; then
- local rcmd="do_facet ${SINGLEMDS}"
+ local rcmd="do_facet $facet"
- echo "reformat external journal on ${SINGLEMDS}:${EJOURNAL}"
+ echo "reformat external journal on $facet:${EJOURNAL}"
${rcmd} mke2fs -O journal_dev ${EJOURNAL} || return 1
fi
}
# MDT file-level backup/restore
mds_backup_restore() {
- local devname=$(mdsdevname ${SINGLEMDS//mds/})
+ local facet=$1
+ local igif=$2
+ local devname=$(mdsdevname $(facet_number $facet))
local mntpt=$(facet_mntpt brpt)
- local rcmd="do_facet ${SINGLEMDS}"
+ local rcmd="do_facet $facet"
local metaea=${TMP}/backup_restore.ea
local metadata=${TMP}/backup_restore.tgz
local opts=${MDS_MOUNT_OPTS}
- local svc=${SINGLEMDS}_svc
+ local svc=${facet}_svc
if ! ${rcmd} test -b ${devname}; then
opts=$(csa_add "$opts" -o loop)
fi
- echo "file-level backup/restore on ${SINGLEMDS}:${devname}"
+ echo "file-level backup/restore on $facet:${devname}"
# step 1: build mount point
${rcmd} mkdir -p $mntpt
${rcmd} rm -f $metaea $metadata
# step 3: mount dev
${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 1
+ if [ ! -z $igif ]; then
+ # step 3.5: rm .lustre
+ ${rcmd} rm -rf $mntpt/ROOT/.lustre || return 1
+ fi
# step 4: backup metaea
echo "backup EA"
${rcmd} "cd $mntpt && getfattr -R -d -m '.*' -P . > $metaea && cd -" ||
# step 6: umount
${rcmd} umount -d $mntpt || return 4
# step 7: reformat external journal if needed
- reformat_external_journal || return 5
+ reformat_external_journal $facet || return 5
# step 8: reformat dev
echo "reformat new device"
- add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS}) --backfstype ldiskfs \
- --reformat $devname > /dev/null || return 6
+ add $facet $(mkfs_opts $facet ${devname}) --backfstype ldiskfs \
+ --reformat ${devname} $(mdsvdevname $(facet_number $facet)) \
+ > /dev/null || exit 6
# step 9: mount dev
${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 7
# step 10: restore metadata
# remove OI files
mds_remove_ois() {
- local devname=$(mdsdevname ${SINGLEMDS//mds/})
+ local facet=$1
+ local idx=$2
+ local devname=$(mdsdevname $(facet_number $facet))
local mntpt=$(facet_mntpt brpt)
- local rcmd="do_facet ${SINGLEMDS}"
- local idx=$1
+ local rcmd="do_facet $facet"
local opts=${MDS_MOUNT_OPTS}
if ! ${rcmd} test -b ${devname}; then
opts=$(csa_add "$opts" -o loop)
fi
- echo "remove OI files: idx=${idx}"
+ echo "removing OI files on $facet: idx=${idx}"
# step 1: build mount point
${rcmd} mkdir -p $mntpt
echo "$TESTLOG_PREFIX.$TESTNAME.$logname.$(hostname -s).log"
}
+
+# make directory on different MDTs
+# Usage: test_mkdir [-p] <path>
+# With a single MDT this is a plain mkdir; with multiple MDTs,
+# directories whose parent lives on MDT0 are created as striped
+# directories ("lfs setdirstripe") on an MDT chosen from the test
+# number, so successive tests spread load across MDTs.
+test_mkdir() {
+ local option
+ local parent
+ local child
+ local path
+ local rc=0
+
+ case $# in
+ 1) path=$1;;
+ 2) option=$1
+ path=$2;;
+ *) error "Only creating single directory is supported";;
+ esac
+
+ child=$(basename $path)
+ parent=$(dirname $path)
+
+ # mkdir -p on an existing directory is a successful no-op
+ if [ "$option" == "-p" -a -d $parent/$child ]; then
+ return $rc
+ fi
+
+ if [ ! -d ${parent} ]; then
+ if [ "$option" == "-p" ]; then
+ mkdir -p ${parent}
+ else
+ return 1
+ fi
+ fi
+
+ if [ $MDSCOUNT -le 1 ]; then
+ mkdir $option $parent/$child || rc=$?
+ else
+ local mdt_idx=$($LFS getstripe -M $parent)
+ # NOTE(review): assumes $testnum is set by the test harness —
+ # confirm; an unset value leaves test_num empty.
+ local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
+
+ # only parents on MDT0 get striped children; elsewhere a plain
+ # mkdir inherits the parent's MDT
+ if [ "$mdt_idx" -ne 0 ]; then
+ mkdir $option $parent/$child || rc=$?
+ else
+ mdt_idx=$((test_num % MDSCOUNT))
+ echo "mkdir $mdt_idx for $parent/$child"
+ $LFS setdirstripe -i $mdt_idx $parent/$child || rc=$?
+ fi
+ fi
+ return $rc
+}
+
+# find the smallest file descriptor (>= 3; 0-2 are stdio) not in use
+# by this process. Prints the descriptor number; calls error() when
+# every fd in 3..$(ulimit -n) is open.
+# NOTE(review): assumes "ulimit -n" prints a number — an "unlimited"
+# soft limit would break the arithmetic comparisons.
+free_fd()
+{
+ local max_fd=$(ulimit -n)
+ local fd=3
+ # stop at the first fd with no /proc/self/fd entry (i.e. free)
+ while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do
+ ((++fd))
+ done
+ # the loop leaves fd free whenever fd <= max_fd; the previous
+ # "-lt" test wrongly rejected the boundary case fd == max_fd
+ [ $fd -le $max_fd ] || error "finding free file descriptor failed"
+ echo $fd
+}