export SK_S2S=${SK_S2S:-false}
export SK_S2SNM=${SK_S2SNM:-TestFrameNM}
export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli}
+export SK_SKIPFIRST=${SK_SKIPFIRST:-true}
export IDENTITY_UPCALL=default
export QUOTA_AUTO=1
+export FLAKEY=${FLAKEY:-true}
# specify environment variable containing batch job name for server statistics
export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
. $LUSTRE/tests/functions.sh
. $LUSTRE/tests/yaml.sh
-export LD_LIBRARY_PATH=${LUSTRE}/utils:${LD_LIBRARY_PATH}
+export LD_LIBRARY_PATH=${LUSTRE}/utils/.libs:${LUSTRE}/utils:${LD_LIBRARY_PATH}
LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg}
}
print_summary () {
- trap 0
- [ -z "$DEFAULT_SUITES"] && return 0
- [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
- local details
- local form="%-13s %-17s %-9s %s %s\n"
- printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
- echo "------------------------------------------------------------------------------------"
+ trap 0
+ [ -z "$DEFAULT_SUITES" ] && return 0
+ [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
+ local details
+ local form="%-13s %-17s %-9s %s %s\n"
+
+ printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
+ echo "---------------------------------------------------------------"
for O in $DEFAULT_SUITES; do
O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
[ "${!O}" = "no" ] && continue || true
fi
}
+# Get information about the Lustre environment. The information collected
+# will be used in Lustre tests.
+# usage: get_lustre_env
+# input: No required or optional arguments
+# output: No return values, environment variables are exported
+
+get_lustre_env() {
+
+	# Cache the backend filesystem types of the primary MDT and OST.
+	export mds1_FSTYPE=${mds1_FSTYPE:-$(facet_fstype mds1)}
+	export ost1_FSTYPE=${ost1_FSTYPE:-$(facet_fstype ost1)}
+
+	# Cache the Lustre version code of each server role and the client
+	# so tests can gate features on versions without re-querying nodes.
+	export MGS_VERSION=$(lustre_version_code mgs)
+	export MDS1_VERSION=$(lustre_version_code mds1)
+	export OST1_VERSION=$(lustre_version_code ost1)
+	export CLIENT_VERSION=$(lustre_version_code client)
+
+	# Prefer using "mds1" directly instead of SINGLEMDS.
+	# Keep this for compat until it is removed from scripts.
+	export SINGLEMDS=${SINGLEMDS:-mds1}
+}
+
init_test_env() {
export LUSTRE=$(absolute_path $LUSTRE)
export TESTSUITE=$(basename $0 .sh)
export RPC_MODE=${RPC_MODE:-false}
export DO_CLEANUP=${DO_CLEANUP:-true}
export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
+ export CLEANUP_DM_DEV=false
+ export PAGE_SIZE=$(get_page_size client)
export MKE2FS=$MKE2FS
if [ -z "$MKE2FS" ]; then
[ ! -f "$LCTL" ] && export LCTL=$(which lctl)
export LFS=${LFS:-"$LUSTRE/utils/lfs"}
[ ! -f "$LFS" ] && export LFS=$(which lfs)
- SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"}
- GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"}
+
+ export PERM_CMD=${PERM_CMD:-"$LCTL conf_param"}
export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
if [ ! -f "$L_GETIDENTITY" ]; then
export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod}
[ ! -f "$LUSTRE_RMMOD" ] &&
export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null)
- export LUSTRE_ROUTES_COVERSION=${LUSTRE_ROUTES_CONVERSION:-$LUSTRE/scripts/lustre_routes_conversion}
+ export LUSTRE_ROUTES_CONVERSION=${LUSTRE_ROUTES_CONVERSION:-$LUSTRE/scripts/lustre_routes_conversion}
[ ! -f "$LUSTRE_ROUTES_CONVERSION" ] &&
export LUSTRE_ROUTES_CONVERSION=$(which lustre_routes_conversion 2> /dev/null)
export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate}
[ ! -f "$LR_READER" ] &&
export LR_READER=$(which lr_reader 2> /dev/null)
[ -z "$LR_READER" ] && export LR_READER="/usr/sbin/lr_reader"
+ export LSOM_SYNC=${LSOM_SYNC:-"$LUSTRE/utils/llsom_sync"}
+ [ ! -f "$LSOM_SYNC" ] &&
+ export LSOM_SYNC=$(which llsom_sync 2> /dev/null)
+ [ -z "$LSOM_SYNC" ] && export LSOM_SYNC="/usr/sbin/llsom_sync"
export NAME=${NAME:-local}
export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
[ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] &&
export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"}
[ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null)
+ export DMSETUP=${DMSETUP:-dmsetup}
+ export DM_DEV_PATH=${DM_DEV_PATH:-/dev/mapper}
+ export LOSETUP=${LOSETUP:-losetup}
+
if [ "$ACCEPTOR_PORT" ]; then
export PORT_OPT="--port $ACCEPTOR_PORT"
fi
export RLUSTRE=${RLUSTRE:-$LUSTRE}
export RPWD=${RPWD:-$PWD}
export I_MOUNTED=${I_MOUNTED:-"no"}
+ export AUSTER_CLEANUP=${AUSTER_CLEANUP:-false}
if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \
! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \
! -f /lib/modules/$(uname -r)/extra/kernel/fs/lustre/mdt.ko -a \
fi
export TF_FAIL=${TF_FAIL:-$TMP/tf.fail}
+
+ # Constants used in more than one test script
+ export LOV_MAX_STRIPE_COUNT=2000
+
+ export MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
+ . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+ get_lustre_env
+
+ # use localrecov to enable recovery for local clients, LU-12722
+ [[ $MDS1_VERSION -lt $(version_code 2.13.52) ]] ||
+ export MDS_MOUNT_OPTS=${MDS_MOUNT_OPTS:-"-o localrecov"}
+ [[ $OST1_VERSION -lt $(version_code 2.13.52) ]] ||
+ export OST_MOUNT_OPTS=${OST_MOUNT_OPTS:-"-o localrecov"}
}
check_cpt_number() {
# split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3"
eval set -- $(tr "[:punct:]" " " <<< $*)
- echo -n "$((($1 << 16) | ($2 << 8) | $3))"
+ echo -n "$(((${1:-0} << 16) | (${2:-0} << 8) | ${3:-0}))"
}
export LINUX_VERSION=$(uname -r | sed -e "s/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1/")
# /etc/modprobe.conf, from /etc/modprobe.d/Lustre, or else none will be used.
#
load_module() {
- local optvar
- EXT=".ko"
- module=$1
- shift
- BASE=$(basename $module $EXT)
-
- module_loaded ${BASE} && return
-
- # If no module arguments were passed, get them from $MODOPTS_<MODULE>,
- # else from modprobe.conf
- if [ $# -eq 0 ]; then
- # $MODOPTS_<MODULE>; we could use associative arrays, but that's not in
- # Bash until 4.x, so we resort to eval.
- optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
- eval set -- \$$optvar
- if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
- # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
- local opt
- opt=$(awk -v var="^options $BASE" '$0 ~ var \
- {gsub("'"options $BASE"'",""); print}' $MODPROBECONF)
- set -- $(echo -n $opt)
-
- # Ensure we have accept=all for lnet
- if [ $(basename $module) = lnet ]; then
- # OK, this is a bit wordy...
- local arg accept_all_present=false
-
- for arg in "$@"; do
- [ "$arg" = accept=all ] && \
- accept_all_present=true
- done
- $accept_all_present || set -- "$@" accept=all
+ local module=$1 # '../libcfs/libcfs/libcfs', 'obdclass/obdclass', ...
+ shift
+ local ext=".ko"
+ local base=$(basename $module $ext)
+ local path
+ local -A module_is_loaded_aa
+ local optvar
+ local mod
+
+ for mod in $(lsmod | awk '{ print $1; }'); do
+ module_is_loaded_aa[${mod//-/_}]=true
+ done
+
+ module_is_loaded() {
+ ${module_is_loaded_aa[${1//-/_}]:-false}
+ }
+
+ if module_is_loaded $base; then
+ return
+ fi
+
+ if [[ -f $LUSTRE/$module$ext ]]; then
+ path=$LUSTRE/$module$ext
+ elif [[ "$base" == lnet_selftest ]] &&
+ [[ -f $LUSTRE/../lnet/selftest/$base$ext ]]; then
+ path=$LUSTRE/../lnet/selftest/$base$ext
+ else
+ path=''
+ fi
+
+ if [[ -n "$path" ]]; then
+ # Try to load any non-Lustre modules that $module depends on.
+ for mod in $(modinfo --field=depends $path | tr ',' ' '); do
+ if ! module_is_loaded $mod; then
+ modprobe $mod
+ fi
+ done
+ fi
+
+ # If no module arguments were passed then get them from
+ # $MODOPTS_<MODULE>, otherwise from modprobe.conf.
+ if [ $# -eq 0 ]; then
+ # $MODOPTS_<MODULE>; we could use associative arrays, but that's
+ # not in Bash until 4.x, so we resort to eval.
+ optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
+ eval set -- \$$optvar
+ if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
+ # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
+ local opt
+ opt=$(awk -v var="^options $base" '$0 ~ var \
+ {gsub("'"options $base"'",""); print}' \
+ $MODPROBECONF)
+ set -- $(echo -n $opt)
+
+ # Ensure we have accept=all for lnet
+ if [[ "$base" == lnet ]]; then
+ # OK, this is a bit wordy...
+ local arg accept_all_present=false
+
+ for arg in "$@"; do
+ [[ "$arg" == accept=all ]] &&
+ accept_all_present=true
+ done
+
+ $accept_all_present || set -- "$@" accept=all
+ fi
+
+ export $optvar="$*"
fi
- export $optvar="$*"
- fi
- fi
+ fi
- [ $# -gt 0 ] && echo "${module} options: '$*'"
+ [ $# -gt 0 ] && echo "${module} options: '$*'"
# Note that insmod will ignore anything in modprobe.conf, which is why
- # we're passing options on the command-line.
- if [[ "$BASE" == "lnet_selftest" ]] &&
- [[ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]]; then
- lustre_insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
- elif [[ -f ${LUSTRE}/${module}${EXT} ]]; then
- [[ "$BASE" != "ptlrpc_gss" ]] || modprobe sunrpc
- lustre_insmod ${LUSTRE}/${module}${EXT} "$@"
- else
- # must be testing a "make install" or "rpm" installation
- # note failed to load ptlrpc_gss is considered not fatal
- if [[ "$BASE" == "ptlrpc_gss" ]]; then
- modprobe $BASE "$@" 2>/dev/null ||
- echo "gss/krb5 is not supported"
- else
- modprobe $BASE "$@"
+ # we're passing options on the command-line. If $path does not exist
+	# then we must be testing a "make install" or "rpm" installation. Also
+ # note that failing to load ptlrpc_gss is not considered fatal.
+ if [[ -n "$path" ]]; then
+ lustre_insmod $path "$@"
+ elif [[ "$base" == ptlrpc_gss ]]; then
+ if ! modprobe $base "$@" 2>/dev/null; then
+ echo "gss/krb5 is not supported"
fi
+ else
+ modprobe $base "$@"
fi
}
return 0
fi
+ # Create special udev test rules on every node
+ if [ -f $LUSTRE/lustre/conf/99-lustre.rules ]; then {
+ sed -e 's|/usr/sbin/lctl|$LCTL|g' $LUSTRE/lustre/conf/99-lustre.rules > /etc/udev/rules.d/99-lustre-test.rules
+ } else {
+ echo "SUBSYSTEM==\"lustre\", ACTION==\"change\", ENV{PARAM}==\"?*\", RUN+=\"$LCTL set_param '\$env{PARAM}=\$env{SETTING}'\"" > /etc/udev/rules.d/99-lustre-test.rules
+ } fi
+ udevadm control --reload-rules
+ udevadm trigger
+
+	# For kmemleak-enabled kernels we need to clear all past state
+ # that obviously has nothing to do with this Lustre run
+ # Disable automatic memory scanning to avoid perf hit.
+ if [ -f /sys/kernel/debug/kmemleak ] ; then
+ echo scan=off > /sys/kernel/debug/kmemleak
+ echo scan > /sys/kernel/debug/kmemleak
+ echo clear > /sys/kernel/debug/kmemleak
+ fi
+
echo Loading modules from $LUSTRE
local ncpus
load_module mgc/mgc
load_module obdecho/obdecho
if ! client_only; then
- SYMLIST=/proc/kallsyms
- grep -q crc16 $SYMLIST ||
- { modprobe crc16 2>/dev/null || true; }
- grep -q -w jbd2 $SYMLIST ||
- { modprobe jbd2 2>/dev/null || true; }
load_module lfsck/lfsck
[ "$LQUOTA" != "no" ] &&
load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
- lsmod | grep zfs >&/dev/null || modprobe zfs
load_module osd-zfs/osd_zfs
- fi
- if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
- grep -q exportfs_decode_fh $SYMLIST ||
- { modprobe exportfs 2> /dev/null || true; }
- grep -q -w mbcache $SYMLIST ||
- { modprobe mbcache 2>/dev/null || true; }
+ elif [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
load_module ../ldiskfs/ldiskfs
load_module osd-ldiskfs/osd_ldiskfs
fi
$LUSTRE_RMMOD ldiskfs || return 2
+ [ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
+ rm /etc/udev/rules.d/99-lustre-test.rules
+ udevadm control --reload-rules
+ udevadm trigger
+
if $LOAD_MODULES_REMOTE; then
local list=$(comma_list $(remote_nodes_list))
if [ -n "$list" ]; then
echo "unloading modules on: '$list'"
do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
do_rpc_nodes "$list" check_mem_leak
+ do_rpc_nodes "$list" "rm -f /etc/udev/rules.d/99-lustre-test.rules"
+ do_rpc_nodes "$list" "udevadm control --reload-rules"
+ do_rpc_nodes "$list" "udevadm trigger"
fi
fi
fs_log_size() {
local facet=${1:-$SINGLEMDS}
- local fstype=$(facet_fstype $facet)
local size=0
- case $fstype in
+
+ case $(facet_fstype $facet) in
ldiskfs) size=50;; # largest seen is 44, leave some headroom
- zfs) size=512;; # largest seen is 512
+ # grant_block_size is in bytes, allow at least 2x max blocksize
+ zfs) size=$(lctl get_param osc.$FSNAME*.import |
+ awk '/grant_block_size:/ {print $2/512; exit;}')
+ ;;
esac
- echo -n $size
+ echo -n $((size * MDSCOUNT))
}
fs_inode_ksize() {
echo -n $mt_opts
}
+# Return success (0) if $LUSTRE points into a build tree rather than an
+# installed (packaged) location under /usr/lib/lustre or /usr/lib64/lustre.
+# usage: from_build_tree
+from_build_tree() {
+	local from_tree
+
+	case $LUSTRE in
+	/usr/lib/lustre/* | /usr/lib64/lustre/* | /usr/lib/lustre | \
+	/usr/lib64/lustre )
+		# Installed package locations.
+		from_tree=false
+		;;
+	*)
+		from_tree=true
+		;;
+	esac
+
+	[ $from_tree = true ]
+}
+
init_gss() {
if $SHARED_KEY; then
GSS=true
# security ctx config for keyring
SK_NO_KEY=false
- mkdir -p $SK_OM_PATH
- mount -o bind $SK_OM_PATH /etc/request-key.d/
- local lgssc_conf_line='create lgssc * * '
- lgssc_conf_line+=$(which lgss_keyring)
- lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
-
local lgssc_conf_file="/etc/request-key.d/lgssc.conf"
- echo "$lgssc_conf_line" > $lgssc_conf_file
+
+ if from_build_tree; then
+ mkdir -p $SK_OM_PATH
+ if grep -q request-key /proc/mounts > /dev/null; then
+ echo "SSK: Request key already mounted."
+ else
+ mount -o bind $SK_OM_PATH /etc/request-key.d/
+ fi
+ local lgssc_conf_line='create lgssc * * '
+ lgssc_conf_line+=$(which lgss_keyring)
+ lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
+ echo "$lgssc_conf_line" > $lgssc_conf_file
+ fi
+
[ -e $lgssc_conf_file ] ||
error_exit "Could not find key options in $lgssc_conf_file"
+ echo "$lgssc_conf_file content is:"
+ cat $lgssc_conf_file
if ! local_mode; then
- do_nodes $(comma_list $(all_nodes)) "mkdir -p \
- $SK_OM_PATH"
- do_nodes $(comma_list $(all_nodes)) "mount \
- -o bind $SK_OM_PATH \
- /etc/request-key.d/"
- do_nodes $(comma_list $(all_nodes)) "rsync -aqv \
- $HOSTNAME:$lgssc_conf_file \
- $lgssc_conf_file >/dev/null 2>&1"
+ if from_build_tree; then
+ do_nodes $(comma_list $(all_nodes)) "mkdir -p \
+ $SK_OM_PATH"
+ do_nodes $(comma_list $(all_nodes)) "mount \
+ -o bind $SK_OM_PATH \
+ /etc/request-key.d/"
+ do_nodes $(comma_list $(all_nodes)) "rsync \
+ -aqv $HOSTNAME:$lgssc_conf_file \
+ $lgssc_conf_file >/dev/null 2>&1"
+ else
+ do_nodes $(comma_list $(all_nodes)) \
+ "echo $lgssc_conf_file: ; \
+ cat $lgssc_conf_file"
+ fi
fi
# create shared key on all nodes
done
# Distribute keys
if ! local_mode; then
- do_nodes $(comma_list $(all_nodes)) "rsync -av \
- $HOSTNAME:$SK_PATH/ $SK_PATH >/dev/null 2>&1"
+ for lnode in $(all_nodes); do
+ scp -r $SK_PATH ${lnode}:$(dirname $SK_PATH)/
+ done
fi
# Set client keys to client type to generate prime P
if local_mode; then
-m $SK_PATH/$FSNAME-nmclient.key \
>/dev/null 2>&1"
fi
+ fi
+ if $GSS_SK; then
# mount options for servers and clients
MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS)
MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS)
OST_MOUNT_OPTS=$(add_sk_mntflag $OST_MOUNT_OPTS)
MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
SEC=$SK_FLAVOR
+ if [ -z "$LGSS_KEYRING_DEBUG" ]; then
+ LGSS_KEYRING_DEBUG=4
+ fi
fi
- if [ -n "$LGSS_KEYRING_DEBUG" ]; then
+ if [ -n "$LGSS_KEYRING_DEBUG" ] && \
+ ( local_mode || from_build_tree ); then
+ lctl set_param -n \
+ sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG
+ elif [ -n "$LGSS_KEYRING_DEBUG" ]; then
+ do_nodes $(comma_list $(all_nodes)) "modprobe ptlrpc_gss && \
lctl set_param -n \
- sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG
+ sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG"
fi
}
$RPC_MODE || echo "Cleaning up Shared Key.."
do_nodes $(comma_list $(all_nodes)) "rm -f \
$SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key"
- # Remove the mount and clean up the files we added to SK_PATH
- do_nodes $(comma_list $(all_nodes)) "umount \
- /etc/request-key.d/"
- do_nodes $(comma_list $(all_nodes)) "rm -f \
- $SK_OM_PATH/lgssc.conf"
- do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
+ do_nodes $(comma_list $(all_nodes)) "keyctl show | \
+ awk '/lustre/ { print \\\$1 }' | xargs -IX keyctl unlink X"
+ if from_build_tree; then
+ # Remove the mount and clean up the files we added to
+ # SK_PATH
+ do_nodes $(comma_list $(all_nodes)) "while grep -q \
+ request-key.d /proc/mounts; do umount \
+ /etc/request-key.d/; done"
+ do_nodes $(comma_list $(all_nodes)) "rm -f \
+ $SK_OM_PATH/lgssc.conf"
+ do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
+ fi
SK_NO_KEY=true
fi
}
virt=$(dmidecode -s system-product-name | awk '{print $1}')
case $virt in
- VMware|KVM|VirtualBox|Parallels)
+ VMware|KVM|VirtualBox|Parallels|Bochs)
echo $virt | tr '[A-Z]' '[a-z]' ;;
*) ;;
esac
import_zpool() {
local facet=$1
shift
- local opts=${@:-"-o cachefile=none"}
+ local opts=${@:-"-o cachefile=none -o failmode=panic"}
local poolname
poolname=$(zpool_name $facet)
}
set_default_debug () {
- local debug=${1:-"$PTLDEBUG"}
- local subsys=${2:-"$SUBSYSTEM"}
- local debug_size=${3:-$DEBUG_SIZE}
+ local debug=${1:-"$PTLDEBUG"}
+ local subsys=${2:-"$SUBSYSTEM"}
+ local debug_size=${3:-$DEBUG_SIZE}
- [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
- [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
+ [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
+ [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
- [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
+ [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
}
set_default_debug_nodes () {
local nodes="$1"
+ local debug="${2:-"$PTLDEBUG"}"
+ local subsys="${3:-"$SUBSYSTEM"}"
+ local debug_size="${4:-$DEBUG_SIZE}"
if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
set_default_debug
fi
- do_rpc_nodes "$nodes" set_default_debug \
- \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
+ [[ -z "$nodes" ]] ||
+ do_rpc_nodes "$nodes" set_default_debug \
+ \\\"$debug\\\" \\\"$subsys\\\" $debug_size || true
}
set_default_debug_facet () {
- local facet=$1
- local node=$(facet_active_host $facet)
- [ -z "$node" ] && echo "No host defined for facet $facet" && exit 1
+ local facet=$1
+ local debug="${2:-"$PTLDEBUG"}"
+ local subsys="${3:-"$SUBSYSTEM"}"
+ local debug_size="${4:-$DEBUG_SIZE}"
+ local node=$(facet_active_host $facet)
+
+ [ -n "$node" ] || error "No host defined for facet $facet"
- set_default_debug_nodes $node
+ set_default_debug_nodes $node "$debug" "$subsys" $debug_size
}
set_hostid () {
[ $RC -eq 0 ] && continue
if [ "$TESTSUITE.$TESTNAME" = "replay-dual.test_0a" ]; then
- skip "Restart of $facet failed!." && touch $LU482_FAILED
+ skip_noexit "Restart of $facet failed!." &&
+ touch $LU482_FAILED
else
error "Restart of $facet failed!"
fi
echo -n "$opts"
}
+#
+# Associate loop device with a given regular file on the facet node.
+# Reuses an existing association if one is found ($LOSETUP -j), otherwise
+# binds the first free loop device ($LOSETUP -f).
+# Return the loop device (prints empty string on failure).
+# usage: setup_loop_device <facet> <file>
+#
+setup_loop_device() {
+	local facet=$1
+	local file=$2
+
+	do_facet $facet "loop_dev=\\\$($LOSETUP -j $file | cut -d : -f 1);
+			 if [[ -z \\\$loop_dev ]]; then
+				loop_dev=\\\$($LOSETUP -f);
+				$LOSETUP \\\$loop_dev $file || loop_dev=;
+			 fi;
+			 echo -n \\\$loop_dev"
+}
+
+#
+# Detach a loop device, if it is still configured; a no-op otherwise.
+# usage: cleanup_loop_device <facet> <loop_dev>
+#
+cleanup_loop_device() {
+	local facet=$1
+	local loop_dev=$2
+
+	do_facet $facet "! $LOSETUP $loop_dev >/dev/null 2>&1 ||
+			 $LOSETUP -d $loop_dev"
+}
+
+#
+# Check if a given device is a block device.
+# If an optional size (in KB) is given, also verify the device is at least
+# that large by reading 1 KB at offset <size>.
+# usage: is_blkdev <facet> <dev> [size_KB]
+#
+is_blkdev() {
+	local facet=$1
+	local dev=$2
+	local size=${3:-""}
+
+	[[ -n "$dev" ]] || return 1
+	do_facet $facet "test -b $dev" || return 1
+	if [[ -n "$size" ]]; then
+		# dd reports "1+0 records in" only if the full block was read.
+		local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k \
+			count=1 skip=$size 2>&1" |
+			awk '($3 == "in") { print $1 }')
+		[[ "$in" = "1+0" ]] || return 1
+	fi
+}
+
+#
+# Check if a given device is a device-mapper device.
+# usage: is_dm_dev <facet> <dev>
+#
+is_dm_dev() {
+	local facet=$1
+	local dev=$2
+
+	[[ -n "$dev" ]] || return 1
+	do_facet $facet "$DMSETUP status $dev >/dev/null 2>&1"
+}
+
+#
+# Check if a given device is a device-mapper flakey device.
+# usage: is_dm_flakey_dev <facet> <dev>
+#
+is_dm_flakey_dev() {
+	local facet=$1
+	local dev=$2
+	local type
+
+	[[ -n "$dev" ]] || return 1
+
+	# The third field of "dmsetup status" output is the target type.
+	type=$(do_facet $facet "$DMSETUP status $dev 2>&1" |
+	       awk '{print $3}')
+	[[ $type = flakey ]] && return 0 || return 1
+}
+
+#
+# Check if device-mapper flakey device is supported by the kernel
+# of $facet node or not.
+# Also honors the global $FLAKEY switch, which can disable dm-flakey
+# usage for the whole test run.
+# usage: dm_flakey_supported <facet>
+#
+dm_flakey_supported() {
+	local facet=$1
+
+	$FLAKEY || return 1
+	do_facet $facet "modprobe dm-flakey;
+			 $DMSETUP targets | grep -q flakey" &> /dev/null
+}
+
+#
+# Get the device-mapper flakey device name of a given facet.
+# usage: dm_facet_devname <facet>
+#
+dm_facet_devname() {
+	local facet=$1
+	# A combined MGS/MDS shares the mds1 device.
+	[[ $facet = mgs ]] && combined_mgs_mds && facet=mds1
+
+	echo -n ${facet}_flakey
+}
+
+#
+# Get the device-mapper flakey device of a given facet.
+# A device created by dmsetup will appear as /dev/mapper/<device-name>.
+# usage: dm_facet_devpath <facet>
+#
+dm_facet_devpath() {
+	local facet=$1
+
+	echo -n $DM_DEV_PATH/$(dm_facet_devname $facet)
+}
+
+#
+# Set a device-mapper device with a new table.
+#
+# The table has the following format:
+# <logical_start_sector> <num_sectors> <target_type> <target_args>
+#
+# flakey <target_args> includes:
+# <destination_device> <offset> <up_interval> <down_interval> \
+# [<num_features> [<feature_arguments>]]
+#
+# linear <target_args> includes:
+# <destination_device> <start_sector>
+#
+# usage: dm_set_dev_table <facet> <dm_dev> <flakey|linear>
+#
+dm_set_dev_table() {
+	local facet=$1
+	local dm_dev=$2
+	local target_type=$3
+	local num_sectors
+	local real_dev
+	local tmp
+	local table
+
+	# Parse the sector count and backing device out of the current table.
+	read tmp num_sectors tmp real_dev tmp \
+		<<< $(do_facet $facet "$DMSETUP table $dm_dev")
+
+	case $target_type in
+	flakey)
+		# up_interval=0, down_interval=1800, one feature
+		# (drop_writes): writes are silently dropped, reads pass.
+		table="0 $num_sectors flakey $real_dev 0 0 1800 1 drop_writes"
+		;;
+	linear)
+		table="0 $num_sectors linear $real_dev 0"
+		;;
+	*) error "invalid target type $target_type" ;;
+	esac
+
+	# Standard dmsetup sequence for live table replacement:
+	# suspend, load the new table, resume.
+	do_facet $facet "$DMSETUP suspend --nolockfs --noflush $dm_dev" ||
+		error "failed to suspend $dm_dev"
+	do_facet $facet "$DMSETUP load $dm_dev --table \\\"$table\\\"" ||
+		error "failed to load $target_type table into $dm_dev"
+	do_facet $facet "$DMSETUP resume $dm_dev" ||
+		error "failed to resume $dm_dev"
+}
+
+#
+# Set a device-mapper flakey device as "read-only" by using the "drop_writes"
+# feature parameter.
+#
+# drop_writes:
+#	All write I/O is silently ignored.
+#	Read I/O is handled correctly.
+#
+# usage: dm_set_dev_readonly <facet> [dm_dev]
+#
+dm_set_dev_readonly() {
+	local facet=$1
+	local dm_dev=${2:-$(dm_facet_devpath $facet)}
+
+	dm_set_dev_table $facet $dm_dev flakey
+}
+
+#
+# Set a device-mapper device to traditional linear mapping mode,
+# i.e. undo dm_set_dev_readonly() so writes reach the device again.
+# usage: dm_clear_dev_readonly <facet> [dm_dev]
+#
+dm_clear_dev_readonly() {
+	local facet=$1
+	local dm_dev=${2:-$(dm_facet_devpath $facet)}
+
+	dm_set_dev_table $facet $dm_dev linear
+}
+
+#
+# Set the device of a given facet as "read-only".
+# For ZFS backends, or when dm-flakey is unavailable, fall back to the
+# lctl per-device readonly flag; otherwise use the dm-flakey table.
+# usage: set_dev_readonly <facet>
+#
+set_dev_readonly() {
+	local facet=$1
+	local svc=${facet}_svc
+
+	if [[ $(facet_fstype $facet) = zfs ]] ||
+	   ! dm_flakey_supported $facet; then
+		do_facet $facet $LCTL --device ${!svc} readonly
+	else
+		dm_set_dev_readonly $facet
+	fi
+}
+
+#
+# Get size in 512-byte sectors (BLKGETSIZE64 / 512) of a given device.
+# Prints 0 if the size cannot be determined.
+# usage: get_num_sectors <facet> <dev>
+#
+get_num_sectors() {
+	local facet=$1
+	local dev=$2
+	local num_sectors
+
+	num_sectors=$(do_facet $facet "blockdev --getsz $dev 2>/dev/null")
+	[[ ${PIPESTATUS[0]} = 0 && -n "$num_sectors" ]] || num_sectors=0
+	echo -n $num_sectors
+}
+
+#
+# Create a device-mapper device with a given block device or regular file (will
+# be associated with loop device).
+# Return the full path of the device-mapper device.
+# usage: dm_create_dev <facet> <real_dev> [dm_dev_name]
+#
+dm_create_dev() {
+	local facet=$1
+	local real_dev=$2 # destination device
+	local dm_dev_name=${3:-$(dm_facet_devname $facet)} # device name
+	local dm_dev=$DM_DEV_PATH/$dm_dev_name # device-mapper device
+
+	# check if the device-mapper device to be created already exists
+	if is_dm_dev $facet $dm_dev; then
+		# if the existing device was set to "read-only", then clear it
+		! is_dm_flakey_dev $facet $dm_dev ||
+			dm_clear_dev_readonly $facet $dm_dev
+
+		echo -n $dm_dev
+		return 0
+	fi
+
+	# check if the destination device is a block device, and if not,
+	# associate it with a loop device
+	is_blkdev $facet $real_dev ||
+		real_dev=$(setup_loop_device $facet $real_dev)
+	[[ -n "$real_dev" ]] || { echo -n $real_dev; return 2; }
+
+	# now create the device-mapper device
+	local num_sectors=$(get_num_sectors $facet $real_dev)
+	# start with a plain 1:1 linear mapping over the whole device
+	local table="0 $num_sectors linear $real_dev 0"
+	local rc=0
+
+	do_facet $facet "$DMSETUP create $dm_dev_name --table \\\"$table\\\"" ||
+		{ rc=${PIPESTATUS[0]}; dm_dev=; }
+	do_facet $facet "$DMSETUP mknodes >/dev/null 2>&1"
+
+	echo -n $dm_dev
+	return $rc
+}
+
+#
+# Map the facet name to its device variable name.
+# The fs2*/fs3* facets used by "second filesystem" tests share device
+# variables with other facets (e.g. fs2mds -> mds1_2_dev).
+# usage: facet_device_alias <facet>
+#
+facet_device_alias() {
+	local facet=$1
+	local dev_alias=$facet
+
+	case $facet in
+		fs2mds) dev_alias=mds1_2 ;;
+		fs2ost) dev_alias=ost1_2 ;;
+		fs3ost) dev_alias=ost2_2 ;;
+		*) ;;
+	esac
+
+	echo -n $dev_alias
+}
+
+#
+# Save the original value of the facet device and export the new value.
+# The original is kept in <var>_saved so unexport_dm_dev() can restore it.
+# usage: export_dm_dev <facet> <dm_dev>
+#
+export_dm_dev() {
+	local facet=$1
+	local dm_dev=$2
+
+	local active_facet=$(facet_active $facet)
+	local dev_alias=$(facet_device_alias $active_facet)
+	local dev_name=${dev_alias}_dev
+	local dev=${!dev_name}
+
+	if [[ $active_facet = $facet ]]; then
+		# Facet runs on its primary host: also redirect the failover
+		# device variable if it pointed at the same backing device.
+		local failover_dev=${dev_alias}failover_dev
+		if [[ ${!failover_dev} = $dev ]]; then
+			eval export ${failover_dev}_saved=$dev
+			eval export ${failover_dev}=$dm_dev
+		fi
+	else
+		# Facet has failed over: update the primary device variable.
+		dev_alias=$(facet_device_alias $facet)
+		local facet_dev=${dev_alias}_dev
+		if [[ ${!facet_dev} = $dev ]]; then
+			eval export ${facet_dev}_saved=$dev
+			eval export ${facet_dev}=$dm_dev
+		fi
+	fi
+
+	eval export ${dev_name}_saved=$dev
+	eval export ${dev_name}=$dm_dev
+}
+
+#
+# Restore the saved value of the facet device.
+# Reverses export_dm_dev(); a no-op for variables without a *_saved copy.
+# usage: unexport_dm_dev <facet>
+#
+unexport_dm_dev() {
+	local facet=$1
+
+	[[ $facet = mgs ]] && combined_mgs_mds && facet=mds1
+	local dev_alias=$(facet_device_alias $facet)
+
+	local saved_dev=${dev_alias}_dev_saved
+	[[ -z ${!saved_dev} ]] ||
+		eval export ${dev_alias}_dev=${!saved_dev}
+
+	saved_dev=${dev_alias}failover_dev_saved
+	[[ -z ${!saved_dev} ]] ||
+		eval export ${dev_alias}failover_dev=${!saved_dev}
+}
+
+#
+# Remove a device-mapper device.
+# If the destination device is a loop device, then also detach it.
+# usage: dm_cleanup_dev <facet> [dm_dev]
+#
+dm_cleanup_dev() {
+	local facet=$1
+	local dm_dev=${2:-$(dm_facet_devpath $facet)}
+	local major
+	local minor
+
+	is_dm_dev $facet $dm_dev || return 0
+
+	# Extract major:minor of the backing device from the dm table.
+	read major minor <<< $(do_facet $facet "$DMSETUP table $dm_dev" |
+		awk '{ print $4 }' | awk -F: '{ print $1" "$2 }')
+
+	do_facet $facet "$DMSETUP remove $dm_dev"
+	do_facet $facet "$DMSETUP mknodes >/dev/null 2>&1"
+
+	unexport_dm_dev $facet
+
+	# detach a loop device (major number 7 is the loop driver)
+	[[ $major -ne 7 ]] || cleanup_loop_device $facet /dev/loop$minor
+
+	# unload dm-flakey module
+	do_facet $facet "modprobe -r dm-flakey" || true
+}
+
mount_facet() {
local facet=$1
shift
- local dev=$(facet_active $facet)_dev
+ local active_facet=$(facet_active $facet)
+ local dev_alias=$(facet_device_alias $active_facet)
+ local dev=${dev_alias}_dev
local opt=${facet}_opt
local mntpt=$(facet_mntpt $facet)
local opts="${!opt} $@"
local fstype=$(facet_fstype $facet)
local devicelabel
+ local dm_dev=${!dev}
module_loaded lustre || load_modules
- if [ $(facet_fstype $facet) == ldiskfs ] &&
- ! do_facet $facet test -b ${!dev}; then
- opts=$(csa_add "$opts" -o loop)
- fi
+ case $fstype in
+ ldiskfs)
+ if dm_flakey_supported $facet; then
+ dm_dev=$(dm_create_dev $facet ${!dev})
+ [[ -n "$dm_dev" ]] || dm_dev=${!dev}
+ fi
- if [[ $(facet_fstype $facet) == zfs ]]; then
+ is_blkdev $facet $dm_dev || opts=$(csa_add "$opts" -o loop)
+
+ devicelabel=$(do_facet ${facet} "$E2LABEL $dm_dev");;
+ zfs)
# import ZFS storage pool
import_zpool $facet || return ${PIPESTATUS[0]}
- fi
- case $fstype in
- ldiskfs)
- devicelabel=$(do_facet ${facet} "$E2LABEL ${!dev}");;
- zfs)
devicelabel=$(do_facet ${facet} "$ZFS get -H -o value \
- lustre:svname ${!dev}");;
+ lustre:svname $dm_dev");;
*)
error "unknown fstype!";;
esac
- echo "Starting ${facet}: $opts ${!dev} $mntpt"
+ echo "Starting ${facet}: $opts $dm_dev $mntpt"
# for testing LU-482 error handling in mount_facets() and test_0a()
if [ -f $TMP/test-lu482-trigger ]; then
RC=2
else
- do_facet ${facet} "mkdir -p $mntpt; $MOUNT_CMD $opts \
- ${!dev} $mntpt"
+ do_facet ${facet} \
+ "mkdir -p $mntpt; $MOUNT_CMD $opts $dm_dev $mntpt"
RC=${PIPESTATUS[0]}
fi
if [ $RC -ne 0 ]; then
- echo "Start of ${!dev} on ${facet} failed ${RC}"
+ echo "Start of $dm_dev on ${facet} failed ${RC}"
return $RC
fi
set_default_debug_facet $facet
- if [[ $facet == mds* ]]; then
- do_facet $facet \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 2>/dev/null
- fi
-
if [[ $opts =~ .*nosvc.* ]]; then
- echo "Start ${!dev} without service"
+ echo "Start $dm_dev without service"
else
case $fstype in
ldiskfs)
- wait_update_facet ${facet} "$E2LABEL ${!dev} \
+ wait_update_facet ${facet} "$E2LABEL $dm_dev \
2>/dev/null | grep -E ':[a-zA-Z]{3}[0-9]{4}'" \
- "" || error "${!dev} failed to initialize!";;
+ "" || error "$dm_dev failed to initialize!";;
zfs)
wait_update_facet ${facet} "$ZFS get -H -o value \
- lustre:svname ${!dev} 2>/dev/null | \
+ lustre:svname $dm_dev 2>/dev/null | \
grep -E ':[a-zA-Z]{3}[0-9]{4}'" "" ||
- error "${!dev} failed to initialize!";;
+ error "$dm_dev failed to initialize!";;
*)
error "unknown fstype!";;
fi
- label=$(devicelabel ${facet} ${!dev})
- [ -z "$label" ] && echo no label for ${!dev} && exit 1
+ label=$(devicelabel ${facet} $dm_dev)
+ [ -z "$label" ] && echo no label for $dm_dev && exit 1
eval export ${facet}_svc=${label}
echo Started ${label}
+ export_dm_dev $facet $dm_dev
+
return $RC
}
shift
local device=$1
shift
- eval export ${facet}_dev=${device}
+ local dev_alias=$(facet_device_alias $facet)
+
+ eval export ${dev_alias}_dev=${device}
eval export ${facet}_opt=\"$@\"
- local varname=${facet}failover_dev
+ local varname=${dev_alias}failover_dev
if [ -n "${!varname}" ] ; then
- eval export ${facet}failover_dev=${!varname}
+ eval export ${dev_alias}failover_dev=${!varname}
else
- eval export ${facet}failover_dev=$device
+ eval export ${dev_alias}failover_dev=$device
fi
local mntpt=$(facet_mntpt $facet)
mount_facet ${facet}
RC=$?
- if [[ $facet == mds* ]]; then
- do_facet $facet \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- fi
-
return $RC
}
stop() {
- local running
- local facet=$1
- shift
- local HOST=`facet_active_host $facet`
- [ -z $HOST ] && echo stop: no host for $facet && return 0
-
- local mntpt=$(facet_mntpt $facet)
- running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
- if [ ${running} -ne 0 ]; then
- echo "Stopping $mntpt (opts:$@) on $HOST"
- do_facet ${facet} $UMOUNT $@ $mntpt
- fi
-
- # umount should block, but we should wait for unrelated obd's
+ local running
+ local facet=$1
+ shift
+ local HOST=$(facet_active_host $facet)
+ [[ -z $HOST ]] && echo stop: no host for $facet && return 0
+
+ local mntpt=$(facet_mntpt $facet)
+ running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts || true")
+ if [ ${running} -ne 0 ]; then
+ echo "Stopping $mntpt (opts:$@) on $HOST"
+ do_facet ${facet} $UMOUNT $@ $mntpt
+ fi
+
+ # umount should block, but we should wait for unrelated obd's
# like the MGS or MGC to also stop.
wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
if [[ $(facet_fstype $facet) == zfs ]]; then
# export ZFS storage pool
[ "$KEEP_ZPOOL" = "true" ] || export_zpool $facet
+ elif dm_flakey_supported $facet; then
+ local host=${facet}_HOST
+ local failover_host=${facet}failover_HOST
+ if [[ -n ${!failover_host} && ${!failover_host} != ${!host} ]]||
+ $CLEANUP_DM_DEV || [[ $facet = fs* ]]; then
+ dm_cleanup_dev $facet
+ fi
fi
}
-# save quota version (both administrative and operational quotas)
-# add an additional parameter if mountpoint is ever different from $MOUNT
-#
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-# it should be removed whenever we drop the interoperability for such
-# server.
-quota_save_version() {
- local fsname=${2:-$FSNAME}
- local spec=$1
- local ver=$(tr -c -d "123" <<< $spec)
- local type=$(tr -c -d "ug" <<< $spec)
-
- [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
-
- [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
-
- do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec"
- local varsvc
- local osts=$(get_facets OST)
- for ost in ${osts//,/ }; do
- varsvc=${ost}_svc
- do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
- done
-}
-
-# client could mount several lustre
-#
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-# it should be removed whenever we drop the interoperability for such
-# server.
-quota_type() {
- local fsname=${1:-$FSNAME}
- local rc=0
- do_facet $SINGLEMDS lctl get_param mdd.${fsname}-MDT*.quota_type ||
- rc=$?
- do_nodes $(comma_list $(osts_nodes)) \
- lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
- return $rc
-}
-
# get mdt quota type
mdt_quota_type() {
local varsvc=${SINGLEMDS}_svc
# restore old quota type settings
restore_quota() {
if [ "$old_MDT_QUOTA_TYPE" ]; then
- do_facet mgs $LCTL conf_param \
- $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs $PERM_CMD \
+ osd-*.$FSNAME-MDT*.quota_slave.enable = \
+ $old_MDT_QUOTA_TYPE
+ else
+ do_facet mgs $PERM_CMD \
+ $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
+ fi
fi
if [ "$old_OST_QUOTA_TYPE" ]; then
- do_facet mgs $LCTL conf_param \
- $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs $PERM_CMD \
+ osd-*.$FSNAME-OST*.quota_slave.enable = \
+ $old_OST_QUOTA_TYPE
+ else
+ do_facet mgs $LCTL conf_param \
+ $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
+ fi
fi
}
export old_MDT_QUOTA_TYPE=$mdt_qtype
export old_OST_QUOTA_TYPE=$ost_qtype
- do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$QUOTA_TYPE ||
- error "set mdt quota type failed"
- do_facet mgs $LCTL conf_param $FSNAME.quota.ost=$QUOTA_TYPE ||
- error "set ost quota type failed"
+ if [[ $PERM_CMD == *"set_param -P"* ]]; then
+ do_facet mgs $PERM_CMD \
+ osd-*.$FSNAME-MDT*.quota_slave.enable=$QUOTA_TYPE
+ do_facet mgs $PERM_CMD \
+ osd-*.$FSNAME-OST*.quota_slave.enable=$QUOTA_TYPE
+ else
+ do_facet mgs $PERM_CMD $FSNAME.quota.mdt=$QUOTA_TYPE ||
+ error "set mdt quota type failed"
+ do_facet mgs $PERM_CMD $FSNAME.quota.ost=$QUOTA_TYPE ||
+ error "set ost quota type failed"
+ fi
local quota_usrs=$QUOTA_USERS
exit 1
fi
+ if $GSS_SK; then
+ # update mount option with skpath
+ opts=$(add_sk_mntflag $opts)
+ fi
+
echo "Starting client: $client: $flags $opts $device $mnt"
do_node $client mkdir -p $mnt
if [ -n "$FILESET" -a -z "$SKIP_FILESET" ];then
}
zconf_umount() {
- local client=$1
- local mnt=$2
- local force
- local busy
- local need_kill
-
- [ "$3" ] && force=-f
- local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
- if [ $running -ne 0 ]; then
- echo "Stopping client $client $mnt (opts:$force)"
- do_node $client lsof -t $mnt || need_kill=no
- if [ "x$force" != "x" -a "x$need_kill" != "xno" ]; then
- pids=$(do_node $client lsof -t $mnt | sort -u);
- if [ -n $pids ]; then
- do_node $client kill -9 $pids || true
- fi
- fi
+ local client=$1
+ local mnt=$2
+ local force
+ local busy
+ local need_kill
+ local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
+
+ [ "$3" ] && force=-f
+ [ $running -eq 0 ] && return 0
+
+ echo "Stopping client $client $mnt (opts:$force)"
+ do_node $client lsof -t $mnt || need_kill=no
+ if [ "x$force" != "x" ] && [ "x$need_kill" != "xno" ]; then
+ pids=$(do_node $client lsof -t $mnt | sort -u);
+ if [ -n "$pids" ]; then
+ do_node $client kill -9 $pids || true
+ fi
+ fi
- busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") || true
- if [ $busy -ne 0 ] ; then
- echo "$mnt is still busy, wait one second" && sleep 1
- do_node $client umount $force $mnt
- fi
- fi
+ busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") ||
+ true
+ if [ $busy -ne 0 ] ; then
+ echo "$mnt is still busy, wait one second" && sleep 1
+ do_node $client umount $force $mnt
+ fi
}
-# Mount the file system on the MGS
-mount_mgs_client() {
-	do_facet mgs "mkdir -p $MOUNT"
-	zconf_mount $mgs_HOST $MOUNT $MOUNT_OPTS ||
-		error "unable to mount $MOUNT on MGS"
+# Mount the file system at $MOUNT2 on the MDS node (client-on-MDS mount).
+mount_mds_client() {
+	# ${SINGLEMDS}_HOST names the variable holding the MDS host; expand
+	# it indirectly so this works for any $SINGLEMDS, not just mds1
+	local varname=${SINGLEMDS}_HOST
+	local mds_host=${!varname}
+	echo $mds_host
+	do_facet $SINGLEMDS "mkdir -p $MOUNT2"
+	zconf_mount $mds_host $MOUNT2 $MOUNT_OPTS ||
+		error "unable to mount $MOUNT2 on MDS"
}
-# Unmount the file system on the MGS
-umount_mgs_client() {
-	zconf_umount $mgs_HOST $MOUNT
-	do_facet mgs "rm -rf $MOUNT"
+# Unmount the file system at $MOUNT2 on the MDS node (client-on-MDS mount).
+umount_mds_client() {
+	# indirect expansion: works for any $SINGLEMDS, not just mds1
+	local varname=${SINGLEMDS}_HOST
+	local mds_host=${!varname}
+	zconf_umount $mds_host $MOUNT2
+	do_facet $SINGLEMDS "rm -rf $MOUNT2"
}
# nodes is comma list
fi
echo "Starting client $clients: $flags $opts $device $mnt"
- if [ -n "$FILESET" -a ! -n "$SKIP_FILESET" ]; then
+ do_nodes $clients mkdir -p $mnt
+ if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
# Mount with own nodemap key
local i=0
}
shutdown_facet() {
- local facet=$1
+ local facet=$1
+ local affected_facet
+ local affected_facets
+
+ if [[ "$FAILURE_MODE" = HARD ]]; then
+ if [[ $(facet_fstype $facet) = ldiskfs ]] &&
+ dm_flakey_supported $facet; then
+ affected_facets=$(affected_facets $facet)
+ for affected_facet in ${affected_facets//,/ }; do
+ unexport_dm_dev $affected_facet
+ done
+ fi
- if [ "$FAILURE_MODE" = HARD ]; then
- shutdown_node_hard $(facet_active_host $facet)
- else
- stop $facet
- fi
+ shutdown_node_hard $(facet_active_host $facet)
+ else
+ stop $facet
+ fi
}
reboot_node() {
LFS=$LFS \
LCTL=$LCTL \
FSNAME=$FSNAME \
+ MPIRUN=$MPIRUN \
+ MPIRUN_OPTIONS=\\\"$MPIRUN_OPTIONS\\\" \
+ MACHINEFILE_OPTION=\\\"$MACHINEFILE_OPTION\\\" \
+ num_clients=$(get_node_count ${CLIENTS//,/ }) \
+ ior_THREADS=$ior_THREADS ior_iteration=$ior_iteration \
+ ior_blockSize=$ior_blockSize \
+ ior_blockUnit=$ior_blockUnit \
+ ior_xferSize=$ior_xferSize ior_type=$ior_type \
+ ior_DURATION=$ior_DURATION \
+ ior_stripe_params=\\\"$ior_stripe_params\\\" \
+ ior_custom_params=\\\"$ior_custom_param\\\" \
+ mpi_ior_custom_threads=$mpi_ior_custom_threads \
run_${load}.sh" &
local ppid=$!
log "Started client load: ${load} on $client"
}
start_client_loads () {
- local -a clients=(${1//,/ })
- local numloads=${#CLIENT_LOADS[@]}
- local testnum
+ local -a clients=(${1//,/ })
+ local numloads=${#CLIENT_LOADS[@]}
- for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
- testnum=$((nodenum % numloads))
- start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]}
- done
- # bug 22169: wait the background threads to start
- sleep 2
+ for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
+ local load=$((nodenum % numloads))
+ start_client_load ${clients[nodenum]} ${CLIENT_LOADS[load]}
+ done
+ # bug 22169: wait the background threads to start
+ sleep 2
}
# only for remote client
}
wait_zfs_commit() {
+ local zfs_wait=${2:-5}
+
# the occupied disk space will be released
- # only after DMUs are committed
+ # only after TXGs are committed
if [[ $(facet_fstype $1) == zfs ]]; then
- echo "sleep $2 for ZFS OSD"
- sleep $2
+ echo "sleep $zfs_wait for ZFS $(facet_fstype $1)"
+ sleep $zfs_wait
+ fi
+}
+
+# Write enough data to OST $2 so its free space drops below low watermark $3.
+# usage: fill_ost <filename-prefix> <ost_idx> <low_watermark_mb>
+# The fill file is $DIR/${filename}.fill_ost<ost_idx>; it is removed later
+# by ost_watermarks_enospc_delete_files().
+fill_ost() {
+	local filename=$1
+	local ost_idx=$2
+	local lwm=$3 #low watermark
+	local size_mb #how many MB should we write to pass watermark
+	local ost_name=$(ostname_from_index $ost_idx)
+	local free_kb
+
+	free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
+	size_mb=0
+	if (( $free_kb / 1024 > lwm )); then
+		size_mb=$((free_kb / 1024 - lwm))
+	fi
+	#If 10% of free space cross low watermark use it
+	if (( $free_kb / 10240 > size_mb )); then
+		size_mb=$((free_kb / 10240))
+	else
+		#At least we need to store 1.1 of difference between
+		#free space and low watermark
+		size_mb=$((size_mb + size_mb / 10))
+	fi
+	if (( lwm <= $free_kb / 1024 )) ||
+	   [ ! -f $DIR/${filename}.fill_ost$ost_idx ]; then
+		$LFS setstripe -i $ost_idx -c1 $DIR/${filename}.fill_ost$ost_idx
+		dd if=/dev/zero of=$DIR/${filename}.fill_ost$ost_idx bs=1M \
+			count=$size_mb oflag=append conv=notrunc
+	fi
+
+	sleep_maxage
+
+	free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
+	echo "OST still has $((free_kb / 1024)) MB free"
+}
+
+# This checks only the primary MDS
+# usage: ost_watermarks_get <ost_idx>
+# output: "<lwm_mb> <hwm_mb>" (reserved_mb_low / reserved_mb_high as seen
+# through the MDT's OSP device for the given OST) on stdout
+ost_watermarks_get() {
+	local ost_idx=$1
+	local ost_name=$(ostname_from_index $ost_idx)
+	local mdtosc_proc=$(get_mdtosc_proc_path $SINGLEMDS $ost_name)
+
+	local hwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		osp.$mdtosc_proc.reserved_mb_high)
+	local lwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
+		osp.$mdtosc_proc.reserved_mb_low)
+
+	echo "$lwm $hwm"
+}
+
+# Note that we set watermarks on all MDSes (necessary for striped dirs)
+# usage: ost_watermarks_set <ost_idx> <lwm_mb> <hwm_mb>
+ost_watermarks_set() {
+	local ost_idx=$1
+	local lwm=$2
+	local hwm=$3
+	local ost_name=$(ostname_from_index $ost_idx)
+
+	do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
+		osp.*$ost_name*.reserved_mb_low=$lwm \
+		osp.*$ost_name*.reserved_mb_high=$hwm > /dev/null
+
+	# sleep to ensure we see the change
+	sleep_maxage
+}
+
+# Lower the watermarks on OST $1 so only ~50MB of free space stays above the
+# low watermark, making it cheap for fill_ost() to reach it.
+# usage: ost_watermarks_set_low_space <ost_idx>
+# output: "watermarks: <old_lwm> <old_hwm> <new_lwm> <new_hwm>" on stdout,
+# so the caller can restore the old values later
+ost_watermarks_set_low_space() {
+	local ost_idx=$1
+	local wms=$(ost_watermarks_get $ost_idx)
+	local ost_name=$(ostname_from_index $ost_idx)
+
+	local old_lwm=$(echo $wms | awk '{ print $1 }')
+	local old_hwm=$(echo $wms | awk '{ print $2 }')
+
+	local blocks=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
+	# minimal extension size is 64M
+	local new_lwm=50
+	if (( $blocks / 1024 > 50 )); then
+		new_lwm=$((blocks / 1024 - 50))
	fi
+	local new_hwm=$((new_lwm + 5))
+
+	ost_watermarks_set $ost_idx $new_lwm $new_hwm
+	echo "watermarks: $old_lwm $old_hwm $new_lwm $new_hwm"
+}
+
+# Set watermarks to ~current available space & then write data to fill it
+# Note OST is not *actually* full after this, it just reports ENOSPC in the
+# internal statfs used by the stripe allocator
+#
+# first parameter is the filename-prefix, which must get under t-f cleanup
+# requirements (rm -rf $DIR/[Rdfs][0-9]*), i.e. $tfile work fine
+# output: the watermark string from ost_watermarks_set_low_space(), to be
+# passed back to ost_watermarks_clear_enospc() for restore
+ost_watermarks_set_enospc() {
+	local filename=$1
+	local ost_idx=$2
+	# on the mdt's osc
+	local ost_name=$(ostname_from_index $ost_idx)
+	local facets=$(get_facets MDS)
+	local wms
+	local MDS
+
+	# bail out early if any MDS lacks the reserved_mb_* tunables
+	for MDS in ${facets//,/ }; do
+		local mdtosc_proc=$(get_mdtosc_proc_path $MDS $ost_name)
+
+		do_facet $MDS $LCTL get_param -n \
+			osp.$mdtosc_proc.reserved_mb_high ||
+			skip  "remote MDS does not support reserved_mb_high"
+	done
+
+	wms=$(ost_watermarks_set_low_space $ost_idx)
+	local new_lwm=$(echo $wms | awk '{ print $4 }')
+	fill_ost $filename $ost_idx $new_lwm
+	#First enospc could execute orphan deletion so repeat
+	fill_ost $filename $ost_idx $new_lwm
+	echo $wms
+}
+
+# Remove the fill file created by fill_ost() for OST $2 and wait until the
+# space is actually released on the OSTs.
+# usage: ost_watermarks_enospc_delete_files <filename-prefix> <ost_idx>
+ost_watermarks_enospc_delete_files() {
+	local filename=$1
+	local ost_idx=$2
+
+	rm -f $DIR/${filename}.fill_ost$ost_idx
+
+	wait_delete_completed
+	wait_mds_ost_sync
+}
+
+# clean up from "ost_watermarks_set_enospc"
+ost_watermarks_clear_enospc() {
+ local filename=$1
+ local ost_idx=$2
+ local old_lwm=$4
+ local old_hwm=$5
+
+ ost_watermarks_enospc_delete_files $filename $ost_idx
+ ost_watermarks_set $ost_idx $old_lwm $old_hwm
+ echo "set OST$ost_idx lwm back to $old_lwm, hwm back to $old_hwm"
}
wait_delete_completed_mds() {
- local MAX_WAIT=${1:-20}
- # for ZFS, waiting more time for DMUs to be committed
- local ZFS_WAIT=${2:-5}
+ local max_wait=${1:-20}
local mds2sync=""
local stime=$(date +%s)
local etime
mds2sync="$mds2sync $node"
done
if [ -z "$mds2sync" ]; then
- wait_zfs_commit $SINGLEMDS $ZFS_WAIT
- return
+ wait_zfs_commit $SINGLEMDS
+ return 0
fi
mds2sync=$(comma_list $mds2sync)
# do this upon commit
local WAIT=0
- while [[ $WAIT -ne $MAX_WAIT ]]; do
+ while [[ $WAIT -ne $max_wait ]]; do
changes=$(do_nodes $mds2sync \
"$LCTL get_param -n osc.*MDT*.sync_*" | calc_sum)
#echo "$node: $changes changes on all"
if [[ $changes -eq 0 ]]; then
- wait_zfs_commit $SINGLEMDS $ZFS_WAIT
- return
+ wait_zfs_commit $SINGLEMDS
+
+ # the occupied disk space will be released
+ # only after TXGs are committed
+ wait_zfs_commit ost1
+ return 0
fi
sleep 1
- WAIT=$(( WAIT + 1))
+ WAIT=$((WAIT + 1))
done
etime=$(date +%s)
echo "Delete is not completed in $((etime - stime)) seconds"
do_nodes $mds2sync "$LCTL get_param osc.*MDT*.sync_*"
+ return 1
}
wait_for_host() {
done
# show which nodes are not finished.
+ cmd=$(echo $cmd | sed 's/-n//')
do_nodes $list "$cmd"
echo "$facet recovery node $i not done in $WAIT_TIMEOUT sec. $STATUS"
return 1
wait_delete_completed() {
wait_delete_completed_mds $1 || return $?
- wait_destroy_complete
+ wait_destroy_complete || return $?
}
wait_exit_ST () {
local running
# conf-sanity 31 takes a long time cleanup
while [ $WAIT -lt 300 ]; do
- running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null && lctl dl | grep ' ST '") || true
+ running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null &&
+lctl dl | grep ' ST ' || true")
[ -z "${running}" ] && return 0
echo "waited $WAIT for${running}"
[ $INTERVAL -lt 64 ] && INTERVAL=$((INTERVAL + INTERVAL))
change_active ${affecteds[index]}
wait_for_facet ${affecteds[index]}
+ if $GSS_SK; then
+ init_gss
+ init_facets_vars_simple
+ fi
# start mgs first if it is affected
if ! combined_mgs_mds &&
list_member ${affecteds[index]} mgs; then
affected=$(exclude_items_from_list ${affecteds[index]} mgs)
echo mount facets: ${affecteds[index]}
mount_facets ${affecteds[index]}
+ if $GSS_SK; then
+ do_nodes $(comma_list $(all_nodes)) \
+ "keyctl show | grep lustre | cut -c1-11 |
+ sed -e 's/ //g;' |
+ xargs -IX keyctl setperm X 0x3f3f3f3f"
+ fi
done
}
# handled by stop() and mount_facet() separately, which are used
# inside fail() and fail_abort().
#
- do_facet $facet $LCTL --device ${!svc} readonly
+ set_dev_readonly $facet
do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
$LCTL mark "local REPLAY BARRIER on ${!svc}"
}
local svc=${facet}_svc
echo Replay barrier on ${!svc}
do_facet $facet $LCTL --device ${!svc} notransno
- do_facet $facet $LCTL --device ${!svc} readonly
+ set_dev_readonly $facet
do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
$LCTL mark "local REPLAY BARRIER on ${!svc}"
}
local svc=${facet}_svc
echo Replay barrier on ${!svc}
do_facet $facet $LCTL --device ${!svc} notransno
- do_facet $facet $LCTL --device ${!svc} readonly
+ set_dev_readonly $facet
do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
$LCTL mark "local REPLAY BARRIER on ${!svc}"
}
local facets=$1
local clients=${CLIENTS:-$HOSTNAME}
+ SK_NO_KEY_save=$SK_NO_KEY
+ if $GSS_SK; then
+ export SK_NO_KEY=false
+ fi
facet_failover $* || error "failover: $?"
- wait_clients_import_state "$clients" "$facets" FULL
+ export SK_NO_KEY=$SK_NO_KEY_save
+ # to initiate all OSC idling connections
+ clients_up
+ wait_clients_import_state "$clients" "$facets" "\(FULL\|IDLE\)"
clients_up || error "post-failover stat: $?"
}
$myPDSH $HOST "$LCTL mark \"$@\"" > /dev/null 2>&1 || :
fi
- if [ "$myPDSH" = "rsh" ]; then
-# we need this because rsh does not return exit code of an executed command
- local command_status="$TMP/cs"
- rsh $HOST ":> $command_status"
- rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin;
- cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\") ||
- echo command failed >$command_status"
- [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
- return 0
- fi
+ if [[ "$myPDSH" == "rsh" ]] ||
+ [[ "$myPDSH" == *pdsh* && "$myPDSH" != *-S* ]]; then
+ # we need this because rsh and pdsh do not return
+ # exit code of an executed command
+ local command_status="$TMP/cs"
+ eval $myPDSH $HOST ":> $command_status"
+ eval $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests;
+ PATH=\$PATH:/sbin:/usr/sbin;
+ cd $RPWD;
+ LUSTRE=\"$RLUSTRE\" sh -c \"$@\") ||
+ echo command failed >$command_status"
+ [[ -n "$($myPDSH $HOST cat $command_status)" ]] && return 1 ||
+ return 0
+ fi
if $verbose ; then
# print HOSTNAME for myPDSH="no_dsh"
case $fstype in
ldiskfs )
+ local dev=ost${num}_dev
+ [[ -n ${!dev} ]] && eval DEVPTR=${!dev} ||
#if $OSTDEVn isn't defined, default is $OSTDEVBASE + num
eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
zfs )
case $fstype in
ldiskfs )
+ local dev=mds${num}_dev
+ [[ -n ${!dev} ]] && eval DEVPTR=${!dev} ||
#if $MDSDEVn isn't defined, default is $MDSDEVBASE{n}
eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
zfs )
case $fstype in
ldiskfs )
if [ $(facet_host mgs) = $(facet_host mds1) ] &&
- ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsdevname 1) ] ); then
+ ( [ -z "$MGSDEV" ] || [ $MGSDEV = $MDSDEV1 ] ); then
DEVPTR=$(mdsdevname 1)
else
+ [[ -n $mgs_dev ]] && DEVPTR=$mgs_dev ||
DEVPTR=$MGSDEV
fi;;
zfs )
local dev=$(facet_device $facet)
local mnt=${2:-$(facet_mntpt $facet)}
local opts
+ local dm_dev=$dev
- if ! do_facet $facet test -b $dev; then
- opts="-o loop"
+ if dm_flakey_supported $facet; then
+ dm_dev=$(dm_create_dev $facet $dev)
+ [[ -n "$dm_dev" ]] || dm_dev=$dev
fi
- do_facet $facet mount -t ldiskfs $opts $dev $mnt
+ is_blkdev $facet $dm_dev || opts=$(csa_add "$opts" -o loop)
+ export_dm_dev $facet $dm_dev
+
+ do_facet $facet mount -t ldiskfs $opts $dm_dev $mnt
}
unmount_ldiskfs() {
## MountConf setup
stopall() {
- # make sure we are using the primary server, so test-framework will
- # be able to clean up properly.
- activemds=`facet_active mds1`
- if [ $activemds != "mds1" ]; then
- fail mds1
- fi
+ # make sure we are using the primary server, so test-framework will
+ # be able to clean up properly.
+ activemds=`facet_active mds1`
+ if [ $activemds != "mds1" ]; then
+ fail mds1
+ fi
- local clients=$CLIENTS
- [ -z $clients ] && clients=$(hostname)
+ local clients=$CLIENTS
+ [ -z $clients ] && clients=$(hostname)
- zconf_umount_clients $clients $MOUNT "$*" || true
- [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
+ zconf_umount_clients $clients $MOUNT "$*" || true
+ [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
- [ -n "$CLIENTONLY" ] && return
+ [ -n "$CLIENTONLY" ] && return
- # The add fn does rm ${facet}active file, this would be enough
- # if we use do_facet <facet> only after the facet added, but
- # currently we use do_facet mds in local.sh
- for num in `seq $MDSCOUNT`; do
- stop mds$num -f
- rm -f ${TMP}/mds${num}active
- done
- combined_mgs_mds && rm -f $TMP/mgsactive
+ # The add fn does rm ${facet}active file, this would be enough
+ # if we use do_facet <facet> only after the facet added, but
+ # currently we use do_facet mds in local.sh
+ for num in `seq $MDSCOUNT`; do
+ stop mds$num -f
+ rm -f ${TMP}/mds${num}active
+ done
+ combined_mgs_mds && rm -f $TMP/mgsactive
- for num in `seq $OSTCOUNT`; do
- stop ost$num -f
- rm -f $TMP/ost${num}active
- done
+ for num in `seq $OSTCOUNT`; do
+ stop ost$num -f
+ rm -f $TMP/ost${num}active
+ done
- if ! combined_mgs_mds ; then
- stop mgs
- fi
+ if ! combined_mgs_mds ; then
+ stop mgs
+ fi
- return 0
+ if $SHARED_KEY; then
+ export SK_MOUNTED=false
+ fi
+
+ return 0
}
cleanup_echo_devs () {
nfs_client_mode && return
cifs_client_mode && return
- stopall $*
cleanup_echo_devs
+ CLEANUP_DM_DEV=true stopall $*
unload_modules
cleanup_sk
echo -n "$1" | tr '[:lower:]' '[:upper:]'
}
+# Merge repeated "-O <features>" and "-E <extended-opts>" groups from a mkfs
+# option string into a single comma-joined -O and a single -E (mke2fs only
+# honours one occurrence of each), leaving all other words untouched.
+# usage: squash_opt <mkfs options ...>
+# output: the squashed option string on stdout
+squash_opt() {
+	local var="$*"
+	local other=""
+	local opt_o=""
+	local opt_e=""
+	local first_e=0
+	local first_o=0
+	local take=""
+
+	# normalize ", " to "," so each -O/-E value stays a single word
+	var=$(echo "$var" | sed -e 's/,\( \)*/,/g')
+	for i in $(echo "$var"); do
+		if [ "$i" == "-O" ]; then
+			take="o";
+			first_o=$(($first_o + 1))
+			continue;
+		fi
+		if [ "$i" == "-E" ]; then
+			take="e";
+			first_e=$(($first_e + 1 ))
+			continue;
+		fi
+		# "take" holds which flag the current word belongs to, if any
+		case $take in
+		"o")
+			[ $first_o -gt 1 ] && opt_o+=",";
+			opt_o+="$i";
+			;;
+		"e")
+			[ $first_e -gt 1 ] && opt_e+=",";
+			opt_e+="$i";
+			;;
+		*)
+			other+=" $i";
+			;;
+		esac
+		take=""
+	done
+
+	echo -n "$other"
+	[ -n "$opt_o" ] && echo " -O $opt_o"
+	[ -n "$opt_e" ] && echo " -E $opt_e"
+}
+
mkfs_opts() {
local facet=$1
local dev=$2
if [ $type == MDS ]; then
opts+=${MDSCAPA:+" --param-mdt.capa=$MDSCAPA"}
- opts+=${STRIPE_BYTES:+" --param=lov.stripesize=$STRIPE_BYTES"}
- opts+=${STRIPES_PER_OBJ:+" --param=lov.stripecount=$STRIPES_PER_OBJ"}
+ opts+=${DEF_STRIPE_SIZE:+" --param=lov.stripesize=$DEF_STRIPE_SIZE"}
+ opts+=${DEF_STRIPE_COUNT:+" --param=lov.stripecount=$DEF_STRIPE_COUNT"}
opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"}
if [ $fstype == ldiskfs ]; then
- # Check for wide striping
- if [ $OSTCOUNT -gt 160 ]; then
- MDSJOURNALSIZE=${MDSJOURNALSIZE:-4096}
- fs_mkfs_opts+="-O large_xattr"
- fi
+ fs_mkfs_opts+="-O ea_inode,large_dir"
var=${facet}_JRN
if [ -n "${!var}" ]; then
var=${type}_FS_MKFS_OPTS
fs_mkfs_opts+=${!var:+" ${!var}"}
+ [ $fstype == ldiskfs ] && fs_mkfs_opts=$(squash_opt $fs_mkfs_opts)
+
if [ -n "${fs_mkfs_opts## }" ]; then
opts+=" --mkfsoptions=\\\"${fs_mkfs_opts## }\\\""
fi
[[ ! "$device" =~ ^/dev/ ]] || [[ "$device" =~ ^/dev/shm/ ]] ||
error "$facet: device '$device' does not exist"
+ # zpool create doesn't like empty files
+ [[ $(facet_fstype $facet) == zfs ]] && return 0
+
do_facet $facet "touch \"${device}\""
}
# (Assumes MDS version is also OSS version)
if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.8.54) ];
then
- do_rpc_nodes "$(comma_list $(remote_nodes_list))" set_hostid
+ do_rpc_nodes "$(comma_list $(all_server_nodes))" set_hostid
fi
# We need ldiskfs here, may as well load them all
}
mountmds() {
+ local num
+ local devname
+ local host
+ local varname
for num in $(seq $MDSCOUNT); do
- DEVNAME=$(mdsdevname $num)
- start mds$num $DEVNAME $MDS_MOUNT_OPTS
-
- # We started mds, now we should set failover variables properly.
- # Set mds${num}failover_HOST if unset (the default
- # failnode).
- local varname=mds${num}failover_HOST
- if [ -z "${!varname}" ]; then
- eval mds${num}failover_HOST=$(facet_host mds$num)
- fi
-
+ devname=$(mdsdevname $num)
+ start mds$num $devname $MDS_MOUNT_OPTS
+
+ # We started mds$num, now we should set mds${num}_HOST
+ # and mds${num}failover_HOST variables properly if they
+ # are not set.
+ host=$(facet_host mds$num)
+ for varname in mds${num}_HOST mds${num}failover_HOST; do
+ if [[ -z "${!varname}" ]]; then
+ eval $varname=$host
+ fi
+ done
if [ $IDENTITY_UPCALL != "default" ]; then
switch_identity $num $IDENTITY_UPCALL
fi
}
mountoss() {
+ local num
+ local devname
+ local host
+ local varname
for num in $(seq $OSTCOUNT); do
- DEVNAME=$(ostdevname $num)
- start ost$num $DEVNAME $OST_MOUNT_OPTS
-
- # We started ost$num, now we should set ost${num}failover
- # variable properly. Set ost${num}failover_HOST if it is not
- # set (the default failnode).
- varname=ost${num}failover_HOST
- if [ -z "${!varname}" ]; then
- eval ost${num}failover_HOST=$(facet_host ost${num})
- fi
-
+ devname=$(ostdevname $num)
+ start ost$num $devname $OST_MOUNT_OPTS
+
+ # We started ost$num, now we should set ost${num}_HOST
+ # and ost${num}failover_HOST variables properly if they
+ # are not set.
+ host=$(facet_host ost$num)
+ for varname in ost${num}_HOST ost${num}failover_HOST; do
+ if [[ -z "${!varname}" ]]; then
+ eval $varname=$host
+ fi
+ done
done
}
if $GSS_SK; then
set_rule $FSNAME any cli2mdt $SK_FLAVOR
set_rule $FSNAME any cli2ost $SK_FLAVOR
- wait_flavor cli2mdt $SK_FLAVOR
- wait_flavor cli2ost $SK_FLAVOR
+ if $SK_SKIPFIRST; then
+ export SK_SKIPFIRST=false
+
+ sleep 30
+ do_nodes $CLIENTS \
+ "lctl set_param osc.*.idle_connect=1"
+ return
+ else
+ wait_flavor cli2mdt $SK_FLAVOR
+ wait_flavor cli2ost $SK_FLAVOR
+ fi
else
set_flavor_all $SEC
fi
fi
}
+# Export mds${N}_dev/mds${N}_opt, mgs_dev/mgs_opt and ost${N}_dev/ost${N}_opt
+# for facets reachable without DSH, using only locally computed device names
+# (no remote queries) — a lightweight alternative to full facet init.
+# usage: init_facets_vars_simple
+init_facets_vars_simple () {
+	local devname
+
+	if ! remote_mds_nodsh; then
+		for num in $(seq $MDSCOUNT); do
+			devname=$(mdsdevname $num)
+			eval export mds${num}_dev=${devname}
+			eval export mds${num}_opt=\"${MDS_MOUNT_OPTS}\"
+		done
+	fi
+
+	# a separate MGS only exists when it is not combined with mds1
+	if ! combined_mgs_mds ; then
+		eval export mgs_dev=$(mgsdevname)
+		eval export mgs_opt=\"${MGS_MOUNT_OPTS}\"
+	fi
+
+	if ! remote_ost_nodsh; then
+		for num in $(seq $OSTCOUNT); do
+			devname=$(ostdevname $num)
+			eval export ost${num}_dev=${devname}
+			eval export ost${num}_opt=\"${OST_MOUNT_OPTS}\"
+		done
+	fi
+}
+
osc_ensure_active () {
local facet=$1
local timeout=$2
error "check $PARAM failed!"
}
+# Persistently set a parameter, using "lctl set_param -P" when $PERM_CMD
+# indicates the MGS supports it, and the legacy conf_param name otherwise.
+# usage: set_persistent_param <facet> <test_param> <conf_param> [value]
+#   facet      - facet to read the current value from
+#   test_param - set_param-style parameter name
+#   conf_param - conf_param-style parameter name
+#   value      - new value; defaults to current value + 5 if omitted
+set_persistent_param() {
+	local myfacet=$1
+	local test_param=$2
+	local param=$3
+	local orig=$(do_facet $myfacet "$LCTL get_param -n $test_param")
+
+	if [ $# -gt 3 ]; then
+		local final=$4
+	else
+		local -i final
+		final=$((orig + 5))
+	fi
+
+	if [[ $PERM_CMD == *"set_param -P"* ]]; then
+		echo "Setting $test_param from $orig to $final"
+		do_facet mgs "$PERM_CMD $test_param='$final'" ||
+			error "$PERM_CMD $test_param failed"
+	else
+		echo "Setting $param from $orig to $final"
+		do_facet mgs "$PERM_CMD $param='$final'" ||
+			error "$PERM_CMD $param failed"
+	fi
+}
+
+# Like set_persistent_param(), but additionally wait until the facet
+# actually reports the new value, and error out if it never does.
+# usage: set_persistent_param_and_check <facet> <test_param> <conf_param>
+#        [value]  (value defaults to current value + 5)
+set_persistent_param_and_check() {
+	local myfacet=$1
+	local test_param=$2
+	local param=$3
+	local orig=$(do_facet $myfacet "$LCTL get_param -n $test_param")
+
+	if [ $# -gt 3 ]; then
+		local final=$4
+	else
+		local -i final
+		final=$((orig + 5))
+	fi
+
+	set_persistent_param $myfacet $test_param $param "$final"
+
+	wait_update_facet $myfacet "$LCTL get_param -n $test_param" "$final" ||
+		error "check $param failed!"
+}
+
init_param_vars () {
TIMEOUT=$(lctl get_param -n timeout)
TIMEOUT=${TIMEOUT:-20}
+ if [ -n "$arg1" ]; then
+ [ "$arg1" = "server_only" ] && return
+ fi
+
remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
osc_ensure_active $SINGLEMDS $TIMEOUT
osc_ensure_active client $TIMEOUT
+ $LCTL set_param osc.*.idle_timeout=debug
if [ -n "$(lctl get_param -n mdc.*.connect_flags|grep jobstats)" ]; then
local current_jobid_var=$($LCTL get_param -n jobid_var)
if [ $JOBID_VAR = "existing" ]; then
echo "keeping jobstats as $current_jobid_var"
elif [ $current_jobid_var != $JOBID_VAR ]; then
- echo "seting jobstats to $JOBID_VAR"
+ echo "setting jobstats to $JOBID_VAR"
- set_conf_param_and_check client \
- "$LCTL get_param -n jobid_var" \
- "$FSNAME.sys.jobid_var" $JOBID_VAR
+ set_persistent_param_and_check client \
+ "jobid_var" "$FSNAME.sys.jobid_var" $JOBID_VAR
fi
else
echo "jobstats not supported by server"
# $LFS quotaoff -ug $MOUNT > /dev/null 2>&1
fi
fi
+
+ do_nodes $(comma_list $(mdts_nodes)) \
+ "$LCTL set_param lod.*.mdt_hash=crush"
return 0
}
}
is_mounted () {
- local mntpt=$1
- [ -z $mntpt ] && return 1
- local mounted=$(mounted_lustre_filesystems)
+ local mntpt=$1
+ [ -z $mntpt ] && return 1
+ local mounted=$(mounted_lustre_filesystems)
- echo $mounted' ' | grep -w -q $mntpt' '
+ echo $mounted' ' | grep -w -q $mntpt' '
}
is_empty_dir() {
# 1.
# both MOUNT and MOUNT2 are not mounted
if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
- [ "$REFORMAT" = "yes" ] && formatall
+ [ "$REFORMAT" = "yes" ] && CLEANUP_DM_DEV=true formatall
# setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
setupall
is_mounted $MOUNT || error "NAME=$NAME not mounted"
fi
fi
- init_gss
+ if [ -n "$fs_STRIPEPARAMS" ]; then
+ setstripe_getstripe $MOUNT $fs_STRIPEPARAMS
+ fi
if $GSS_SK; then
set_flavor_all null
elif $GSS; then
set_flavor_all $SEC
fi
- if [ -z "$CLIENTONLY" ]; then
- # Enable remote MDT create for testing
- for num in $(seq $MDSCOUNT); do
- do_facet mds$num \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- done
- fi
-
if [ "$ONLY" == "setup" ]; then
exit 0
fi
if [ -n "$(grep "DNE mode isn't supported" $log)" ]; then
rm -f $log
if [ $MDSCOUNT -gt 1 ]; then
- skip "DNE mode isn't supported!"
+ skip_noexit "DNE mode isn't supported!"
cleanupall
exit_status
else
cleanup_mount $MOUNT2
fi
- if [ "$I_MOUNTED" = "yes" ]; then
+ if [[ "$I_MOUNTED" = "yes" ]] && ! $AUSTER_CLEANUP; then
cleanupall -f || error "cleanup failed"
unset I_MOUNTED
fi
drop_request() {
# OBD_FAIL_MDS_ALL_REQUEST_NET
RC=0
- do_facet $SINGLEMDS lctl set_param fail_loc=0x123
+ do_facet $SINGLEMDS lctl set_param fail_val=0 fail_loc=0x123
do_facet client "$1" || RC=$?
do_facet $SINGLEMDS lctl set_param fail_loc=0
return $RC
return $rc
}
-drop_ldlm_reply() {
-#define OBD_FAIL_LDLM_REPLY 0x30c
+drop_mdt_ldlm_reply() {
+#define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
RC=0
- local list=$(comma_list $(mdts_nodes) $(osts_nodes))
- do_nodes $list lctl set_param fail_loc=0x30c
+ local list=$(comma_list $(mdts_nodes))
+ do_nodes $list lctl set_param fail_loc=0x157
do_facet client "$@" || RC=$?
return $RC
}
-drop_ldlm_reply_once() {
-#define OBD_FAIL_LDLM_REPLY 0x30c
+drop_mdt_ldlm_reply_once() {
+#define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
RC=0
- local list=$(comma_list $(mdts_nodes) $(osts_nodes))
- do_nodes $list lctl set_param fail_loc=0x8000030c
+ local list=$(comma_list $(mdts_nodes))
+ do_nodes $list lctl set_param fail_loc=0x80000157
do_facet client "$@" || RC=$?
flock_is_enabled()
{
+ local mountpath=${1:-$MOUNT}
local RC=0
- [ -z "$(mount | grep "$MOUNT.*flock" | grep -v noflock)" ] && RC=1
+
+ [ -z "$(mount | grep "$mountpath .*flock" | grep -v noflock)" ] && RC=1
return $RC
}
}
start_full_debug_logging() {
- debugsave
- debug_size_save
+ debugsave
+ debug_size_save
- local FULLDEBUG=-1
- local DEBUG_SIZE=150
+ local fulldebug=-1
+ local debug_size=150
+ local nodes=$(comma_list $(nodes_list))
- do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE"
- do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=$FULLDEBUG;"
+ do_nodes $nodes "$LCTL set_param debug=$fulldebug debug_mb=$debug_size"
}
stop_full_debug_logging() {
- debug_size_restore
- debugrestore
+ debug_size_restore
+ debugrestore
}
# prints bash call stack
# usage: stack_trap arg sigspec
#
# stack_trap() behaves like bash's built-in trap, except that it "stacks" the
-# command ``arg`` on top of previously defined commands for ``sigspec`` instead
+# command "arg" on top of previously defined commands for "sigspec" instead
# of overwriting them.
# stacked traps are executed in reverse order of their registration
#
stack_trap()
{
local arg="$1"
- local sigspec="$2"
+ local sigspec="${2:-EXIT}"
- local cmd="$(trap -p $sigspec)"
+ # Use "trap -p" to get the quoting right
+ local old_trap="$(trap -p "$sigspec")"
+ # Append ";" and remove the leading "trap -- '" added by "trap -p"
+ old_trap="${old_trap:+"; ${old_trap#trap -- \'}"}"
- cmd="${cmd#trap -- \'}"
- cmd="${cmd%\'*}"
- [ -n "$cmd" ] && cmd="; $cmd"
- cmd="${arg}$cmd"
+ # Once again, use "trap -p" to get the quoting right
+ local new_trap="$(trap -- "$arg" "$sigspec"
+ trap -p "$sigspec"
+ trap -- '' "$sigspec")"
- trap "$cmd" $sigspec
+ # Remove the trailing "' $sigspec" part added by "trap -p" and merge
+ #
+ # The resulting string should be safe to "eval" as it is (supposedly
+ # correctly) quoted by "trap -p"
+ eval "${new_trap%\' $sigspec}${old_trap:-"' $sigspec"}"
}
error_noexit() {
local status=0
local log=$TESTSUITELOG
- [ -f "$log" ] && grep -q FAIL $log && status=1
+ [ -f "$log" ] && grep -qw FAIL $log && status=1
exit $status
}
fi
}
+#
+# Function: skip_env()
+# Purpose: to skip a test during developer testing because some tool
+# is missing, but fail the test in release testing because the test
+# environment is not configured properly".
+#
skip_env () {
$FAIL_ON_SKIP_ENV && error false $@ || skip $@
}
-skip() {
+skip_noexit() {
echo
log " SKIP: $TESTSUITE $TESTNAME $@"
[[ -n "$TESTSUITELOG" ]] &&
echo "$TESTSUITE: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true
+ unset TESTNAME
+}
+
+skip() {
+ skip_noexit $@
+ exit 0
}
build_test_filter() {
- EXCEPT="$EXCEPT $(testslist_filter)"
+ EXCEPT="$EXCEPT $(testslist_filter)"
for O in $ONLY; do
if [[ $O = [0-9]*-[0-9]* ]]; then
fi
done
- [ "$EXCEPT$ALWAYS_EXCEPT" ] && \
- log "excepting tests: `echo $EXCEPT $ALWAYS_EXCEPT`"
- [ "$EXCEPT_SLOW" ] && \
- log "skipping tests SLOW=no: `echo $EXCEPT_SLOW`"
- for E in $EXCEPT; do
- eval EXCEPT_${E}=true
- done
- for E in $ALWAYS_EXCEPT; do
- eval EXCEPT_ALWAYS_${E}=true
- done
- for E in $EXCEPT_SLOW; do
- eval EXCEPT_SLOW_${E}=true
- done
- for G in $GRANT_CHECK_LIST; do
- eval GCHECK_ONLY_${G}=true
- done
+ [ "$EXCEPT$ALWAYS_EXCEPT" ] &&
+ log "excepting tests: `echo $EXCEPT $ALWAYS_EXCEPT`"
+ [ "$EXCEPT_SLOW" ] &&
+ log "skipping tests SLOW=no: `echo $EXCEPT_SLOW`"
+ for E in $EXCEPT; do
+ eval EXCEPT_${E}=true
+ done
+ for E in $ALWAYS_EXCEPT; do
+ eval EXCEPT_ALWAYS_${E}=true
+ done
+ for E in $EXCEPT_SLOW; do
+ eval EXCEPT_SLOW_${E}=true
+ done
+ for G in $GRANT_CHECK_LIST; do
+ eval GCHECK_ONLY_${G}=true
+ done
}
basetest() {
if [[ $1 = [a-z]* ]]; then
echo $1
else
- echo ${1%%[a-z]*}
+ echo ${1%%[a-zA-Z]*}
fi
}
export LAST_SKIPPED=
export ALWAYS_SKIPPED=
#
-# Main entry into test-framework. This is called with the name and
-# description of a test. The name is used to find the function to run
+# Main entry into test-framework. This is called with the number and
+# description of a test. The number is used to find the function to run
-# the test using "test_$name".
+# the test using "test_$testnum".
#
# This supports a variety of methods of specifying specific test to
-# run or not run. These need to be documented...
+# run or not run:
+# - ONLY= env variable with space-separated list of test numbers to run
+# - EXCEPT= env variable with space-separated list of test numbers to exclude
#
run_test() {
assert_DIR
+ local testnum=$1
+ local testmsg=$2
+ export base=$(basetest $testnum)
+ export TESTNAME=test_$testnum
+ LAST_SKIPPED=
+ ALWAYS_SKIPPED=
- export base=$(basetest $1)
+ # Check the EXCEPT, ALWAYS_EXCEPT and SLOW lists to see if we
+ # need to skip the current test. If so, set the ALWAYS_SKIPPED flag.
+ local isexcept=EXCEPT_$testnum
+ local isexcept_base=EXCEPT_$base
+ if [ ${!isexcept}x != x ]; then
+ ALWAYS_SKIPPED="y"
+ skip_message="skipping excluded test $testnum"
+ elif [ ${!isexcept_base}x != x ]; then
+ ALWAYS_SKIPPED="y"
+ skip_message="skipping excluded test $testnum (base $base)"
+ fi
+
+ isexcept=EXCEPT_ALWAYS_$testnum
+ isexcept_base=EXCEPT_ALWAYS_$base
+ if [ ${!isexcept}x != x ]; then
+ ALWAYS_SKIPPED="y"
+ skip_message="skipping ALWAYS excluded test $testnum"
+ elif [ ${!isexcept_base}x != x ]; then
+ ALWAYS_SKIPPED="y"
+ skip_message="skipping ALWAYS excluded test $testnum (base $base)"
+ fi
+
+ isexcept=EXCEPT_SLOW_$testnum
+ isexcept_base=EXCEPT_SLOW_$base
+ if [ ${!isexcept}x != x ]; then
+ ALWAYS_SKIPPED="y"
+ skip_message="skipping SLOW test $testnum"
+ elif [ ${!isexcept_base}x != x ]; then
+ ALWAYS_SKIPPED="y"
+ skip_message="skipping SLOW test $testnum (base $base)"
+ fi
+
+ # If there are tests on the ONLY list, check if the current test
+ # is on that list and, if so, check if the test is to be skipped
+ # and if we are supposed to honor the skip lists.
if [ -n "$ONLY" ]; then
- testname=ONLY_$1
- if [ ${!testname}x != x ]; then
- [ -n "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
- run_one_logged $1 "$2"
- return $?
- fi
- testname=ONLY_$base
- if [ ${!testname}x != x ]; then
- [ -n "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
- run_one_logged $1 "$2"
- return $?
+ local isonly=ONLY_$testnum
+ local isonly_base=ONLY_$base
+ if [[ ${!isonly}x != x || ${!isonly_base}x != x ]]; then
+
+ if [[ -n "$ALWAYS_SKIPPED" && -n "$HONOR_EXCEPT" ]]; then
+ LAST_SKIPPED="y"
+ skip_noexit "$skip_message"
+ return 0
+ else
+ [ -n "$LAST_SKIPPED" ] &&
+ echo "" && LAST_SKIPPED=
+ ALWAYS_SKIPPED=
+ run_one_logged $testnum "$testmsg"
+ return $?
+ fi
+
+ else
+ LAST_SKIPPED="y"
+ return 0
fi
- LAST_SKIPPED="y"
- return 0
fi
- LAST_SKIPPED="y"
- ALWAYS_SKIPPED="y"
- testname=EXCEPT_$1
- if [ ${!testname}x != x ]; then
- TESTNAME=test_$1 skip "skipping excluded test $1"
- return 0
- fi
- testname=EXCEPT_$base
- if [ ${!testname}x != x ]; then
- TESTNAME=test_$1 skip "skipping excluded test $1 (base $base)"
- return 0
- fi
- testname=EXCEPT_ALWAYS_$1
- if [ ${!testname}x != x ]; then
- TESTNAME=test_$1 skip "skipping ALWAYS excluded test $1"
- return 0
- fi
- testname=EXCEPT_ALWAYS_$base
- if [ ${!testname}x != x ]; then
- TESTNAME=test_$1 skip "skipping ALWAYS excluded test $1 (base $base)"
- return 0
- fi
- testname=EXCEPT_SLOW_$1
- if [ ${!testname}x != x ]; then
- TESTNAME=test_$1 skip "skipping SLOW test $1"
- return 0
- fi
- testname=EXCEPT_SLOW_$base
- if [ ${!testname}x != x ]; then
- TESTNAME=test_$1 skip "skipping SLOW test $1 (base $base)"
+ if [ -n "$ALWAYS_SKIPPED" ]; then
+ LAST_SKIPPED="y"
+ skip_noexit "$skip_message"
return 0
+ else
+ run_one_logged $testnum "$testmsg"
+ return $?
fi
-
- LAST_SKIPPED=
- ALWAYS_SKIPPED=
- run_one_logged $1 "$2"
-
- return $?
}
log() {
#
run_one() {
local testnum=$1
- local message=$2
- export tfile=f${testnum}.${TESTSUITE}
- export tdir=d${testnum}.${TESTSUITE}
- export TESTNAME=test_$testnum
+ local testmsg="$2"
local SAVE_UMASK=`umask`
umask 0022
$SETUP
fi
- banner "test $testnum: $message"
+ banner "test $testnum: $testmsg"
test_${testnum} || error "test_$testnum failed with $?"
cd $SAVE_PWD
reset_fail_loc
check_node_health
check_dmesg_for_errors || error "Error in dmesg detected"
if [ "$PARALLEL" != "yes" ]; then
- ps auxww | grep -v grep | grep -q multiop &&
+ ps auxww | grep -v grep | grep -q "multiop " &&
error "multiop still running"
fi
- unset TESTNAME
- unset tdir
- unset tfile
umask $SAVE_UMASK
$CLEANUP
return 0
# - test result is saved to data file
#
run_one_logged() {
- local BEFORE=$(date +%s)
- local TEST_ERROR
- local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log
+ local before=$SECONDS
+ local testnum=$1
+ local testmsg=$2
+ export tfile=f${testnum}.${TESTSUITE}
+ export tdir=d${testnum}.${TESTSUITE}
+ local name=$TESTSUITE.$TESTNAME.test_log.$(hostname -s).log
local test_log=$LOGDIR/$name
- local zfs_log_name=${TESTSUITE}.test_${1}.zfs_log
+ local zfs_log_name=$TESTSUITE.$TESTNAME.zfs_log
local zfs_debug_log=$LOGDIR/$zfs_log_name
- rm -rf $LOGDIR/err
- rm -rf $LOGDIR/ignore
- rm -rf $LOGDIR/skip
local SAVE_UMASK=$(umask)
+ local rc=0
umask 0022
+ rm -f $LOGDIR/err $LOGDIR/ignore $LOGDIR/skip
echo
- log_sub_test_begin test_${1}
- (run_one $1 "$2") 2>&1 | tee -i $test_log
- local RC=${PIPESTATUS[0]}
-
- [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] &&
- echo "test_$1 returned $RC" | tee $LOGDIR/err
-
- duration=$(($(date +%s) - $BEFORE))
- pass "$1" "(${duration}s)"
+ # if ${ONLY_$testnum} set, repeat $ONLY_REPEAT times, otherwise once
+ local isonly=ONLY_$testnum
+ local repeat=${!isonly:+$ONLY_REPEAT}
+
+ for testiter in $(seq ${repeat:-1}); do
+ local before_sub=$SECONDS
+ log_sub_test_begin $TESTNAME
+
+ # remove temp files between repetitions to avoid test failures
+ [ -n "$append" -a -n "$DIR" -a -n "$tdir" -a -n "$tfile" ] &&
+ rm -rf $DIR/$tdir* $DIR/$tfile*
+ # loop around subshell so stack_trap EXIT triggers each time
+ (run_one $testnum "$testmsg") 2>&1 | tee -i $append $test_log
+ rc=${PIPESTATUS[0]}
+ local append=-a
+ local duration_sub=$((SECONDS - before_sub))
+ local test_error
+
+ [[ $rc != 0 && ! -f $LOGDIR/err ]] &&
+ echo "$TESTNAME returned $rc" | tee $LOGDIR/err
+
+ if [[ -f $LOGDIR/err ]]; then
+ test_error=$(cat $LOGDIR/err)
+ TEST_STATUS="FAIL"
+ elif [[ -f $LOGDIR/ignore ]]; then
+ test_error=$(cat $LOGDIR/ignore)
+ elif [[ -f $LOGDIR/skip ]]; then
+ test_error=$(cat $LOGDIR/skip)
+ TEST_STATUS="SKIP"
+ else
+ TEST_STATUS="PASS"
+ fi
- if [[ -f $LOGDIR/err ]]; then
- TEST_ERROR=$(cat $LOGDIR/err)
- elif [[ -f $LOGDIR/ignore ]]; then
- TEST_ERROR=$(cat $LOGDIR/ignore)
- elif [[ -f $LOGDIR/skip ]]; then
- TEST_ERROR=$(cat $LOGDIR/skip)
- fi
- log_sub_test_end $TEST_STATUS $duration "$RC" "$TEST_ERROR"
+ pass "$testnum" "($((SECONDS - before))s)"
+ log_sub_test_end $TEST_STATUS $duration_sub "$rc" "$test_error"
+ [[ $rc != 0 ]] && break
+ done
- if [[ "$TEST_STATUS" != "SKIP" ]] && [[ -f $TF_SKIP ]]; then
+ if [[ "$TEST_STATUS" != "SKIP" && -f $TF_SKIP ]]; then
rm -f $TF_SKIP
fi
if [ -f $LOGDIR/err ]; then
log_zfs_info "$zfs_debug_log"
- $FAIL_ON_ERROR && exit $RC
+ $FAIL_ON_ERROR && exit $rc
fi
umask $SAVE_UMASK
+ unset TESTNAME
+ unset tdir
+ unset tfile
+
return 0
}
(cd $(dirname $1); echo $PWD/$(basename $1))
}
+# Sum the grant bytes currently held by the given client nodes, as
+# reported by this filesystem's osc.*.cur_*grant_bytes parameters.
+# usage: grant_from_clients NODES   (node list suitable for do_nodes)
+grant_from_clients() {
+ local nodes="$1"
+
+ # get client grant
+ do_nodes $nodes "$LCTL get_param -n osc.${FSNAME}-*.cur_*grant_bytes" |
+ calc_sum
+}
+
+# Compute the aggregate grant accounted on the given OST server nodes:
+# the sum over all obdfilter devices of tot_granted, minus tot_pending
+# and grant_precreate. Printed as an integer via awk's %0.0f format.
+# usage: grant_from_servers NODES   (node list suitable for do_nodes)
+grant_from_servers() {
+ local nodes="$1"
+
+ # get server grant
+ # which is tot_granted less grant_precreate
+ do_nodes $nodes "$LCTL get_param obdfilter.${FSNAME}-OST*.tot_granted" \
+ " obdfilter.${FSNAME}-OST*.tot_pending" \
+ " obdfilter.${FSNAME}-OST*.grant_precreate" |
+ tr '=' ' ' | awk '/tot_granted/{ total += $2 };
+ /tot_pending/{ total -= $2 };
+ /grant_precreate/{ total -= $2 };
+ END { printf("%0.0f", total) }'
+}
check_grant() {
export base=$(basetest $1)
[ "$CHECK_GRANT" == "no" ] && return 0
- testnamebase=GCHECK_ONLY_${base}
- testname=GCHECK_ONLY_$1
- [ ${!testnamebase}x == x -a ${!testname}x == x ] && return 0
+ local isonly_base=GCHECK_ONLY_${base}
+ local isonly=GCHECK_ONLY_$1
+ [ ${!isonly_base}x == x -a ${!isonly}x == x ] && return 0
echo -n "checking grant......"
+ local osts=$(comma_list $(osts_nodes))
local clients=$CLIENTS
[ -z "$clients" ] && clients=$(hostname)
# sync all the data and make sure no pending data on server
do_nodes $clients sync
+ clients_up # initiate all idling connections
# get client grant
- client_grant=$(do_nodes $clients \
- "$LCTL get_param -n osc.${FSNAME}-*.cur_*grant_bytes" |
- awk '{ total += $1 } END { printf("%0.0f", total) }')
+ cli_grant=$(grant_from_clients $clients)
# get server grant
# which is tot_granted less grant_precreate
- server_grant=$(do_nodes $(comma_list $(osts_nodes)) \
- "$LCTL get_param "\
- "obdfilter.${FSNAME}-OST*.{tot_granted,tot_pending,grant_precreate}" |
- sed 's/=/ /'| awk '/tot_granted/{ total += $2 };
- /tot_pending/{ total -= $2 };
- /grant_precreate/{ total -= $2 };
- END { printf("%0.0f", total) }')
+ srv_grant=$(grant_from_servers $osts)
+ count=0
# check whether client grant == server grant
- if [[ $client_grant -ne $server_grant ]]; then
+ while [[ $cli_grant != $srv_grant && count++ -lt 30 ]]; do
+ echo "wait for client:$cli_grant == server:$srv_grant"
+ sleep 1
+ cli_grant=$(grant_from_clients $clients)
+ srv_grant=$(grant_from_servers $osts)
+ done
+ if [[ $cli_grant -ne $srv_grant ]]; then
do_nodes $(comma_list $(osts_nodes)) \
"$LCTL get_param obdfilter.${FSNAME}-OST*.tot*" \
- "obdfilter.${FSNAME}-OST*.grant_*"
+ "obdfilter.${FSNAME}-OST*.grant_*"
do_nodes $clients "$LCTL get_param osc.${FSNAME}-*.cur_*_bytes"
- error "failed: client:${client_grant} server: ${server_grant}."
+ error "failed grant check: client:$cli_grant server:$srv_grant"
else
- echo "pass: client:${client_grant} server: ${server_grant}"
+ echo "pass grant check: client:$cli_grant server:$srv_grant"
fi
}
echo ${uuid/_UUID/}
}
+# Translate an MDT index into the MDT target name by stripping the
+# "_UUID" suffix from the UUID returned by mdtuuid_from_index().
+mdtname_from_index() {
+ local uuid=$(mdtuuid_from_index $1)
+ echo ${uuid/_UUID/}
+}
+
+# Print the size column (second field of "lfs df" output) for one MDT.
+# usage: mdssize_from_index MOUNT_POINT MDT_INDEX
+mdssize_from_index () {
+ local mdt=$(mdtname_from_index $2)
+ $LFS df $1 | grep $mdt | awk '{ print $2 }'
+}
+
index_from_ostuuid()
{
$LFS osts $2 | sed -ne "/${1}/s/\(.*\): .* .*$/\1/p"
all_mdts_nodes () {
local host
local failover_host
- local nodes="${mds_HOST} ${mdsfailover_HOST}"
+ local nodes
local nodes_sort
local i
nodes="$nodes ${!host} ${!failover_host}"
done
+ [ -n "$nodes" ] || nodes="${mds_HOST} ${mdsfailover_HOST}"
nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
echo -n $nodes_sort
}
all_osts_nodes () {
local host
local failover_host
- local nodes="${ost_HOST} ${ostfailover_HOST}"
+ local nodes=
local nodes_sort
local i
nodes="$nodes ${!host} ${!failover_host}"
done
+ [ -n "$nodes" ] || nodes="${ost_HOST} ${ostfailover_HOST}"
nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
echo -n $nodes_sort
}
}
calc_osc_kbytes () {
- df $MOUNT > /dev/null
+ $LFS df $MOUNT > /dev/null
$LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
}
local val
while IFS=" =" read facet name val; do
- do_facet $facet "$LCTL set_param -n $name $val"
+ do_facet $facet "$LCTL set_param -n $name=$val"
done
}
########################
convert_facet2label() {
- local facet=$1
+ local facet=$1
- if [ x$facet = xost ]; then
- facet=ost1
- fi
+ if [ x$facet = xost ]; then
+ facet=ost1
+ elif [ x$facet = xmgs ] && combined_mgs_mds ; then
+ facet=mds1
+ fi
- local varsvc=${facet}_svc
+ local varsvc=${facet}_svc
- if [ -n ${!varsvc} ]; then
- echo ${!varsvc}
- else
- error "No lablel for $facet!"
- fi
+ if [ -n ${!varsvc} ]; then
+ echo ${!varsvc}
+ else
+ error "No label for $facet!"
+ fi
}
get_clientosc_proc_path() {
- echo "${1}-osc-ffff*"
+ echo "${1}-osc-[-0-9a-f]*"
}
# If the 2.0 MDS was mounted on 1.8 device, then the OSC and LOV names
local i=0
CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
- while [ "${CONN_STATE}" != "${expected}" ]; do
+ while ! echo "${CONN_STATE}" | egrep -q "^${expected}\$" ; do
if [ "${expected}" == "DISCONN" ]; then
# for disconn we can check after proc entry is removed
[ "x${CONN_STATE}" == "x" ] && return 0
local ost_facet=$2
local expected=$3
local target=$(get_osc_import_name $facet $ost_facet)
- local param="osc.${target}.ost_server_uuid"
+ local param="os[cp].${target}.ost_server_uuid"
local params=$param
local i=0
fi
}
+# Wait until the OSC import for the given target is ready for use,
+# i.e. in either FULL or IDLE state (an idle connection is still
+# usable and only needs reconnection on first access).
+# usage: wait_osc_import_ready FACET OST_FACET
+wait_osc_import_ready() {
+ wait_osc_import_state $1 $2 "\(FULL\|IDLE\)"
+}
+
_wait_mgc_import_state() {
local facet=$1
local expected=$2
if [ $MDSCOUNT -gt 1 ]; then
for num in $(seq $MDSCOUNT); do
- wait_osc_import_state mds mds$num FULL
+ wait_osc_import_ready mds mds$num
done
fi
}
local LIBPATH="/usr/lib/lustre/tests:/usr/lib64/lustre/tests:"
local TESTPATH="$RLUSTRE/tests:"
local RPATH="PATH=${TESTPATH}${LIBPATH}${PATH}:/sbin:/bin:/usr/sbin:"
- do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ "
+ do_nodesv $list "${RPATH} NAME=${NAME} bash rpc.sh $@ "
}
wait_clients_import_state () {
local params=$(expand_list $params $proc_path)
done
- if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params;
+ if ! do_rpc_nodes "$list" wait_import_state_mount "$expected" $params;
then
error "import is not in ${expected} state"
return 1
# wait until all MDTs are in the expected state
for ((num = 1; num <= $MDSCOUNT; num++)); do
local mdtosp=$(get_mdtosc_proc_path mds${num} ${tgt_name})
+ local wait=0
local mproc
if [ $facet = "mds" ]; then
sleep 5
local result=$(do_facet mds${num} "$LCTL get_param -n $mproc")
local max=30
- local wait=0
[ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc"
if [ $result -eq $expected ]; then
- echo -n "target updated after"
+ echo -n "target updated after "
echo "$wait sec (got $result)"
break
fi
local fsname=${1%%.*}
local poolname=${1##$fsname.}
- trap "destroy_test_pools $fsname" EXIT
+ stack_trap "destroy_test_pools $fsname" EXIT
do_facet mgs lctl pool_new $1
local RC=$?
# get param should return err unless pool is created
for ost in $OSTS; do
do_facet mgs lctl pool_remove $1 $ost
done
+ wait_update_facet $SINGLEMDS "lctl pool_list $1 | wc -l" "1" ||
+ error "MDS: pool_list $1 failed"
do_facet mgs lctl pool_destroy $1
}
}
gather_logs () {
- local list=$1
+ local list=$1
- local ts=$(date +%s)
- local docp=true
+ local ts=$(date +%s)
+ local docp=true
- if [[ ! -f "$YAML_LOG" ]]; then
- # init_logging is not performed before gather_logs,
- # so the $LOGDIR needs to be checked here
- check_shared_dir $LOGDIR && touch $LOGDIR/shared
- fi
+ if [[ ! -f "$YAML_LOG" ]]; then
+ # init_logging is not performed before gather_logs,
+ # so the $LOGDIR needs to be checked here
+ check_shared_dir $LOGDIR && touch $LOGDIR/shared
+ fi
- [ -f $LOGDIR/shared ] && docp=false
+ [ -f $LOGDIR/shared ] && docp=false
- # dump lustre logs, dmesg
+ # dump lustre logs, dmesg, and journal if GSS_SK=true
- prefix="$TESTLOG_PREFIX.$TESTNAME"
- suffix="$ts.log"
- echo "Dumping lctl log to ${prefix}.*.${suffix}"
+ prefix="$TESTLOG_PREFIX.$TESTNAME"
+ suffix="$ts.log"
+ echo "Dumping lctl log to ${prefix}.*.${suffix}"
- if [ -n "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
- echo "Dumping logs only on local client."
- $LCTL dk > ${prefix}.debug_log.$(hostname -s).${suffix}
- dmesg > ${prefix}.dmesg.$(hostname -s).${suffix}
- return
- fi
+ if [ -n "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
+ echo "Dumping logs only on local client."
+ $LCTL dk > ${prefix}.debug_log.$(hostname -s).${suffix}
+ dmesg > ${prefix}.dmesg.$(hostname -s).${suffix}
+ [ "$SHARED_KEY" = true ] && find $SK_PATH -name '*.key' -exec \
+ lgss_sk -r {} \; &> \
+ ${prefix}.ssk_keys.$(hostname -s).${suffix}
+ [ "$SHARED_KEY" = true ] && lctl get_param 'nodemap.*.*' > \
+ ${prefix}.nodemaps.$(hostname -s).${suffix}
+ [ "$GSS_SK" = true ] && keyctl show > \
+ ${prefix}.keyring.$(hostname -s).${suffix}
+ [ "$GSS_SK" = true ] && journalctl -a > \
+ ${prefix}.journal.$(hostname -s).${suffix}
+ return
+ fi
- do_nodesv $list \
- "$LCTL dk > ${prefix}.debug_log.\\\$(hostname -s).${suffix};
- dmesg > ${prefix}.dmesg.\\\$(hostname -s).${suffix}"
+ do_nodesv $list \
+ "$LCTL dk > ${prefix}.debug_log.\\\$(hostname -s).${suffix};
+ dmesg > ${prefix}.dmesg.\\\$(hostname -s).${suffix}"
+ if [ "$SHARED_KEY" = true ]; then
+ do_nodesv $list "find $SK_PATH -name '*.key' -exec \
+ lgss_sk -r {} \; &> \
+ ${prefix}.ssk_keys.\\\$(hostname -s).${suffix}"
+ do_facet mds1 "lctl get_param 'nodemap.*.*' > \
+ ${prefix}.nodemaps.\\\$(hostname -s).${suffix}"
+ fi
+ if [ "$GSS_SK" = true ]; then
+ do_nodesv $list "keyctl show > \
+ ${prefix}.keyring.\\\$(hostname -s).${suffix}"
+ do_nodesv $list "journalctl -a > \
+ ${prefix}.journal.\\\$(hostname -s).${suffix}"
+ fi
- if [ ! -f $LOGDIR/shared ]; then
- do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR
- fi
+ if [ ! -f $LOGDIR/shared ]; then
+ do_nodes $list rsync -az "${prefix}.*.${suffix}" \
+ $HOSTNAME:$LOGDIR
+ fi
}
do_ls () {
local clients=${CLIENTS:-$HOSTNAME}
for c in ${clients//,/ }; do
+ # reconnect if idle
+ do_node $c lctl set_param osc.*.idle_connect=1 >/dev/null 2>&1
local output=$(do_node $c lctl get_param -n \
osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null)
local tmpcnt=$(count_flvr "$output" $flavor)
mdtosc=$(get_mdtosc_proc_path mds$num)
mdtosc=${mdtosc/-MDT*/-MDT\*}
local output=$(do_facet mds$num lctl get_param -n \
- osc.$mdtosc.$PROC_CLI 2>/dev/null)
+ os[cp].$mdtosc.$PROC_CLI 2>/dev/null)
local tmpcnt=$(count_flvr "$output" $flavor)
if $GSS_SK && [ $flavor != "null" ]; then
# tmpcnt=min(contexts,flavors) to ensure SK context is on
output=$(do_facet mds$num lctl get_param -n \
- osc.$mdtosc.$PROC_CON 2>/dev/null)
+ os[cp].$mdtosc.$PROC_CON 2>/dev/null)
local outcon=$(count_contexts "$output")
if [ "$outcon" -lt "$tmpcnt" ]; then
tmpcnt=$outcon
# returns the canonical name for an ldiskfs device
ldiskfs_canon() {
- local dev="$1"
- local facet="$2"
-
- do_facet $facet "dv=\\\$(lctl get_param -n $dev);
-if foo=\\\$(lvdisplay -c \\\$dv 2>/dev/null); then
- echo dm-\\\${foo##*:};
-else
- echo \\\$(basename \\\$dv);
-fi;"
+ local dev="$1"
+ local facet="$2"
+
+ do_facet $facet "dv=\\\$($LCTL get_param -n $dev);
+ if foo=\\\$(lvdisplay -c \\\$dv 2>/dev/null); then
+ echo dm-\\\${foo##*:};
+ else
+ name=\\\$(basename \\\$dv);
+ if [[ \\\$name = *flakey* ]]; then
+ name=\\\$(lsblk -o NAME,KNAME |
+ awk /\\\$name/'{print \\\$NF}');
+ fi;
+ echo \\\$name;
+ fi;"
}
is_sanity_benchmark() {
#
# Get the page size (bytes) on a given facet node.
+# The local client page_size is directly available in PAGE_SIZE.
#
get_page_size() {
local facet=$1
- local size=$(getconf PAGE_SIZE 2>/dev/null)
+ local page_size=$(getconf PAGE_SIZE 2>/dev/null)
- [ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
- echo -n ${size:-4096}
+ [ -z "$CLIENTONLY" -a "$facet" != "client" ] &&
+ page_size=$(do_facet $facet getconf PAGE_SIZE)
+ echo -n ${page_size:-4096}
}
#
echo -n ${size:-0}
}
-# Check whether the "large_xattr" feature is enabled or not.
+# Check whether the "ea_inode" feature is enabled or not, to allow
+# ldiskfs xattrs over one block in size. Allow both the historical
+# Lustre feature name (large_xattr) and the upstream name (ea_inode).
large_xattr_enabled() {
- [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 0
+ [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 1
local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
# Get the maximum xattr size supported by the filesystem.
max_xattr_size() {
- local size
-
- if large_xattr_enabled; then
- # include/linux/limits.h: #define XATTR_SIZE_MAX 65536
- size=65536
- else
- local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
- local block_size=$(get_block_size $SINGLEMDS $mds_dev)
-
- # maximum xattr size = size of block - size of header -
- # size of 1 entry - 4 null bytes
- size=$((block_size - 32 - 32 - 4))
- fi
-
- echo $size
+ $LCTL get_param -n llite.*.max_easize
}
# Dump the value of the named xattr from a file.
local rcmd="do_facet $facet"
local metaea=${TMP}/backup_restore.ea
local metadata=${TMP}/backup_restore.tgz
- local opts=${MDS_MOUNT_OPTS}
+ local opts=${MDS_MOUNT_FS_OPTS}
local svc=${facet}_svc
if ! ${rcmd} test -b ${devname}; then
local devname=$(mdsdevname $(facet_number $facet))
local mntpt=$(facet_mntpt brpt)
local rcmd="do_facet $facet"
- local opts=${MDS_MOUNT_OPTS}
+ local opts=${MDS_MOUNT_FS_OPTS}
if ! ${rcmd} test -b ${devname}; then
opts=$(csa_add "$opts" -o loop)
test_mkdir() {
local path
local p_option
- local stripe_count=2
- local stripe_index=-1
+ local hash_type
+ local hash_name=("all_char" "fnv_1a_64" "crush")
+ local dirstripe_count=${DIRSTRIPE_COUNT:-"2"}
+ local dirstripe_index=${DIRSTRIPE_INDEX:-$((base % $MDSCOUNT))}
local OPTIND=1
- while getopts "c:i:p" opt; do
+ while getopts "c:H:i:p" opt; do
case $opt in
- c) stripe_count=$OPTARG;;
- i) stripe_index=$OPTARG;;
+ c) dirstripe_count=$OPTARG;;
+ H) hash_type=$OPTARG;;
+ i) dirstripe_index=$OPTARG;;
p) p_option="-p";;
- \?) error "only support -i -c -p";;
+ \?) error "only support -c -H -i -p";;
esac
done
if [ $MDSCOUNT -le 1 ]; then
mkdir $path || error "mkdir '$path' failed"
else
- local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
local mdt_index
- if [ $stripe_index -eq -1 ]; then
- mdt_index=$((test_num % MDSCOUNT))
+ if [ $dirstripe_index -eq -1 ]; then
+ mdt_index=$((base % MDSCOUNT))
else
- mdt_index=$stripe_index
+ mdt_index=$dirstripe_index
fi
- echo "striped dir -i$mdt_index -c$stripe_count $path"
- $LFS mkdir -i$mdt_index -c$stripe_count $path ||
- error "mkdir -i $mdt_index -c$stripe_count $path failed"
+
+ # randomly choose hash type
+ [ -z "$hash_type" ] &&
+ hash_type=${hash_name[$((RANDOM % ${#hash_name[@]}))]}
+
+ if (($MDS1_VERSION >= $(version_code 2.8.0))); then
+ if [ $dirstripe_count -eq -1 ]; then
+ dirstripe_count=$((RANDOM % MDSCOUNT + 1))
+ fi
+ else
+ dirstripe_count=1
+ fi
+
+ echo "striped dir -i$mdt_index -c$dirstripe_count -H $hash_type $path"
+ $LFS mkdir -i$mdt_index -c$dirstripe_count -H $hash_type $path ||
+ error "mkdir -i $mdt_index -c$dirstripe_count -H $hash_type $path failed"
fi
}
local file=$1
local pool=$2
local tlist="$3"
- local res=$($GETSTRIPE $file | grep 0x | cut -f2)
+ local res=$($LFS getstripe $file | grep 0x | cut -f2)
for i in $res
do
for t in $tlist ; do
local last=$3
local step=${4:-1}
- local list=$(seq $first $step $last)
+ if [ -z $last ]; then
+ local list=$first
+ else
+ local list=$(seq $first $step $last)
+ fi
local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done)
do_facet mgs $LCTL pool_add \
local tdir=$2
echo "Setting pool on directory $tdir"
- $SETSTRIPE -c 2 -p $pool $tdir && return 0
+ $LFS setstripe -c 2 -p $pool $tdir && return 0
error_noexit "Cannot set pool $pool to $tdir"
return 1
local tdir=$2
echo "Checking pool on directory $tdir"
- local res=$($GETSTRIPE --pool $tdir | sed "s/\s*$//")
+ local res=$($LFS getstripe --pool $tdir | sed "s/\s*$//")
[ "$res" = "$pool" ] && return 0
error_noexit "Pool on '$tdir' is '$res', not '$pool'"
for i in $(seq -w 1 $count)
do
local file=$tdir/spoo-$i
- $SETSTRIPE -p $pool $file
+ $LFS setstripe -p $pool $file
check_file_in_pool $file $pool "$tlist" || \
failed=$((failed + 1))
done
mkdir -p $tdir ||
{ error_noexit "unable to create $tdir"; return 1 ; }
local file="/..$tdir/$tfile-1"
- $SETSTRIPE -p $pool $file ||
+ $LFS setstripe -p $pool $file ||
{ error_noexit "unable to create $file" ; return 2 ; }
cd $tdir
- $SETSTRIPE -p $pool $tfile-2 || {
+ $LFS setstripe -p $pool $tfile-2 || {
error_noexit "unable to create $tfile-2 in $tdir"
return 3
}
return 2
}
# setstripe on an empty pool should fail
- $SETSTRIPE -p $pool $file 2>/dev/null && {
+ $LFS setstripe -p $pool $file 2>/dev/null && {
error_noexit "expected failure when creating file" \
"with empty pool"
return 3
return 1
}
# setstripe on an empty pool should fail
- $SETSTRIPE -p $pool $file 2>/dev/null && {
+ $LFS setstripe -p $pool $file 2>/dev/null && {
error_noexit "expected failure when creating file" \
"with missing pool"
return 2
[[ -z "$file" || -z "$expected" ]] &&
error "check_stripe_count: invalid argument"
- local cmd="$GETSTRIPE -c $file"
+ local cmd="$LFS getstripe -c $file"
actual=$($cmd) || error "$cmd failed"
actual=${actual%% *}
if [[ $actual -ne $expected ]]; then
- [[ $expected -eq -1 ]] ||
- error "$cmd wrong: found $actual, expected $expected"
- [[ $actual -eq $OSTCOUNT ]] ||
- error "$cmd wrong: found $actual, expected $OSTCOUNT"
+ [[ $expected -eq -1 ]] || { $LFS getstripe $file;
+ error "$cmd not expected ($expected): found $actual"; }
+ [[ $actual -eq $OSTCOUNT ]] || { $LFS getstripe $file;
+ error "$cmd not OST count ($OSTCOUNT): found $actual"; }
fi
}
[[ -z "$file" || -z "$expected" ]] &&
error "check_obdidx: invalid argument!"
- obdidx=$(comma_list $($GETSTRIPE $file | grep -A $OSTCOUNT obdidx |
+ obdidx=$(comma_list $($LFS getstripe $file | grep -A $OSTCOUNT obdidx |
grep -v obdidx | awk '{print $1}' | xargs))
[[ $obdidx = $expected ]] ||
[[ -z "$file" || -z "$expected" ]] &&
error "check_start_ost_idx: invalid argument!"
- start_ost_idx=$($GETSTRIPE $file | grep -A 1 obdidx | grep -v obdidx |
- awk '{print $1}')
+ start_ost_idx=$($LFS getstripe $file | grep -A 1 obdidx |
+ grep -v obdidx | awk '{print $1}')
[[ $start_ost_idx = $expected ]] ||
error "OST index of the first stripe on $file is" \
if ! combined_mgs_mds ; then
[ $(facet_fstype mgs) != zfs ] &&
- skip "Lustre snapshot 1 only works for ZFS backend" &&
- exit 0
+ skip "Lustre snapshot 1 only works for ZFS backend"
local host=$(facet_active_host mgs)
local dir=$(dirname $(facet_vdevice mgs))
for num in `seq $MDSCOUNT`; do
[ $(facet_fstype mds$num) != zfs ] &&
- skip "Lustre snapshot 1 only works for ZFS backend" &&
- exit 0
+ skip "Lustre snapshot 1 only works for ZFS backend"
lss_gen_conf_one mds$num MDT $((num - 1)) ||
lss_err "generate lss conf (mds$num)"
for num in `seq $OSTCOUNT`; do
[ $(facet_fstype ost$num) != zfs ] &&
- skip "Lustre snapshot 1 only works for ZFS backend" &&
- exit 0
+ skip "Lustre snapshot 1 only works for ZFS backend"
lss_gen_conf_one ost$num OST $((num - 1)) ||
lss_err "generate lss conf (ost$num)"
do_facet mgs "cat $LSNAPSHOT_CONF"
}
+# Parse 'lfs getstripe -d <path_with_dir_name>' for non-composite dir
+#
+# Splits the whitespace-separated "name: value" pairs in $1
+# (stripe_count, stripe_size, pattern, stripe_offset) and echoes the
+# equivalent "lfs setstripe" options: -c, -S, -L (pattern), -i (offset).
+parse_plain_dir_param()
+{
+ local invalues=($1)
+ local param=""
+
+ if [[ ${invalues[0]} =~ "stripe_count:" ]]; then
+ param="-c ${invalues[1]}"
+ fi
+ if [[ ${invalues[2]} =~ "stripe_size:" ]]; then
+ param="$param -S ${invalues[3]}"
+ fi
+ # field 4 is either "pattern:" or "stripe_offset:"; when field 5 is
+ # already "stripe_offset:" the pattern value was omitted from output
+ if [[ ${invalues[4]} =~ "pattern:" ]]; then
+ if [[ ${invalues[5]} =~ "stripe_offset:" ]]; then
+ param="$param -i ${invalues[6]}"
+ else
+ param="$param -L ${invalues[5]} -i ${invalues[7]}"
+ fi
+ elif [[ ${invalues[4]} =~ "stripe_offset:" ]]; then
+ param="$param -i ${invalues[5]}"
+ fi
+ echo "$param"
+}
+
parse_plain_param()
{
local line=$1
echo "-S $val"
elif [[ $line =~ ^"lmm_stripe_offset:" ]]; then
echo "-i $val"
+ elif [[ $line =~ ^"lmm_pattern:" ]]; then
+ echo "-L $val"
fi
}
local param=""
while read line; do
- if [[ -z $mode ]]; then
- if [[ $line =~ ^"stripe_count:" ]]; then
- mode="plain_dir"
- elif [[ $line =~ ^"lmm_stripe_count:" ]]; then
- mode="plain_file"
- elif [[ $line =~ ^"lcm_layout_gen:" ]]; then
- mode="pfl"
+ if [[ ! -z $line ]]; then
+ if [[ -z $mode ]]; then
+ if [[ $line =~ ^"stripe_count:" ]]; then
+ mode="plain_dir"
+ elif [[ $line =~ ^"lmm_stripe_count:" ]]; then
+ mode="plain_file"
+ elif [[ $line =~ ^"lcm_layout_gen:" ]]; then
+ mode="pfl"
+ fi
fi
- fi
- if [[ $mode = "plain_dir" ]]; then
- param=$(echo $line |
- awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}')
- elif [[ $mode = "plain_file" ]]; then
- val=$(parse_plain_param "$line")
- [[ ! -z $val ]] && param="$param $val"
- elif [[ $mode = "pfl" ]]; then
- val=$(echo $line | awk '{print $2}')
- if [[ $line =~ ^"lcme_extent.e_end:" ]]; then
- if [[ $val = "EOF" ]]; then
- param="$param -E -1"
- else
- param="$param -E $val"
- fi
- elif [[ $line =~ ^"stripe_count:" ]]; then
- # pfl dir
- val=$(echo $line |
- awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}')
- param="$param $val"
- else
- #pfl file
+ if [[ $mode = "plain_dir" ]]; then
+ param=$(parse_plain_dir_param "$line")
+ elif [[ $mode = "plain_file" ]]; then
val=$(parse_plain_param "$line")
[[ ! -z $val ]] && param="$param $val"
+ elif [[ $mode = "pfl" ]]; then
+ val=$(echo $line | awk '{print $2}')
+ if [[ $line =~ ^"lcme_extent.e_end:" ]]; then
+ if [[ $val = "EOF" ]]; then
+ param="$param -E -1"
+ else
+ param="$param -E $val"
+ fi
+ elif [[ $line =~ ^"stripe_count:" ]]; then
+ # pfl dir
+ val=$(parse_plain_dir_param "$line")
+ param="$param $val"
+ else
+ #pfl file
+ val=$(parse_plain_param "$line")
+ [[ ! -z $val ]] && param="$param $val"
+ fi
fi
fi
done
# controllable
cancel_lru_locks mdc
cancel_lru_locks osc
-
+
# make sure PFID is set correctly for files
do_nodes $(comma_list $(osts_nodes)) \
"$LCTL set_param -n obdfilter.${FSNAME}-OST*.lfsck_verify_pfid=1"
[ $? -eq 0 ] || error "$osc state is not FULL"
done
}
+
+#Changelogs
+# Deregister one changelog user from one MDT, clearing its pending
+# records first. A missing or already-deregistered user is not an
+# error; clear/deregister failures are reported via error_noexit.
+# usage: __changelog_deregister FACET CL_USER
+__changelog_deregister() {
+ local facet=$1
+ local mdt="$(facet_svc $facet)"
+ local cl_user=$2
+ # NOTE(review): rc is never reassigned below, so the "$rc" in the
+ # error messages always prints 0 — confirm the intended value ($?)
+ local rc=0
+
+ # skip cleanup if no user registered for this MDT
+ [ -z "$cl_user" ] && echo "$mdt: no changelog user" && return 0
+ # user is no longer registered, skip cleanup
+ changelog_users "$facet" | grep -q "$cl_user" ||
+ { echo "$mdt: changelog user '$cl_user' not found"; return 0; }
+
+ # From this point, if any operation fails, it is an error
+ __changelog_clear $facet $cl_user 0 ||
+ error_noexit "$mdt: changelog_clear $cl_user 0 fail: $rc"
+ do_facet $facet $LCTL --device $mdt changelog_deregister $cl_user ||
+ error_noexit "$mdt: changelog_deregister '$cl_user' fail: $rc"
+}
+
+# CL_USERS maps each MDS facet to a whitespace-separated list of the
+# changelog user names registered on it.
+declare -Ax CL_USERS
+# Register a changelog user on every MDT and add "hsm" events to the
+# changelog mask. All cleanup — mask restore, user deregistration and
+# CL_USERS restore — is queued with stack_trap to run at test EXIT.
+changelog_register() {
+ for M in $(seq $MDSCOUNT); do
+ local facet=mds$M
+ local mdt="$(facet_svc $facet)"
+ local cl_mask
+
+ cl_mask=$(do_facet $facet $LCTL get_param \
+ mdd.${mdt}.changelog_mask -n)
+ stack_trap "do_facet $facet $LCTL \
+ set_param mdd.$mdt.changelog_mask=\'$cl_mask\' -n" EXIT
+ do_facet $facet $LCTL set_param mdd.$mdt.changelog_mask=+hsm ||
+ error "$mdt: changelog_mask=+hsm failed: $?"
+
+ local cl_user
+ cl_user=$(do_facet $facet \
+ $LCTL --device $mdt changelog_register -n) ||
+ error "$mdt: register changelog user failed: $?"
+ stack_trap "__changelog_deregister $facet $cl_user" EXIT
+
+ stack_trap "CL_USERS[$facet]='${CL_USERS[$facet]}'" EXIT
+ # Bash does not support nested arrays, but the format of a
+ # cl_user is constrained enough to use whitespaces as separators
+ CL_USERS[$facet]+="$cl_user "
+ done
+ echo "Registered $MDSCOUNT changelog users: '${CL_USERS[@]% }'"
+}
+
+# Deregister every changelog user recorded in CL_USERS, walking facets
+# in sorted order so the sequence matches changelog_register().
+# Returns the first deregistration failure status, aborting the walk.
+changelog_deregister() {
+ local cl_user
+ # bash assoc arrays do not guarantee to list keys in created order
+ # so reorder to get same order than in changelog_register()
+ local cl_facets=$(echo "${!CL_USERS[@]}" | tr " " "\n" | sort |
+ tr "\n" " ")
+
+ for facet in $cl_facets; do
+ for cl_user in ${CL_USERS[$facet]}; do
+ __changelog_deregister $facet $cl_user || return $?
+ done
+ unset CL_USERS[$facet]
+ done
+}
+
+# Print the raw mdd.<service>.changelog_users parameter for FACET,
+# listing the changelog users registered on that MDT.
+changelog_users() {
+ local facet=$1
+ local service=$(facet_svc $facet)
+
+ do_facet $facet $LCTL get_param -n mdd.$service.changelog_users
+}
+
+# Print the current record number of CL_USER on FACET — the second
+# column of the changelog_users output line whose first column matches.
+changelog_user_rec() {
+ local facet=$1
+ local cl_user=$2
+ local service=$(facet_svc $facet)
+
+ changelog_users $facet | awk '$1 == "'$cl_user'" { print $2 }'
+}
+
+# Set the changelog mask to MASK on every MDS node.
+# usage: changelog_chmask MASK
+changelog_chmask() {
+ local mask=$1
+
+ do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL set_param mdd.*.changelog_mask="$mask"
+}
+
+# usage: __changelog_clear FACET CL_USER [+]INDEX
+# Clear the changelog of CL_USER on FACET up to the given record index.
+# A "+N" index is relative: N is added to the user's current record
+# number. An effective index of 0 clears all of the user's records.
+__changelog_clear()
+{
+ local facet=$1
+ local mdt="$(facet_svc $facet)"
+ local cl_user=$2
+ local -i rec
+
+ case "$3" in
+ +*)
+ # Remove the leading '+'
+ rec=${3:1}
+ # rec is declared -i, so += is arithmetic addition
+ rec+=$(changelog_user_rec $facet $cl_user)
+ ;;
+ *)
+ rec=$3
+ ;;
+ esac
+
+ if [ $rec -eq 0 ]; then
+ echo "$mdt: clear the changelog for $cl_user of all records"
+ else
+ echo "$mdt: clear the changelog for $cl_user to record #$rec"
+ fi
+ $LFS changelog_clear $mdt $cl_user $rec
+}
+
+# usage: changelog_clear [+]INDEX
+#
+# If INDEX is prefixed with '+', increment every changelog user's record index
+# by INDEX. Otherwise, clear the changelog up to INDEX for every changelog
+# users.
+changelog_clear() {
+	# 'facet' and 'cl_user' were not declared local and leaked globally
+	local facet
+	local cl_user
+	local rc
+	# bash assoc arrays do not guarantee to list keys in created order
+	# so reorder to get same order than in changelog_register()
+	local cl_facets=$(echo "${!CL_USERS[@]}" | tr " " "\n" | sort |
+			  tr "\n" " ")
+
+	# keep clearing the remaining users on failure, but remember the
+	# first non-zero status and return it
+	for facet in $cl_facets; do
+		for cl_user in ${CL_USERS[$facet]}; do
+			__changelog_clear $facet $cl_user $1 || rc=${rc:-$?}
+		done
+	done
+
+	return ${rc:-0}
+}
+
+# Dump the changelog of every MDT, prefixing each record with the MDT's
+# service name (e.g. "lustre-MDT0000.<record>").
+changelog_dump() {
+	for M in $(seq $MDSCOUNT); do
+		local facet=mds$M
+		local mdt="$(facet_svc $facet)"
+
+		$LFS changelog $mdt | sed -e 's/^/'$mdt'./'
+	done
+}
+
+# Print the bracketed value (e.g. a FID) following key $3 in the most
+# recent changelog record of type $1 whose target path ends in $2.
+changelog_extract_field() {
+	local cltype=$1
+	local file=$2
+	local identifier=$3
+
+	# Mixed quoting on purpose: the gawk program is built as
+	# "<expanded part>"$identifier'<literal part>' so that $cltype,
+	# $file and $identifier are expanded by the shell before gawk runs.
+	changelog_dump | gawk "/$cltype.*$file$/ {
+		print gensub(/^.* "$identifier'(\[[^\]]*\]).*$/,"\\1",1)}' |
+		tail -1
+}
+
+# Prints a changelog record produced by "lfs changelog" as an associative array
+#
+# Example:
+# $> changelog2array 16 01CREAT 10:28:46.968438800 2018.03.09 0x0 \
+# t=[0x200000401:0x10:0x0] j=touch.501 ef=0xf u=501:501 \
+# nid=0@lo p=[0x200000007:0x1:0x0] blob
+# ([index]='16' [type]='CREAT' [time]='10:28:46.968438800'
+# [date]='2018.03.09' [flags]=0x0 ['target-fid']='[0x200000401:0x10:0x0]'
+# ['jobid']='touch.501' ['extra-flags']='0x0f' [uid]='0' ['gid']='0'
+# ['nid']='0@lo' ['parent-fid']='[0x200000007:0x1:0x0]')
+#
+# Note that the changelog record is not quoted
+# Also note that the line breaks in the output were only added for readability
+#
+# Typically, you want to eval the output of the command to fill an actual
+# associative array, like this:
+# $> eval declare -A changelog=$(changelog2array $entry)
+#
+# It can then be accessed like any bash associative array:
+# $> echo "${changelog[index]}" "${changelog[type]}" "${changelog[flags]}"
+# 16 CREAT 0x0
+# $> echo "${changelog[uid]}":"${changelog[gid]}"
+# 501:501
+#
+changelog2array()
+{
+	# Start the array
+	printf '('
+
+	# A changelog, as printed by "lfs changelog" typically looks like this:
+	# <index> <type> <time> <date> <flags> <key1=value1> <key2=value2> ...
+
+	# Parse the positional part of the changelog
+
+	# changelog_dump() prefixes records with their mdt's name
+	local index="${1##*.}"
+
+	# ${2:2} strips the 2-digit numeric type prefix ("01CREAT" -> "CREAT")
+	printf "[index]='%s' [type]='%s' [time]='%s' [date]='%s' [flags]='%s'" \
+	       "$index" "${2:2}" "$3" "$4" "$5"
+
+	# Parse the key/value part of the changelog
+	# Note: "${@:5}" starts at <flags>, but flags never match the
+	# key=value pattern below, so parsing effectively begins at $6
+	for arg in "${@:5}"; do
+		# Check it matches a key=value syntax
+		[[ "$arg" =~ ^[[:alpha:]]+= ]] || continue
+
+		local key="${arg%%=*}"
+		local value="${arg#*=}"
+
+		case "$key" in
+		u)
+			# u is actually for uid AND gid: u=UID:GID
+			printf " [uid]='%s'" "${value%:*}"
+			key=gid
+			value="${value#*:}"
+			;;
+		t)
+			key=target-fid
+			# strip the surrounding brackets from the FID
+			value="${value#[}"
+			value="${value%]}"
+			;;
+		j)
+			key=jobid
+			;;
+		p)
+			key=parent-fid
+			value="${value#[}"
+			value="${value%]}"
+			;;
+		ef)
+			key=extra-flags
+			;;
+		m)
+			key=mode
+			;;
+		x)
+			key=xattr
+			;;
+		*)
+			;;
+		esac
+
+		printf " ['%s']='%s'" "$key" "$value"
+	done
+
+	# end the array
+	printf ')'
+}
+
+# Format and print a changelog record
+#
+# Reads the associative array 'changelog' from the caller's scope (see
+# changelog2array() / changelog_find()).
+#
+# Interpreted sequences are:
+# %% a single %
+# %f the "flags" attribute of a changelog record
+__changelog_printf()
+{
+	local format="$1"
+
+	local -i i
+	for ((i = 0; i < ${#format}; i++)); do
+		local char="${format:$i:1}"
+		if [ "$char" != % ]; then
+			printf '%c' "$char"
+			continue
+		fi
+
+		# consume the character following '%'
+		i+=1
+		char="${format:$i:1}"
+		case "$char" in
+		f)
+			printf '%s' "${changelog[flags]}"
+			;;
+		%)
+			printf '%'
+			;;
+		esac
+	done
+	printf '\n'
+}
+
+# Filter changelog records
+#
+# usage: changelog_find [-print|-printf FORMAT] [-KEY VALUE]...
+# Each "-KEY VALUE" pair filters on the matching changelog2array() field
+# (e.g. -type CREAT, -target-fid FID). Returns non-zero if no record matched.
+changelog_find()
+{
+	local -A filter
+	local action='print'
+	local format
+
+	while [ $# -gt 0 ]; do
+		case "$1" in
+		-print)
+			action='print'
+			;;
+		-printf)
+			action='printf'
+			format="$2"
+			shift
+			;;
+		-*)
+			# any other -KEY becomes a filter on field KEY
+			filter[${1#-}]="$2"
+			shift
+			;;
+		esac
+		shift
+	done
+
+	local found=false
+	local record
+	# The braces keep the while loop and the trailing "$found" in the
+	# same pipeline subshell, so the match status set inside the loop
+	# survives to become the function's exit status.
+	changelog_dump | { while read -r record; do
+		eval local -A changelog=$(changelog2array $record)
+		for key in "${!filter[@]}"; do
+			case "$key" in
+			*)
+				[ "${changelog[$key]}" == "${filter[$key]}" ]
+				;;
+			esac || continue 2	# mismatch: next record
+		done
+
+		found=true
+
+		case "${action:-print}" in
+		print)
+			printf '%s\n' "$record"
+			;;
+		printf)
+			__changelog_printf "$format"
+			;;
+		esac
+	done; $found; }
+}
+
+# Restore the striping layout of directory $1 from the hex string $2 as
+# produced by save_layout(); an empty $2 deletes the default striping.
+restore_layout() {
+	local dir=$1
+	local layout=$2
+
+	[ ! -d "$dir" ] && return
+
+	# nothing was saved: remove any default striping instead
+	[ -z "$layout" ] && {
+		$LFS setstripe -d $dir || error "error deleting stripe '$dir'"
+		return
+	}
+
+	setfattr -n trusted.lov -v $layout $dir ||
+		error "error restoring layout '$layout' to '$dir'"
+}
+
+# save the layout of a directory, the returned string will be used by
+# restore_layout() to restore the layout
+save_layout() {
+	local dir=$1
+	# hex dump of the trusted.lov xattr; empty if the xattr is absent
+	local str=$(getfattr -n trusted.lov --absolute-names -e hex $dir \
+		    2> /dev/null | awk -F'=' '/trusted.lov/{ print $2 }')
+	echo "$str"
+}
+
+# save layout of a directory and restore it at exit
+save_layout_restore_at_exit() {
+ local dir=$1
+ local layout=$(save_layout $dir)
+
+ stack_trap "restore_layout $dir $layout" EXIT
+}
+
+# Copy the layout of $1 to $2 via "lfs getstripe --yaml" and
+# "lfs setstripe --yaml", then check the two layouts are identical.
+# usage: verify_yaml_layout SRC DST TEMPFILE MSG_PREFIX
+verify_yaml_layout() {
+	local src=$1
+	local dst=$2
+	local temp=$3
+	local msg_prefix=$4
+
+	echo "getstripe --yaml $src"
+	$LFS getstripe --yaml $src > $temp || error "getstripe $src failed"
+	echo "setstripe --yaml=$temp $dst"
+	$LFS setstripe --yaml=$temp $dst|| error "setstripe $dst failed"
+
+	echo "compare"
+	local layout1=$(get_layout_param $src)
+	local layout2=$(get_layout_param $dst)
+	# compare their layout info
+	[ "$layout1" == "$layout2" ] ||
+		error "$msg_prefix $src/$dst layouts are not equal"
+}
+
+# Return 0 if the MDS backend supports project quotas: ldiskfs newer than
+# 2.9.55 with "lfs project" support, or zfs newer than 2.10.53 with the
+# project_quota pool feature. Requires $ENABLE_PROJECT_QUOTAS.
+is_project_quota_supported() {
+	$ENABLE_PROJECT_QUOTAS || return 1
+
+	[[ "$(facet_fstype $SINGLEMDS)" == "ldiskfs" &&
+	   $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.55) ]] &&
+		do_facet mds1 lfs --help |& grep -q project && return 0
+
+	[[ "$(facet_fstype $SINGLEMDS)" == "zfs" &&
+	   $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.10.53) ]] &&
+		do_facet mds1 $ZPOOL get all | grep -q project_quota && return 0
+
+	return 1
+}
+
+# ZFS project quota enable/disable:
+# This feature will become active as soon as it is enabled and will never
+# return to being disabled. Each filesystem will be upgraded automatically
+# when remounted or when [a] new file is created under that filesystem. The
+# upgrade can also be triggered on filesystems via `zfs set version=current
+# <pool/fs>`. The upgrade process runs in the background and may take a
+# while to complete for the filesystems containing a large number of files.
+# Enable the project quota feature on every MDT and OST backend, stopping
+# and restarting the whole filesystem around the change.
+enable_project_quota() {
+	is_project_quota_supported || return 0
+	# keep zpools imported across the stop/start cycle
+	local zkeeper=${KEEP_ZPOOL}
+	stack_trap "KEEP_ZPOOL=$zkeeper" EXIT
+	KEEP_ZPOOL="true"
+	stopall || error "failed to stopall (1)"
+
+	local zfeat_en="feature@project_quota=enabled"
+	for facet in $(seq -f mds%g $MDSCOUNT) $(seq -f ost%g $OSTCOUNT); do
+		# name of the variable holding the backend fstype,
+		# i.e. mds1_FSTYPE or ost1_FSTYPE
+		local facet_fstype=${facet:0:3}1_FSTYPE
+		local devname
+
+		if [ "${!facet_fstype}" = "zfs" ]; then
+			devname=$(zpool_name ${facet})
+			do_facet ${facet} $ZPOOL set "$zfeat_en" $devname ||
+				error "$ZPOOL set $zfeat_en $devname"
+		else
+			[ ${facet:0:3} == "mds" ] &&
+				devname=$(mdsdevname ${facet:3}) ||
+				devname=$(ostdevname ${facet:3})
+			do_facet ${facet} $TUNE2FS -O project $devname ||
+				error "tune2fs $devname failed"
+		fi
+	done
+
+	KEEP_ZPOOL="${zkeeper}"
+	mount
+	setupall
+}
+
+# Disable project quota on all ldiskfs MDT/OST backends (no-op for zfs),
+# stopping and restarting the filesystem around the tune2fs calls.
+disable_project_quota() {
+	is_project_quota_supported || return 0
+	[ "$mds1_FSTYPE" != "ldiskfs" ] && return 0
+	stopall || error "failed to stopall (1)"
+
+	for num in $(seq $MDSCOUNT); do
+		do_facet mds$num $TUNE2FS -Q ^prj $(mdsdevname $num) ||
+			error "tune2fs $(mdsdevname $num) failed"
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		do_facet ost$num $TUNE2FS -Q ^prj $(ostdevname $num) ||
+			error "tune2fs $(ostdevname $num) failed"
+	done
+
+	mount
+	setupall
+}
+
+#
+# In order to test multiple remote HSM agents, a new facet type named "AGT" and
+# the following associated variables are added:
+#
+# AGTCOUNT: number of agents
+# AGTDEV{N}: target HSM mount point (root path of the backend)
+# agt{N}_HOST: hostname of the agent agt{N}
+# SINGLEAGT: facet of the single agent
+#
+# The number of agents is initialized as the number of remote client nodes.
+# By default, only single copytool is started on a remote client/agent. If there
+# was no remote client, then the copytool will be started on the local client.
+#
+init_agt_vars() {
+	local n
+	local agent
+
+	# one agent per remote client, at least one
+	export AGTCOUNT=${AGTCOUNT:-$((CLIENTCOUNT - 1))}
+	[[ $AGTCOUNT -gt 0 ]] || AGTCOUNT=1
+
+	export SHARED_DIRECTORY=${SHARED_DIRECTORY:-$TMP}
+	if [[ $CLIENTCOUNT -gt 1 ]] &&
+		! check_shared_dir $SHARED_DIRECTORY $CLIENTS; then
+		skip_env "SHARED_DIRECTORY should be accessible"\
+			 "on all client nodes"
+		exit 0
+	fi
+
+	# We used to put the HSM archive in $SHARED_DIRECTORY but that
+	# meant NFS issues could hose sanity-hsm sessions. So now we
+	# use $TMP instead.
+	for n in $(seq $AGTCOUNT); do
+		eval export AGTDEV$n=\$\{AGTDEV$n:-"$TMP/arc$n"\}
+		# agt<n> defaults to the (n+1)'th client; fall back to
+		# CLIENT1/CLIENT2 when there are not enough clients
+		agent=CLIENT$((n + 1))
+		if [[ -z "${!agent}" ]]; then
+			[[ $CLIENTCOUNT -eq 1 ]] && agent=CLIENT1 ||
+				agent=CLIENT2
+		fi
+		eval export agt${n}_HOST=\$\{agt${n}_HOST:-${!agent}\}
+		local var=agt${n}_HOST
+		[[ ! -z "${!var}" ]] || error "agt${n}_HOST is empty!"
+	done
+
+	export SINGLEAGT=${SINGLEAGT:-agt1}
+
+	export HSMTOOL=${HSMTOOL:-"lhsmtool_posix"}
+	export HSMTOOL_VERBOSE=${HSMTOOL_VERBOSE:-""}
+	export HSMTOOL_UPDATE_INTERVAL=${HSMTOOL_UPDATE_INTERVAL:=""}
+	export HSMTOOL_EVENT_FIFO=${HSMTOOL_EVENT_FIFO:=""}
+	export HSMTOOL_TESTDIR
+	# program name only, used for pgrep/killall
+	export HSMTOOL_BASE=$(basename "$HSMTOOL" | cut -f1 -d" ")
+
+	HSM_ARCHIVE_NUMBER=2
+
+	# The test only support up to 10 MDTs
+	MDT_PREFIX="mdt.$FSNAME-MDT000"
+	HSM_PARAM="${MDT_PREFIX}0.hsm"
+
+	# archive is purged at copytool setup
+	HSM_ARCHIVE_PURGE=true
+
+	# Don't allow copytool error upon start/setup
+	HSMTOOL_NOERROR=false
+}
+
+# Get the backend root path for the given agent facet.
+copytool_device() {
+	local facet=$1
+	local dev=AGTDEV$(facet_number $facet)
+
+	# indirect expansion: print the value of AGTDEV<N>, no newline
+	echo -n ${!dev}
+}
+
+# Populate the (global) MDT[] array with each MDT's service name, derived
+# from the local mdc mds_server_uuid with the _UUID suffix stripped.
+get_mdt_devices() {
+	local mdtno
+	# get MDT device for each mdc
+	for mdtno in $(seq 1 $MDSCOUNT); do
+		local idx=$(($mdtno - 1))
+		MDT[$idx]=$($LCTL get_param -n \
+			mdc.$FSNAME-MDT000${idx}-mdc-*.mds_server_uuid |
+			awk '{gsub(/_UUID/,""); print $1}' | head -n1)
+	done
+}
+
+# Look for running copytool processes ($HSMTOOL_BASE) on the given hosts
+# (default: the active host of $SINGLEAGT); non-zero when none found.
+search_copytools() {
+	local hosts=${1:-$(facet_active_host $SINGLEAGT)}
+	do_nodesv $hosts "pgrep -x $HSMTOOL_BASE"
+}
+
+# Kill any running copytool on the given hosts; never fails, since there
+# may legitimately be nothing to kill.
+kill_copytools() {
+	local hosts=${1:-$(facet_active_host $SINGLEAGT)}
+
+	echo "Killing existing copytools on $hosts"
+	do_nodesv $hosts "killall -q $HSMTOOL_BASE" || true
+}
+
+# Wait (up to 200s, with exponential back-off) for all copytools on the
+# given hosts to exit; on timeout, dump the remote tasks' stacks via sysrq.
+wait_copytools() {
+	local hosts=${1:-$(facet_active_host $SINGLEAGT)}
+	local wait_timeout=200
+	local wait_start=$SECONDS
+	local wait_end=$((wait_start + wait_timeout))
+	local sleep_time=100000 # 0.1 second
+
+	while ((SECONDS < wait_end)); do
+		if ! search_copytools $hosts; then
+			echo "copytools stopped in $((SECONDS - wait_start))s"
+			return 0
+		fi
+
+		echo "copytools still running on $hosts"
+		# usleep takes microseconds; double the delay up to 3.2s
+		usleep $sleep_time
+		[ $sleep_time -lt 32000000 ] && # 3.2 seconds
+			sleep_time=$(bc <<< "$sleep_time * 2")
+	done
+
+	# try to dump Copytool's stack
+	do_nodesv $hosts "echo 1 >/proc/sys/kernel/sysrq ; " \
+			 "echo t >/proc/sysrq-trigger"
+
+	echo "copytools failed to stop in ${wait_timeout}s"
+
+	return 1
+}
+
+# Stop the copytool monitor (if one was started) on the given agent facet
+# and remove its working directory and pdsh watcher.
+copytool_monitor_cleanup() {
+	local facet=${1:-$SINGLEAGT}
+	local agent=$(facet_active_host $facet)
+
+	if [ -n "$HSMTOOL_MONITOR_DIR" ]; then
+		# Should die when the copytool dies, but just in case.
+		local cmd="kill \\\$(cat $HSMTOOL_MONITOR_DIR/monitor_pid)"
+		cmd+=" 2>/dev/null || true"
+		do_node $agent "$cmd"
+		do_node $agent "rm -fr $HSMTOOL_MONITOR_DIR"
+		export HSMTOOL_MONITOR_DIR=
+	fi
+
+	# The pdsh should die on its own when the monitor dies. Just
+	# in case, though, try to clean up to avoid any cruft.
+	if [ -n "$HSMTOOL_MONITOR_PDSH" ]; then
+		kill $HSMTOOL_MONITOR_PDSH 2>/dev/null || true
+		export HSMTOOL_MONITOR_PDSH=
+	fi
+}
+
+copytool_logfile()
+{
+ local host="$(facet_host "$1")"
+ local prefix=$TESTLOG_PREFIX
+ [ -n "$TESTNAME" ] && prefix+=.$TESTNAME
+
+ printf "${prefix}.copytool${archive_id}_log.${host}.log"
+}
+
+# POSIX-copytool backend for "copytool rebind".
+# NOTE(review): relies on $facet, $hsm_root and $mountpoint from the
+# calling copytool() scope.
+__lhsmtool_rebind()
+{
+	do_facet $facet $HSMTOOL -p "$hsm_root" --rebind "$@" "$mountpoint"
+}
+
+# POSIX-copytool backend for "copytool import"; $2 is the import target,
+# whose parent directory is created locally first.
+# NOTE(review): relies on $facet, $hsm_root and $mountpoint from the
+# calling copytool() scope.
+__lhsmtool_import()
+{
+	mkdir -p "$(dirname "$2")" ||
+		error "cannot create directory '$(dirname "$2")'"
+	do_facet $facet $HSMTOOL -p "$hsm_root" --import "$@" "$mountpoint"
+}
+
+# POSIX-copytool backend for "copytool setup": build the daemon command
+# line and start it on the agent, logging to copytool_logfile().
+# NOTE(review): relies on $facet, $hsm_root, $mountpoint, $bandwidth,
+# $archive_id and $misc_options from the calling copytool() scope; the
+# "$@" appended below carries the same words as misc_options.
+__lhsmtool_setup()
+{
+	local cmd="$HSMTOOL $HSMTOOL_VERBOSE --daemon --hsm-root \"$hsm_root\""
+	[ -n "$bandwidth" ] && cmd+=" --bandwidth $bandwidth"
+	[ -n "$archive_id" ] && cmd+=" --archive $archive_id"
+	[ ${#misc_options[@]} -gt 0 ] &&
+		cmd+=" $(IFS=" " echo "$@")"
+	cmd+=" \"$mountpoint\""
+
+	echo "Starting copytool $facet on $(facet_host $facet)"
+	# make sure the daemon is killed when the test exits
+	stack_trap "do_facet $facet libtool execute pkill -x '$HSMTOOL' || true" EXIT
+	do_facet $facet "$cmd < /dev/null > \"$(copytool_logfile $facet)\" 2>&1"
+}
+
+hsm_root() {
+ local facet="${1:-$SINGLEAGT}"
+
+ printf "$(copytool_device "$facet")/${TESTSUITE}.${TESTNAME}/"
+}
+
+# Main entry point to perform copytool related operations
+#
+# Sub-commands:
+#
+# setup setup a copytool to run in the background, that copytool will be
+# killed on EXIT
+# import import a file from an HSM backend
+# rebind rebind an archived file to a new fid
+#
+# Although the semantics might suggest otherwise, one does not need to 'setup'
+# a copytool before a call to 'copytool import' or 'copytool rebind'.
+#
+copytool()
+{
+	local action=$1
+	shift
+
+	# Use default values
+	local facet=$SINGLEAGT
+	local mountpoint="${MOUNT2:-$MOUNT}"
+	local hsm_root="${hsm_root:-$(hsm_root "$facet")}"
+
+	# Parse arguments
+	local fail_on_error=true
+	local -a misc_options
+	while [ $# -gt 0 ]; do
+		case "$1" in
+		-f|--facet)
+			shift
+			facet="$1"
+			;;
+		-m|--mountpoint)
+			shift
+			mountpoint="$1"
+			;;
+		-a|--archive-id)
+			shift
+			local archive_id="$1"
+			;;
+		-h|--hsm-root)
+			shift
+			hsm_root="$1"
+			;;
+		-b|--bwlimit)
+			shift
+			local bandwidth="$1" # in MB/s
+			;;
+		-n|--no-fail)
+			local fail_on_error=false
+			;;
+		*)
+			# Uncommon(/copytool dependent) option
+			misc_options+=("$1")
+			;;
+		esac
+		shift
+	done
+
+	# the backend directory is removed when the test exits
+	stack_trap "do_facet $facet rm -rf '$hsm_root'" EXIT
+	do_facet $facet mkdir -p "$hsm_root" ||
+		error "mkdir '$hsm_root' failed"
+
+	# select the __<copytool>_<action> helper family to dispatch to
+	case "$HSMTOOL" in
+	lhsmtool_posix)
+		local copytool=lhsmtool
+		;;
+	esac
+
+	__${copytool}_${action} "${misc_options[@]}"
+	if [ $? -ne 0 ]; then
+		local error_msg
+
+		case $action in
+		setup)
+			local host="$(facet_host $facet)"
+			error_msg="Failed to start copytool $facet on '$host'"
+			;;
+		import)
+			local src="${misc_options[0]}"
+			local dest="${misc_options[1]}"
+			error_msg="Failed to import '$src' to '$dest'"
+			;;
+		rebind)
+			error_msg="could not rebind file"
+			;;
+		esac
+
+		# with -n/--no-fail, only report the failure
+		$fail_on_error && error "$error_msg" || echo "$error_msg"
+	fi
+}
+
+# Ensure at least $1 client nodes are available; skip the test and return
+# non-zero otherwise.
+needclients() {
+	local needed=$1
+
+	if [[ $CLIENTCOUNT -lt $needed ]]; then
+		skip "Need $needed or more clients, have $CLIENTCOUNT"
+		return 1
+	fi
+
+	return 0
+}
+
+# Print the FID of the given path without the surrounding brackets.
+path2fid() {
+	$LFS path2fid $1 | tr -d '[]'
+	# propagate lfs's exit status, not tr's
+	return ${PIPESTATUS[0]}
+}
+
+# Print the HSM flag string of file $1, e.g. "0x00000009(exists,archived)"
+# reduced to its flag part; run as $RUNAS when $2 is "user", as root
+# otherwise.
+get_hsm_flags() {
+	local f=$1
+	local u=$2
+	local st
+
+	if [[ $u == "user" ]]; then
+		st=$($RUNAS $LFS hsm_state $f)
+	else
+		u=root
+		st=$($LFS hsm_state $f)
+	fi
+
+	# $? here is the status of the st=$(...) assignment executed in
+	# whichever branch ran above
+	[[ $? == 0 ]] || error "$LFS hsm_state $f failed (run as $u)"
+
+	# keep only the second field, stripped of parentheses and commas
+	st=$(echo $st | cut -f 2 -d" " | tr -d "()," )
+	echo $st
+}
+
+# Fail the test unless the HSM flags of file $1 exactly match $2.
+check_hsm_flags() {
+	local f=$1
+	local fl=$2
+
+	local st=$(get_hsm_flags $f)
+	[[ $st == $fl ]] || error "hsm flags on $f are $st != $fl"
+}
+
+# Run "lctl set_param" for the given key on every MDT.
+# usage: mdts_set_param EXTRA_ARGS KEY [VALUE]
+# Returns non-zero if the set_param failed on any MDT.
+mdts_set_param() {
+	local arg=$1
+	local key=$2
+	local value=$3
+	local mdtno
+	local rc=0
+	if [[ "$value" != "" ]]; then
+		value="=$value"
+	fi
+	for mdtno in $(seq 1 $MDSCOUNT); do
+		local idx=$(($mdtno - 1))
+		local facet=mds${mdtno}
+		# if $arg include -P option, run 1 set_param per MDT on the MGS
+		# else, run set_param on each MDT
+		[[ $arg = *"-P"* ]] && facet=mgs
+		do_facet $facet $LCTL set_param $arg mdt.${MDT[$idx]}.$key$value
+		[[ $? != 0 ]] && rc=1
+	done
+	return $rc
+}
+
+# Wait until a command run on the given facet's active host produces the
+# expected output.
+# usage: wait_result FACET COMMAND EXPECTED [TIMEOUT]
+wait_result() {
+	local target_facet=$1
+	shift
+
+	wait_update --verbose $(facet_active_host $target_facet) "$@"
+}
+
+# Wait (up to $3 seconds) until MDT parameter $1 reads back as $2 on every
+# MDT; fail the test otherwise.
+mdts_check_param() {
+	local key="$1"
+	local target="$2"
+	local timeout="$3"
+	local mdtno
+	for mdtno in $(seq 1 $MDSCOUNT); do
+		local idx=$(($mdtno - 1))
+		wait_result mds${mdtno} \
+			"$LCTL get_param -n $MDT_PREFIX${idx}.$key" "$target" \
+			$timeout ||
+			error "$key state is not '$target' on mds${mdtno}"
+	done
+}
+
+# Persistently (set_param -P) set the HSM coordinator state ($1, e.g.
+# "enabled") on all MDTs, then wait for the config to propagate.
+cdt_set_mount_state() {
+	mdts_set_param "-P" hsm_control "$1"
+	# set_param -P is asynchronous operation and could race with set_param.
+	# In such case configs could be retrieved and applied at mgc after
+	# set_param -P completion. Sleep here to avoid race with set_param.
+	# We need at least 20 seconds. 10 for mgc_requeue_thread to wake up
+	# MGC_TIMEOUT_MIN_SECONDS + MGC_TIMEOUT_RAND_CENTISEC(5 + 5)
+	# and 10 seconds to retrieve config from server.
+	sleep 20
+}
+
+# Wait up to 20s for hsm_control to reach state $1 on every MDT.
+cdt_check_state() {
+	mdts_check_param hsm_control "$1" 20
+}
+
+# Restore the default HSM coordinator policy (+NRA, -NBR) on all MDTs if a
+# test changed it (tracked via CDT_POLICY_HAD_CHANGED).
+cdt_set_sanity_policy() {
+	if [[ "$CDT_POLICY_HAD_CHANGED" ]]
+	then
+		# clear all
+		mdts_set_param "" hsm.policy "+NRA"
+		mdts_set_param "" hsm.policy "-NBR"
+		CDT_POLICY_HAD_CHANGED=
+	fi
+}
+
+# Set hsm.$1 to $2 on every MDT; $3 carries extra set_param options
+# (e.g. "-P" for a persistent setting).
+set_hsm_param() {
+	local param=$1
+	local value=$2
+	local opt=$3
+	mdts_set_param "$opt -n" "hsm.$param" "$value"
+	return $?
+}
+
+# Wait until the HSM request of type $2 for fid $1 reaches state $3
+# (optional $4 = MDT index, default 0); fail the test on timeout.
+wait_request_state() {
+	local fid=$1
+	local request=$2
+	local state=$3
+	# 4th arg (mdt index) is optional
+	local mdtidx=${4:-0}
+	local mds=mds$(($mdtidx + 1))
+
+	# NOTE(review): assumes the request status is field 13
+	# ("status=<state>") of the hsm.actions output — confirm against
+	# the lctl output format if this breaks
+	local cmd="$LCTL get_param -n ${MDT_PREFIX}${mdtidx}.hsm.actions"
+	cmd+=" | awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
+
+	wait_result $mds "$cmd" "$state" 200 ||
+		error "request on $fid is not $state on $mds"
+}
+
+
+# Start multiop in the background on a remote client and wait until its
+# pid file appears, then export <client>_multiop_pid/<client>_do_node_pid
+# for rmultiop_stop() to use.
+# usage: rmultiop_start CLIENT FILE CMDS [WAIT_MAX]
+rmultiop_start() {
+	local client=$1
+	local file=$2
+	local cmds=$3
+	local WAIT_MAX=${4:-60}
+	local wait_time=0
+
+	# We need to run do_node in bg, because pdsh does not exit
+	# if child process of run script exists.
+	# I.e. pdsh does not exit when runmultiop_bg_pause exited,
+	# because of multiop_bg_pause -> $MULTIOP_PROG &
+	# By the same reason we need sleep a bit after do_nodes starts
+	# to let runmultiop_bg_pause start muliop and
+	# update /tmp/multiop_bg.pid ;
+	# The rm /tmp/multiop_bg.pid guarantees here that
+	# we have the updated by runmultiop_bg_pause
+	# /tmp/multiop_bg.pid file
+
+	local pid_file=$TMP/multiop_bg.pid.$$
+
+	do_node $client "MULTIOP_PID_FILE=$pid_file LUSTRE= \
+			runmultiop_bg_pause $file $cmds" &
+	local pid=$!
+	local multiop_pid
+
+	# poll every 3s until the remote multiop pid shows up or we time out
+	while [[ $wait_time -lt $WAIT_MAX ]]; do
+		sleep 3
+		wait_time=$((wait_time + 3))
+		multiop_pid=$(do_node $client cat $pid_file)
+		if [ -n "$multiop_pid" ]; then
+			break
+		fi
+	done
+
+	[ -n "$multiop_pid" ] ||
+		error "$client : Can not get multiop_pid from $pid_file "
+
+	eval export $(node_var_name $client)_multiop_pid=$multiop_pid
+	eval export $(node_var_name $client)_do_node_pid=$pid
+	local var=$(node_var_name $client)_multiop_pid
+	echo client $client multiop_bg started multiop_pid=${!var}
+	return $?
+}
+
+# Resume (SIGUSR1) the multiop started by rmultiop_start() on client $1
+# and wait for the background do_node wrapper to finish.
+rmultiop_stop() {
+	local client=$1
+	# names of the variables exported by rmultiop_start(); they are
+	# dereferenced below with ${!...}
+	local multiop_pid=$(node_var_name $client)_multiop_pid
+	local do_node_pid=$(node_var_name $client)_do_node_pid
+
+	echo "Stopping multiop_pid=${!multiop_pid} (kill ${!multiop_pid} on $client)"
+	do_node $client kill -USR1 ${!multiop_pid}
+
+	wait ${!do_node_pid}
+}
+
+# Sleep for twice the LOV/LOD qos_maxage so object allocation statistics
+# are guaranteed to have refreshed.
+sleep_maxage() {
+	local delay=$(do_facet $SINGLEMDS lctl get_param -n lo[vd].*.qos_maxage |
+		      awk '{ print $1 * 2; exit; }')
+	sleep $delay
+}
+
+# Fail the test unless file $1 has exactly $2 layout components.
+check_component_count() {
+	local comp_cnt=$($LFS getstripe --component-count $1)
+	[ $comp_cnt -eq $2 ] || error "$1, component count $comp_cnt != $2"
+}
+
+# Verify there are no init components with "extension" flag
+verify_no_init_extension() {
+	# presumably "init,extension" matches components carrying both
+	# flags at once — an initialized extension component is invalid
+	local flg_opts="--component-flags init,extension"
+	local found=$($LFS find $flg_opts $1 | wc -l)
+	[ $found -eq 0 ] || error "$1 has component with initialized extension"
+}
+
+# Verify there is at least one component starting at 0
+verify_comp_at_zero() {
+ flg_opts="--component-flags init"
+ found=$($LFS find --component-start 0M $flg_opts $1 | wc -l)
+ [ $found -eq 1 ] ||
+ error "No component starting at zero(!)"
+}
+
+# version after which Self-Extending Layouts are available
+SEL_VER="2.12.55"
+
+# Run the standard Self-Extending Layout sanity checks on file $1, which
+# is expected to have $2 layout components.
+sel_layout_sanity() {
+	local file=$1
+	local comp_cnt=$2
+
+	verify_no_init_extension $file
+	verify_comp_at_zero $file
+	check_component_count $file $comp_cnt
+}
+