#!/bin/bash
-trap 'print_summary && touch $TF_FAIL && \
- echo "test-framework exiting on error"' ERR
+trap 'print_summary && print_stack_trace | tee $TF_FAIL && \
+ echo "$TESTSUITE: FAIL: test-framework exiting on error"' ERR
set -e
#set -x
# specify environment variable containing batch job name for server statistics
export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
-# LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or
-# LOAD_LLOOP is true. LOAD_LLOOP is false by default.
-export LOAD_LLOOP=${LOAD_LLOOP:-false}
-
#export PDSH="pdsh -S -Rssh -w"
export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
export UMOUNT=${UMOUNT:-"umount -d"}
+
+export LSNAPSHOT_CONF="/etc/ldev.conf"
+export LSNAPSHOT_LOG="/var/log/lsnapshot.log"
+
# sles12 umount has a issue with -d option
[ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
export UMOUNT="umount"
export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
export RPC_MODE=${RPC_MODE:-false}
export DO_CLEANUP=${DO_CLEANUP:-true}
+ export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
export MKE2FS=$MKE2FS
if [ -z "$MKE2FS" ]; then
fi
export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"}
[ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid"
+ export LL_DECODE_LINKEA=${LL_DECODE_LINKEA:-"$LUSTRE/utils/ll_decode_linkea"}
+ [ ! -f "$LL_DECODE_LINKEA" ] && export LL_DECODE_LINKEA="ll_decode_linkea"
export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
[ ! -f "$MKFS" ] && export MKFS="mkfs.lustre"
export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
#
# All Lustre versions support "lctl get_param" to report the version of the
# code running in the kernel (what our tests are interested in), but it
-# doesn't work without modules loaded. If that fails, use "lctl version"
-# instead, which is easy to parse and works without the kernel modules,
-# but was only added in 2.6.50. If that also fails, fall back to calling
-# "lctl lustre_build_version" which prints either (or both) the userspace
-# and kernel build versions, but is deprecated and should eventually be
-# removed.
+# doesn't work without modules loaded. After 2.9.53 and in upstream kernels
+# the "version" parameter doesn't include "lustre: " at the beginning.
+# If that fails, call "lctl lustre_build_version" which prints either (or both)
+# the userspace and kernel build versions, but until 2.8.55 required root
+# access to get the Lustre kernel version. If that also fails, fall back to
+# using "lctl --version", which is easy to parse and works without the kernel
+# modules, but was only added in 2.6.50 and only prints the lctl tool version,
+# not the module version, though they are usually the same.
#
-# output: prints version string to stdout in dotted-decimal format
+# Various commands and their output format for different Lustre versions:
+# lctl get_param version: 2.9.55
+# lctl get_param version: lustre: 2.8.53
+# lctl get_param version: lustre: 2.6.52
+# kernel: patchless_client
+# build: v2_6_92_0-2.6.32-431.el6_lustre.x86_64
+# lctl lustre_build_version: Lustre version: 2.8.53_27_gae67fc01
+# lctl lustre_build_version: error: lustre_build_version: Permission denied
+# (as non-root user) lctl version: v2_6_92_0-2.6.32-431.el6.x86_64
+# lctl lustre_build_version: Lustre version: 2.5.3-2.6.32.26-175.fc12.x86_64
+# lctl version: 2.5.3-2.6.32..26-175fc12.x86_64
+# lctl --version: lctl 2.6.50
+#
+# output: prints version string to stdout in (up to 4) dotted-decimal values
lustre_build_version() {
	local facet=${1:-client}
+	local ver
+
+	# Try version sources in decreasing order of accuracy (see the
+	# sample outputs above): kernel "version" parameter first, then
+	# lustre_build_version, finally the lctl tool's own --version.
+	local ver=$(do_facet $facet "$LCTL get_param -n version 2>/dev/null ||
+				$LCTL lustre_build_version 2>/dev/null ||
+				$LCTL --version 2>/dev/null | cut -d' ' -f2")
+	# Prefer the first line mentioning "lustre: " or "version: ", if any.
+	local lver=$(egrep -i "lustre: |version: " <<<$ver | head -n 1)
+	[ -n "$lver" ] && ver="$lver"
-	# lustre: 2.8.52
-	local VER=$(do_facet $facet $LCTL get_param -n version 2> /dev/null |
-		    awk '/lustre: / { print $2 }')
-	# lctl 2.6.50
-	[ -z "$VER" ] && VER=$(do_facet $facet $LCTL --version 2>/dev/null |
-			       awk '{ print $2 }')
-	# Lustre version: 2.5.3-gfcfd782-CHANGED-2.6.32.26-175.fc12.x86_64
-	# lctl version: 2.5.3-gfcfd782-CHANGED-2.6.32.26-175.fc12.x86_64
-	[ -z "$VER" ] && VER=$(do_facet $facet $LCTL lustre_build_version |
-			       awk '/version:/ { print $3; exit; }')
-	sed -e 's/^v//' -e 's/-.*//' -e 's/_/./g' <<<$VER
+	# Strip any "tag: " prefix, a leading "v", everything from the first
+	# "-" (git hash/kernel suffix), turn "_" into ".", then keep at most
+	# 4 dotted-decimal components.
+	sed -e 's/.*: //' -e 's/^v//' -e 's/-.*//' -e 's/_/./g' <<<$ver |
+		cut -d. -f1-4
}
# Report the Lustre numeric build version code for the supplied facet.
/sbin/lsmod | grep -q "^\<$1\>"
}
+PRLFS=false
+# lustre_insmod: insmod wrapper that works around Parallels' prlfs, which
+# cannot load kernel modules in place; the module is copied to a temp file
+# on the host filesystem and loaded from there instead.
+#
+# usage: lustre_insmod <module-path> [module args...]
+# returns: insmod's exit status
+lustre_insmod() {
+	local module=$1
+	shift
+	local args="$@"
+	local msg
+	local rc=0
+
+	if ! $PRLFS; then
+		msg="$(insmod $module $args 2>&1)" && return 0 || rc=$?
+	fi
+
+	# parallels can't load modules directly from prlfs, use /tmp instead
+	# 7c7c6673 is the prlfs filesystem type magic reported by stat -f %t
+	if $PRLFS || [[ "$(stat -f -c%t $module)" == "7c7c6673" ]]; then
+		local target="$(mktemp)"
+
+		cp "$module" "$target"
+		insmod $target $args
+		rc=$?
+		# remember we are on prlfs so later loads skip the direct try
+		[[ $rc == 0 ]] && PRLFS=true
+		rm -f $target
+	else
+		echo "$msg"
+	fi
+	return $rc
+}
+
# Load a module on the system where this is running.
#
# usage: load_module module_name [module arguments for insmod/modprobe]
# we're passing options on the command-line.
if [[ "$BASE" == "lnet_selftest" ]] &&
[[ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]]; then
- insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
+ lustre_insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
elif [[ -f ${LUSTRE}/${module}${EXT} ]]; then
[[ "$BASE" != "ptlrpc_gss" ]] || modprobe sunrpc
- insmod ${LUSTRE}/${module}${EXT} "$@"
+ lustre_insmod ${LUSTRE}/${module}${EXT} "$@"
else
# must be testing a "make install" or "rpm" installation
# note failed to load ptlrpc_gss is considered not fatal
fi
}
-llite_lloop_enabled() {
- local n1=$(uname -r | cut -d. -f1)
- local n2=$(uname -r | cut -d. -f2)
- local n3=$(uname -r | cut -d- -f1 | cut -d. -f3)
-
- # load the llite_lloop module for < 2.6.32 kernels
- if [[ $n1 -lt 2 ]] || [[ $n1 -eq 2 && $n2 -lt 6 ]] || \
- [[ $n1 -eq 2 && $n2 -eq 6 && $n3 -lt 32 ]] || \
- $LOAD_LLOOP; then
- return 0
- fi
- return 1
-}
-
load_modules_local() {
if [ -n "$MODPROBE" ]; then
# use modprobe
fi
load_module ../libcfs/libcfs/libcfs
-
- [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG"
- [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }"
- load_module ../lnet/lnet/lnet
- case $NETTYPE in
- o2ib)
- LNETLND="o2iblnd/ko2iblnd"
- ;;
- *)
- ;;
- esac
- LNETLND=${LNETLND:-"socklnd/ksocklnd"}
+ # Prevent local MODOPTS_LIBCFS being passed as part of environment
+ # variable to remote nodes
+ unset MODOPTS_LIBCFS
+
+ set_default_debug
+ load_module ../lnet/lnet/lnet
+
+ LNDPATH=${LNDPATH:-"../lnet/klnds"}
+ if [ -z "$LNETLND" ]; then
+ case $NETTYPE in
+ o2ib*) LNETLND="o2iblnd/ko2iblnd" ;;
+ tcp*) LNETLND="socklnd/ksocklnd" ;;
+ *) local lnd="${NETTYPE%%[0-9]}lnd"
+ [ -f "$LNDPATH/$lnd/k$lnd.ko" ] &&
+ LNETLND="$lnd/k$lnd" ||
+ LNETLND="socklnd/ksocklnd"
+ esac
+ fi
load_module ../lnet/klnds/$LNETLND
load_module obdclass/obdclass
load_module ptlrpc/ptlrpc
[ "$LQUOTA" != "no" ] &&
load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
- modprobe zfs
+ lsmod | grep zfs >&/dev/null || modprobe zfs
load_module osd-zfs/osd_zfs
fi
if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
fi
load_module llite/lustre
- llite_lloop_enabled && load_module llite/llite_lloop
[ -d /r ] && OGDB=${OGDB:-"/r/tmp"}
OGDB=${OGDB:-$TMP}
rm -f $OGDB/ogdb-$HOSTNAME
echo -n $size
}
+# fs_inode_ksize: print the approximate per-inode disk usage (in KB) for
+# the backing filesystem type of the given facet (default: $SINGLEMDS).
+# Prints 0 for filesystem types without a known estimate.
+fs_inode_ksize() {
+	local facet=${1:-$SINGLEMDS}
+	local fstype=$(facet_fstype $facet)
+	local size=0
+	case $fstype in
+		ldiskfs) size=4;;  # ~4KB per inode
+		zfs)     size=11;; # 10 to 11KB per inode
+	esac
+
+	echo -n $size
+}
+
check_gss_daemon_nodes() {
local list=$1
dname=$2
echo -n $device
}
+# running_in_vm: detect whether this node is a virtual machine.
+# Prints the lowercase hypervisor name to stdout, or nothing on bare metal.
+running_in_vm() {
+	local virt=$(virt-what 2> /dev/null)
+
+	# NOTE(review): $? here is the status of the 'local' builtin, not of
+	# virt-what itself, so the -n test on $virt is the effective gate.
+	[ $? -eq 0 ] && [ -n "$virt" ] && { echo $virt; return; }
+
+	# fall back to the DMI product name when virt-what found nothing
+	virt=$(dmidecode -s system-product-name | awk '{print $1}')
+
+	case $virt in
+		VMware|KVM|VirtualBox|Parallels)
+			echo $virt | tr '[A-Z]' '[a-z]' ;;
+		*) ;;
+	esac
+}
+
#
# Re-read the partition table on failover partner host.
# After a ZFS storage pool is created on a shared device, the partition table
}
#
+#
+# Get ZFS local fsname.
+#
+# usage: zfs_local_fsname <facet>
+# Prints the basename of the facet's dataset path (the local fs name).
+zfs_local_fsname() {
+	local facet=$1
+	local lfsname=$(basename $(facet_device $facet))
+
+	echo -n $lfsname
+}
+
+#
# Create ZFS storage pool.
#
create_zpool() {
shift 3
local opts=${@:-"-o cachefile=none"}
- do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL create -f $opts $poolname $vdev"
}
if [[ -n "$poolname" ]]; then
opts+=" -d $(dirname $(facet_vdevice $facet))"
- do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f $opts $poolname"
fi
}
#
+# Reimport ZFS storage pool with new name
+#
+# usage: reimport_zpool <facet> <newpool>
+# Exports the facet's current pool, then re-imports it under $newpool with
+# "cachefile=none" so it is not automatically imported at system startup.
+reimport_zpool() {
+	local facet=$1
+	local newpool=$2
+	local opts="-o cachefile=none"
+	local poolname=$(zpool_name $facet)
+
+	# search the directory holding the facet's vdev for the pool devices
+	opts+=" -d $(dirname $(facet_vdevice $facet))"
+	do_facet $facet "$ZPOOL export $poolname;
+			 $ZPOOL import $opts $poolname $newpool"
+}
+
+#
# Set the "cachefile=none" property on ZFS storage pool so that the pool
# is not automatically imported on system startup.
#
if [ -f /sys/devices/system/cpu/possible ]; then
local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
else
- local cpus=$(getconf _NPROCESSORS_CONF)
+ local cpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
fi
# bug 19944, adjust size to be -gt num_possible_cpus()
# commit the device label change to disk
if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then
- do_facet $facet "sync; sync; sync"
+ echo "Commit the device label on ${!dev}"
+ do_facet $facet "sync; sleep 1; sync"
fi
if [[ $(facet_fstype $facet) == zfs ]]; then
# export ZFS storage pool
- export_zpool $facet
+ [ "$KEEP_ZPOOL" = "true" ] || export_zpool $facet
fi
}
echo $free_inodes
}
+#
+# Get the OST device status from 'lfs df' with a given OST index.
+#
+# usage: ost_dev_status <ost_idx> [mount-point]
+# Prints field 7 of the matching OST line of lfs_df output.
+# NOTE(review): field 7 is assumed to be the status-flags column of the
+# lfs_df output format — confirm against lfs_df before relying on it.
+ost_dev_status() {
+	local ost_idx=$1
+	local mnt_pnt=${2:-$MOUNT}
+	local ost_uuid
+
+	ost_uuid=$(ostuuid_from_index $ost_idx $mnt_pnt)
+	lfs_df $mnt_pnt | awk '/'$ost_uuid'/ { print $7 }'
+}
+
setup_quota(){
local mntpt=$1
local client=$1
local mnt=$2
local force
- local busy
+ local busy
local need_kill
[ "$3" ] && force=-f
echo \\\$(hostname) env are INSANE!;
exit 1;
fi"
- [ $? -eq 0 ] || rc=1
+ [ $? -eq 0 ] || rc=1
done
return $rc
}
sanity_mount_check_servers () {
- [ "$CLIENTONLY" ] &&
+ [ -n "$CLIENTONLY" ] &&
{ echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
echo Checking servers environments
fi
do_nodes $clients "
-running=\\\$(mount | grep -c $mnt' ');
-rc=0;
-if [ \\\$running -eq 0 ] ; then
- mkdir -p $mnt;
- $MOUNT_CMD $flags $opts $device $mnt;
- rc=\\\$?;
-fi;
-exit \\\$rc" || return ${PIPESTATUS[0]}
+ running=\\\$(mount | grep -c $mnt' ');
+ rc=0;
+ if [ \\\$running -eq 0 ] ; then
+ mkdir -p $mnt;
+ $MOUNT_CMD $flags $opts $device $mnt;
+ rc=\\\$?;
+ else
+ lustre_mnt_count=\\\$(mount | grep $mnt' ' | \
+ grep 'type lustre' | wc -l);
+ if [ \\\$running -ne \\\$lustre_mnt_count ] ; then
+ echo zconf_mount_clients FAILED: \
+ mount count \\\$running, not matching \
+ with mount count of 'type lustre' \
+ \\\$lustre_mnt_count;
+ rc=1;
+ fi;
+ fi;
+ exit \\\$rc" || return ${PIPESTATUS[0]}
echo "Started clients $clients: "
do_nodes $clients "mount | grep $mnt' '"
wait_for_function --quiet "! ping -w 3 -c 1 $host" 5 1 && return 0
echo "waiting for $host to fail attempts=$attempts"
[ $i -lt $attempts ] || \
- { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
+ { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
done
}
}
start_client_load() {
-	local client=$1
-	local load=$2
-	local var=$(node_var_name $client)_load
-	eval export ${var}=$load
-
-	do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
-BREAK_ON_ERROR=$BREAK_ON_ERROR \
-END_RUN_FILE=$END_RUN_FILE \
-LOAD_PID_FILE=$LOAD_PID_FILE \
-TESTLOG_PREFIX=$TESTLOG_PREFIX \
-TESTNAME=$TESTNAME \
-DBENCH_LIB=$DBENCH_LIB \
-DBENCH_SRC=$DBENCH_SRC \
-CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
-LFS=$LFS \
-run_${load}.sh" &
-	local ppid=$!
-	log "Started client load: ${load} on $client"
-
-	# get the children process IDs
-	local pids=$(ps --ppid $ppid -o pid= | xargs)
-	CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids"
-	return 0
+	# Start the run_$2.sh workload on client $1 in the background with
+	# the test environment exported, recording parent and child PIDs in
+	# CLIENT_LOAD_PIDS so the load can be killed later.
+	local client=$1
+	local load=$2
+	local var=$(node_var_name $client)_load
+	eval export ${var}=$load
+
+	do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
+			BREAK_ON_ERROR=$BREAK_ON_ERROR \
+			END_RUN_FILE=$END_RUN_FILE \
+			LOAD_PID_FILE=$LOAD_PID_FILE \
+			TESTLOG_PREFIX=$TESTLOG_PREFIX \
+			TESTNAME=$TESTNAME \
+			DBENCH_LIB=$DBENCH_LIB \
+			DBENCH_SRC=$DBENCH_SRC \
+			CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
+			LFS=$LFS \
+			LCTL=$LCTL \
+			FSNAME=$FSNAME \
+			run_${load}.sh" &
+	local ppid=$!
+	log "Started client load: ${load} on $client"
+
+	# get the children process IDs
+	local pids=$(ps --ppid $ppid -o pid= | xargs)
+	CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids"
+	return 0
}
start_client_loads () {
# only for remote client
check_client_load () {
-	local client=$1
-	local var=$(node_var_name $client)_load
-	local TESTLOAD=run_${!var}.sh
-
-	ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1
-
-	# bug 18914: try to connect several times not only when
-	# check ps, but while check_catastrophe also
-	local tries=3
-	local RC=254
-	while [ $RC = 254 -a $tries -gt 0 ]; do
-		let tries=$tries-1
-		# assume success
-		RC=0
-		if ! check_catastrophe $client; then
-			RC=${PIPESTATUS[0]}
-			if [ $RC -eq 254 ]; then
-				# FIXME: not sure how long we shuold sleep here
-				sleep 10
-				continue
-			fi
-			echo "check catastrophe failed: RC=$RC "
-			return $RC
-		fi
-	done
-	# We can continue try to connect if RC=254
-	# Just print the warning about this
-	if [ $RC = 254 ]; then
-		echo "got a return status of $RC from do_node while checking catastrophe on $client"
-	fi
-
-	# see if the load is still on the client
-	tries=3
-	RC=254
-	while [ $RC = 254 -a $tries -gt 0 ]; do
-		let tries=$tries-1
-		# assume success
-		RC=0
-		if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then
-			RC=${PIPESTATUS[0]}
-			sleep 30
-		fi
-	done
-	if [ $RC = 254 ]; then
-		echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client"
-		# see if we can diagnose a bit why this is
-	fi
+	# Verify the client-load script started for client $1 is still
+	# running and the node is healthy; a status of 254 from do_node
+	# means "node unreachable" and is retried a few times.
+	local client=$1
+	local var=$(node_var_name $client)_load
+	local testload=run_${!var}.sh
+
+	ps auxww | grep -v grep | grep $client | grep -q $testload || return 1
+
+	# bug 18914: try to connect several times not only when
+	# check ps, but while check_node_health also
+
+	local tries=3
+	local RC=254
+	while [ $RC = 254 -a $tries -gt 0 ]; do
+		let tries=$tries-1
+		# assume success
+		RC=0
+		if ! check_node_health $client; then
+			RC=${PIPESTATUS[0]}
+			if [ $RC -eq 254 ]; then
+				# FIXME: not sure how long we should sleep here
+				sleep 10
+				continue
+			fi
+			echo "check node health failed: RC=$RC "
+			return $RC
+		fi
+	done
+	# We can continue try to connect if RC=254
+	# Just print the warning about this
+	if [ $RC = 254 ]; then
+		echo "got a return status of $RC from do_node while checking " \
+		     "node health on $client"
+	fi
+
+	# see if the load is still on the client
+	tries=3
+	RC=254
+	while [ $RC = 254 -a $tries -gt 0 ]; do
+		let tries=$tries-1
+		# assume success
+		RC=0
+		if ! do_node $client \
+			"ps auxwww | grep -v grep | grep -q $testload"; then
+			RC=${PIPESTATUS[0]}
+			sleep 30
+		fi
+	done
+	if [ $RC = 254 ]; then
+		echo "got a return status of $RC from do_node while checking " \
+		     "(node health and 'ps') the client load on $client"
+		# see if we can diagnose a bit why this is
+	fi
-	return $RC
+	return $RC
}
check_client_loads () {
local clients=${1//,/ }
grep -v 'Found no match'
}
+# wait_zfs_commit: when facet $1 is backed by ZFS, sleep $2 seconds so DMU
+# transactions can commit and freed space becomes visible; no-op otherwise.
+wait_zfs_commit() {
+	# the occupied disk space will be released
+	# only after DMUs are committed
+	if [[ $(facet_fstype $1) == zfs ]]; then
+		echo "sleep $2 for ZFS OSD"
+		sleep $2
+	fi
+}
+
wait_delete_completed_mds() {
local MAX_WAIT=${1:-20}
# for ZFS, waiting more time for DMUs to be committed
mds2sync="$mds2sync $node"
done
if [ -z "$mds2sync" ]; then
+ wait_zfs_commit $SINGLEMDS $ZFS_WAIT
return
fi
mds2sync=$(comma_list $mds2sync)
"$LCTL get_param -n osc.*MDT*.sync_*" | calc_sum)
#echo "$node: $changes changes on all"
if [[ $changes -eq 0 ]]; then
- etime=$(date +%s)
- #echo "delete took $((etime - stime)) seconds"
-
- # the occupied disk space will be released
- # only after DMUs are committed
- if [[ $(facet_fstype $SINGLEMDS) == zfs ]]; then
- echo "sleep $ZFS_WAIT for ZFS OSD"
- sleep $ZFS_WAIT
- fi
-
+ wait_zfs_commit $SINGLEMDS $ZFS_WAIT
return
fi
sleep 1
return 1
}
+# Wait OSTs to be active on both client and MDT side.
+# First waits until the client's lov target_obd reports $OSTCOUNT ACTIVE
+# OSTs, then waits until MDT0000's osp prealloc_last_id shows a nonzero
+# value for every OST; calls error() if either wait_update times out.
+wait_osts_up() {
+	local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
+		awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
+	wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
+		error "wait_update OSTs up on client failed"
+
+	cmd="$LCTL get_param osp.$FSNAME-OST*-MDT0000.prealloc_last_id |
+		awk '/=[1-9][0-9]/ { c += 1 } END { printf \\\"%d\\\", c }'"
+	wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
+		error "wait_update OSTs up on MDT0000 failed"
+}
+
wait_destroy_complete () {
echo "Waiting for local destroys to complete"
# MAX value shouldn't be big as this mean server responsiveness
return $rc
}
+# lfs_df_check: run "lfs df" on the given comma-separated client list
+# (default $CLIENTS), or locally when the list is empty, to verify the
+# mounted filesystem is responding.
+lfs_df_check() {
+	local clients=${1:-$CLIENTS}
+
+	if [ -z "$clients" ]; then
+		$LFS df $MOUNT
+	else
+		$PDSH $clients "$LFS df $MOUNT" > /dev/null
+	fi
+}
+
+
clients_up() {
-	# not every config has many clients
-	sleep 1
-	if [ ! -z "$CLIENTS" ]; then
-		$PDSH $CLIENTS "stat -f $MOUNT" > /dev/null
-	else
-		stat -f $MOUNT > /dev/null
-	fi
+	# not every config has many clients
+	sleep 1
+	# use lfs df so the check goes through Lustre, not generic statfs
+	lfs_df_check
}
client_up() {
-	local client=$1
-	# usually checked on particular client or locally
-	sleep 1
-	if [ ! -z "$client" ]; then
-		$PDSH $client "stat -f $MOUNT" > /dev/null
-	else
-		stat -f $MOUNT > /dev/null
-	fi
+	# usually checked on particular client or locally
+	sleep 1
+	# $1 is an optional client hostname; empty checks the local mount
+	lfs_df_check $1
}
client_evicted() {
clients_up || error "post-failover stat: $?"
}
-do_lmc() {
- echo There is no lmc. This is mountconf, baby.
- exit 1
-}
-
host_nids_address() {
-	local nodes=$1
-	local kind=$2
+	local nodes=$1
+	# optional LNet network filter used as a grep pattern; "." matches all
+	local net=${2:-"."}
-	if [ -n "$kind" ]; then
-		nids=$(do_nodes $nodes "$LCTL list_nids | grep $kind | cut -f 1 -d '@'")
-	else
-		nids=$(do_nodes $nodes "$LCTL list_nids all | cut -f 1 -d '@'")
-	fi
-	echo $nids
+	# print the address part (before '@') of each matching NID
+	do_nodes $nodes "$LCTL list_nids | grep $net | cut -f 1 -d @"
}
h2name_or_ip() {
fi
}
-h2ptl() {
-	if [ "$1" = "'*'" ]; then echo \'*\'; else
-		ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | \
-			awk '{print $1}'`
-		if [ -z "$ID" ]; then
-			echo "Could not get a ptl id for $1..."
-			exit 1
-		fi
-		echo $ID"@ptl"
+# h2nettype: map host $1 to a NID, using $NETTYPE when it is set, otherwise
+# the network type passed in $2; replaces the old per-network h2* helpers.
+h2nettype() {
+	if [[ -n "$NETTYPE" ]]; then
+		h2name_or_ip "$1" "$NETTYPE"
+	else
+		h2name_or_ip "$1" "$2"
	fi
}
-declare -fx h2ptl
+declare -fx h2nettype
+# Wrapper function to print the deprecation warning
h2tcp() {
-	h2name_or_ip "$1" "tcp"
-}
-declare -fx h2tcp
-
-h2elan() {
-	if [ "$1" = "'*'" ]; then echo \'*\'; else
-		if type __h2elan >/dev/null 2>&1; then
-			ID=$(__h2elan $1)
-		else
-			ID=`echo $1 | sed 's/[^0-9]*//g'`
-		fi
-		echo $ID"@elan"
+	echo "h2tcp: deprecated, use h2nettype instead" 1>&2
+	# honor an explicit $NETTYPE, otherwise fall back to tcp
+	if [[ -n "$NETTYPE" ]]; then
+		h2nettype "$@"
+	else
+		h2nettype "$1" "tcp"
	fi
}
-declare -fx h2elan
+# Wrapper function to print the deprecation warning
h2o2ib() {
-	h2name_or_ip "$1" "o2ib"
+	echo "h2o2ib: deprecated, use h2nettype instead" 1>&2
+	# honor an explicit $NETTYPE, otherwise fall back to o2ib
+	if [[ -n "$NETTYPE" ]]; then
+		h2nettype "$@"
+	else
+		h2nettype "$1" "o2ib"
+	fi
}
-declare -fx h2o2ib
# This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST
# expressions format. As a bonus we can then just pass in those variables
group=${group%%]*}
for range in ${group//,/ }; do
+ local order
+
begin=${range%-*}
end=${range#*-}
begin=$(echo $begin | sed 's/0*//')
[ -z $begin ] && begin=0
- for num in $(seq -f "%0${padlen}g" $begin $end); do
+ if [ ! -z "${begin##[!0-9]*}" ]; then
+ order=$(seq -f "%0${padlen}g" $begin $end)
+ else
+ order=$(eval echo {$begin..$end});
+ fi
+
+ for num in $order; do
value="${name#*,}${num}${back}"
[ "$value" != "${value/\[/}" ] && {
value=$(hostlist_expand "$value")
myList="${list%% *}"
while [[ "$list" != ${myList##* } ]]; do
- list=${list//${list%% *} /}
- myList="$myList ${list%% *}"
+ local tlist=" $list"
+ list=${tlist// ${list%% *} / }
+ list=${list:1}
+ myList="$myList ${list%% *}"
done
myList="${myList%* }";
mount_ldiskfs() {
local facet=$1
local dev=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local opts
if ! do_facet $facet test -b $dev; then
unmount_ldiskfs() {
local facet=$1
local dev=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
do_facet $facet $UMOUNT $mnt
}
mount_zfs() {
local facet=$1
local ds=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local canmnt
local mntpt
unmount_zfs() {
local facet=$1
local ds=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local var_mntpt=mz_$(var_name ${facet}_$ds)_mountpoint
local var_canmnt=mz_$(var_name ${facet}_$ds)_canmount
local mntpt=${!var_mntpt}
mount_fstype() {
local facet=$1
+ local mnt=$2
local fstype=$(facet_fstype $facet)
- mount_$fstype $facet
+ mount_$fstype $facet $mnt
}
unmount_fstype() {
local facet=$1
+ local mnt=$2
local fstype=$(facet_fstype $facet)
- unmount_$fstype $facet
+ unmount_$fstype $facet $mnt
}
########
zconf_umount_clients $clients $MOUNT "$*" || true
[ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
- [ "$CLIENTONLY" ] && return
+ [ -n "$CLIENTONLY" ] && return
+
# The add fn does rm ${facet}active file, this would be enough
# if we use do_facet <facet> only after the facet added, but
# currently we use do_facet mds in local.sh
}
cleanup_echo_devs () {
-	local devs=$($LCTL dl | grep echo | awk '{print $4}')
+	# reset the EXIT trap (trap 0) so cleanup does not retrigger handlers
+	trap 0
+	local dev
+	local devs=$($LCTL dl | grep echo | awk '{print $4}')
-	for dev in $devs; do
-		$LCTL --device $dev cleanup
-		$LCTL --device $dev detach
-	done
+	# detach every echo device still listed by lctl dl
+	for dev in $devs; do
+		$LCTL --device $dev cleanup
+		$LCTL --device $dev detach
+	done
}
cleanupall() {
var=${facet}failover_HOST
if [ -n "${!var}" ] && [ ${!var} != $(facet_host $facet) ]; then
- opts+=" --failnode=$(h2$NETTYPE ${!var})"
+ opts+=" --failnode=$(h2nettype ${!var})"
fi
opts+=${TIMEOUT:+" --param=sys.timeout=$TIMEOUT"}
echo -n "$opts"
}
+# mountfs_opts: emit " --mountfsoptions=..." for facet $1 when the facet
+# type's ${TYPE}_MOUNT_FS_OPTS variable is set; emits nothing otherwise.
+mountfs_opts() {
+	local facet=$1
+	local type=$(facet_type $facet)
+	# indirect lookup, e.g. MDS_MOUNT_FS_OPTS or OST_MOUNT_FS_OPTS
+	local var=${type}_MOUNT_FS_OPTS
+	local opts=""
+	if [ -n "${!var}" ]; then
+		opts+=" --mountfsoptions=${!var}"
+	fi
+	echo -n "$opts"
+}
+
check_ost_indices() {
local index_count=${#OST_INDICES[@]}
[[ $index_count -eq 0 || $OSTCOUNT -le $index_count ]] && return 0
done
}
+# __touch_device: ensure the backing "device" path for a facet exists,
+# creating it with touch when it is a plain file (loopback testing).
+# Refuses to create paths under /dev (except /dev/shm) so a typo cannot
+# create a bogus node in /dev.
+#
+# usage: __touch_device <mgs|mds|ost> <facet number>
+__touch_device()
+{
+	local facet_type=$1 # mgs || mds || ost
+	local facet_num=$2
+	local facet=${1}${2}
+	local device
+
+	case "$(facet_fstype $facet)" in
+	ldiskfs)
+		device=$(${facet_type}devname $facet_num)
+		;;
+	zfs)
+		device=$(${facet_type}vdevname $facet_num)
+		;;
+	*)
+		error "Unhandled filesystem type"
+		;;
+	esac
+
+	do_facet $facet "[ -e \"$device\" ]" && return
+
+	# Note: the following check only works with absolute paths
+	[[ ! "$device" =~ ^/dev/ ]] || [[ "$device" =~ ^/dev/shm/ ]] ||
+		error "$facet: device '$device' does not exist"
+
+	do_facet $facet "touch \"${device}\""
+}
+
format_mgs() {
local quiet
fi
echo "Format mgs: $(mgsdevname)"
reformat_external_journal mgs
- add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \
+
+ # touch "device" in case it is a loopback file for testing and needs to
+ # be created. mkfs.lustre doesn't do this to avoid accidentally writing
+ # to non-existent files in /dev if the admin made a typo during setup
+ __touch_device mgs
+
+ add mgs $(mkfs_opts mgs $(mgsdevname)) $(mountfs_opts mgs) --reformat \
$(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} || exit 10
}
fi
echo "Format mds$num: $(mdsdevname $num)"
reformat_external_journal mds$num
+
+ __touch_device mds $num
+
add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \
- --reformat $(mdsdevname $num) $(mdsvdevname $num) \
- ${quiet:+>/dev/null} || exit 10
+ $(mountfs_opts mds$num) --reformat $(mdsdevname $num) \
+ $(mdsvdevname $num) ${quiet:+>/dev/null} || exit 10
}
format_ost() {
fi
echo "Format ost$num: $(ostdevname $num)"
reformat_external_journal ost$num
+
+ __touch_device ost $num
+
add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \
- --reformat $(ostdevname $num) $(ostvdevname ${num}) \
- ${quiet:+>/dev/null} || exit 10
+ $(mountfs_opts ost$num) --reformat $(ostdevname $num) \
+ $(ostvdevname ${num}) ${quiet:+>/dev/null} || exit 10
}
formatall() {
stopall
# Set hostid for ZFS/SPL zpool import protection
- do_rpc_nodes "$(comma_list $(remote_nodes_list))" set_hostid
+ # (Assumes MDS version is also OSS version)
+ if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.8.54) ];
+ then
+ do_rpc_nodes "$(comma_list $(remote_nodes_list))" set_hostid
+ fi
# We need ldiskfs here, may as well load them all
load_modules
- [ "$CLIENTONLY" ] && return
+ [ -n "$CLIENTONLY" ] && return
echo Formatting mgs, mds, osts
if ! combined_mgs_mds ; then
format_mgs
}
setupall() {
- nfs_client_mode && return
+ local arg1=$1
+
+ nfs_client_mode && return
cifs_client_mode && return
- sanity_mount_check ||
- error "environments are insane!"
+ sanity_mount_check || error "environments are insane!"
- load_modules
+ load_modules
- if [ -z "$CLIENTONLY" ]; then
- echo Setup mgs, mdt, osts
- echo $WRITECONF | grep -q "writeconf" && \
- writeconf_all
- if ! combined_mgs_mds ; then
+ if [ -z "$CLIENTONLY" ]; then
+ echo Setup mgs, mdt, osts
+ echo $WRITECONF | grep -q "writeconf" && writeconf_all
+ if ! combined_mgs_mds ; then
start mgs $(mgsdevname) $MGS_MOUNT_OPTS
- fi
+ fi
for num in `seq $MDSCOUNT`; do
DEVNAME=$(mdsdevname $num)
fi
[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
+
+ if [ ! -z $arg1 ]; then
+ [ "$arg1" = "server_only" ] && return
+ fi
+
mount_client $MOUNT
[ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT
clients_up
}
init_facet_vars () {
- [ "$CLIENTONLY" ] && return 0
+ [ -n "$CLIENTONLY" ] && return 0
local facet=$1
shift
local device=$1
}
init_param_vars () {
- remote_mds_nodsh ||
- TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
+ TIMEOUT=$(lctl get_param -n timeout)
+ TIMEOUT=${TIMEOUT:-20}
+
+ remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
+ TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
log "Using TIMEOUT=$TIMEOUT"
osc_ensure_active $SINGLEMDS $TIMEOUT
local mntpt=$1
local mounted=$(mount | grep " $mntpt ")
- if [ "$CLIENTONLY" ]; then
+ if [ -n "$CLIENTONLY" ]; then
# bug 18021
# CLIENTONLY should not depend on *_HOST settings
local mgc=$($LCTL device_list | awk '/MGC/ {print $4}')
# in theory someone could create a new,
# client-only config file that assumed lustre was already
# configured and didn't set the MGSNID. If MGSNID is not set,
- # then we should use the mgs nid currently being used
+ # then we should use the mgs nid currently being used
# as the default value. bug 18021
[[ x$MGSNID = x ]] &&
MGSNID=${mgc//MGC/}
return 0
fi
- local myMGS_host=$mgs_HOST
- if [ "$NETTYPE" = "ptl" ]; then
- myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
- fi
-
echo Checking config lustre mounted on $mntpt
local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}')
mgshost=$(echo $mgshost | awk -F: '{print $1}')
-# if [ "$mgshost" != "$myMGS_host" ]; then
-# log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
-# Please use correct config or set mds_HOST correctly!"
-# fi
-
}
check_config_clients () {
restore_mount $MOUNT2
export I_MOUNTED2=yes
fi
- fi
+ fi
# 5.
# MOUNT is mounted MOUNT2 is not mounted
set_default_debug_nodes $(comma_list $(nodes_list))
fi
- if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then
+ if [ -z "$CLIENTONLY" -a $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ]; then
local facets=""
[ "$(facet_fstype ost1)" = "ldiskfs" ] &&
facets="$(get_facets OST)"
set_flavor_all $SEC
fi
- #Enable remote MDT create for testing
- for num in $(seq $MDSCOUNT); do
- do_facet mds$num \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- done
+ if [ -z "$CLIENTONLY" ]; then
+ # Enable remote MDT create for testing
+ for num in $(seq $MDSCOUNT); do
+ do_facet mds$num \
+ lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
+ 2>/dev/null
+ done
+ fi
if [ "$ONLY" == "setup" ]; then
exit 0
true
DEBUGSAVE=""
- [ -n "DEBUGSAVE_SERVER" ] &&
+ [ -n "$DEBUGSAVE_SERVER" ] &&
do_nodes $(comma_list $(all_server_nodes)) \
"$LCTL set_param debug=\\\"${DEBUGSAVE_SERVER}\\\"" ||
true
exit 1
}
+# Throw an error if it's not running in vm - usually for performance
+# verification
+#
+# usage: error_not_in_vm <error message...>
+# Inside a VM the failure is downgraded to error_ignore (env=<hypervisor>).
+error_not_in_vm() {
+	local virt=$(running_in_vm)
+	if [[ -n "$virt" ]]; then
+		echo "running in VM '$virt', ignore error"
+		error_ignore env=$virt "$@"
+	else
+		error "$@"
+	fi
+}
+
skip_env () {
$FAIL_ON_SKIP_ENV && error false $@ || skip $@
}
#
-# Log a message (on all nodes) padded with "=" before and after.
+# Log a message (on all nodes) padded with "=" before and after.
# Also appends a timestamp and prepends the testsuite name.
-#
+#
EQUALS="===================================================================================================="
banner() {
cd $SAVE_PWD
reset_fail_loc
check_grant ${testnum} || error "check_grant $testnum failed with $?"
- check_catastrophe || error "LBUG/LASSERT detected"
+ check_node_health
check_dmesg_for_errors || error "Error in dmesg detected"
if [ "$PARALLEL" != "yes" ]; then
ps auxww | grep -v grep | grep -q multiop &&
export base=$(basetest $1)
[ "$CHECK_GRANT" == "no" ] && return 0
- testname=GCHECK_ONLY_${base}
- [ ${!testname}x == x ] && return 0
+ testnamebase=GCHECK_ONLY_${base}
+ testname=GCHECK_ONLY_$1
+ [ ${!testnamebase}x == x -a ${!testname}x == x ] && return 0
echo -n "checking grant......"
awk '{ total += $1 } END { printf("%0.0f", total) }')
# get server grant
+ # which is tot_granted less grant_precreate
server_grant=$(do_nodes $(comma_list $(osts_nodes)) \
- "$LCTL get_param -n obdfilter.${FSNAME}-OST*.tot_granted" |
- awk '{ total += $1 } END { printf("%0.0f", total) }')
+ "$LCTL get_param "\
+ "obdfilter.${FSNAME}-OST*.{tot_granted,tot_pending,grant_precreate}" |
+ sed 's/=/ /'| awk '/tot_granted/{ total += $2 };
+ /tot_pending/{ total -= $2 };
+ /grant_precreate/{ total -= $2 };
+ END { printf("%0.0f", total) }')
# check whether client grant == server grant
if [[ $client_grant -ne $server_grant ]]; then
- echo "failed: client:${client_grant} server: ${server_grant}."
do_nodes $(comma_list $(osts_nodes)) \
- "$LCTL get_param obdfilter.${FSNAME}-OST*.tot*"
+ "$LCTL get_param obdfilter.${FSNAME}-OST*.tot*" \
+ "obdfilter.${FSNAME}-OST*.grant_*"
do_nodes $clients "$LCTL get_param osc.${FSNAME}-*.cur_*_bytes"
- return 1
+ error "failed: client:${client_grant} server: ${server_grant}."
else
echo "pass: client:${client_grant} server: ${server_grant}"
fi
remote_mds_nodsh()
{
-	[ "$CLIENTONLY" ] && return 0 || true
-	remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
+	# a client-only setup never needs dsh access to the MDS
+	[ -n "$CLIENTONLY" ] && return 0 || true
+	# true when the MDS is remote but pdsh access to it is unavailable
+	remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
}
require_dsh_mds()
{
-	remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" && \
-	MSKIPPED=1 && return 1
-	return 0
+	# skip (recording the skip via MSKIPPED) when the MDS is remote
+	# but no dsh transport is available to reach it
+	remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" &&
+		MSKIPPED=1 && return 1
+	return 0
}
remote_ost ()
remote_ost_nodsh()
{
-	[ "$CLIENTONLY" ] && return 0 || true
-	remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+	# a client-only setup never needs dsh access to the OST
+	[ -n "$CLIENTONLY" ] && return 0 || true
+	# true when the OST is remote but pdsh access to it is unavailable
+	remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
require_dsh_ost()
remote_mgs_nodsh()
{
-	[ "$CLIENTONLY" ] && return 0 || true
-	local MGS
-	MGS=$(facet_host mgs)
-	remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+	[ -n "$CLIENTONLY" ] && return 0 || true
+	local MGS
+	MGS=$(facet_host mgs)
+	# NOTE(review): this tests $ost_HOST rather than an MGS/MDS host
+	# variable -- looks like a copy-paste from remote_ost_nodsh (it was
+	# already this way before the change); confirm the intent upstream
+	remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
local_mode ()
rnodes=${rnodes//,/ }
local -a nodes=($rnodes)
- local num=${#nodes[@]}
+ local num=${#nodes[@]}
local i=$((RANDOM * num * 2 / 65536))
echo ${nodes[i]}
}
client_only () {
-	[ "$CLIENTONLY" ] || [ "$CLIENTMODSONLY" = yes ]
+	# true when running in a client-only or client-modules-only config
+	[ -n "$CLIENTONLY" ] || [ "x$CLIENTMODSONLY" = "xyes" ]
}
check_versions () {
setstripe_nfsserver () {
	local dir=$1
+	# $2 is the export-relative path handed through to "lfs setstripe";
+	# remaining arguments are the setstripe options themselves
+	local nfsexportdir=$2
+	shift
+	shift
-	local nfsserver=$(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \
-		{ print $1 }' /proc/mounts | cut -f 1 -d : | head -n1)
+	# collect every NFS server exporting $dir from /proc/mounts
+	local -a nfsexport=($(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \
+		{ print $1 }' /proc/mounts | cut -f 1 -d :))
-	[ -z $nfsserver ] && echo "$dir is not nfs mounted" && return 1
+	# check that only one nfs mounted
+	[[ -z $nfsexport ]] && echo "$dir is not nfs mounted" && return 1
+	(( ${#nfsexport[@]} == 1 )) ||
+		error "several nfs mounts found for $dir: ${nfsexport[@]} !"
-	do_nodev $nfsserver lfs setstripe "$@"
+	do_nodev ${nfsexport[0]} lfs setstripe $nfsexportdir "$@"
}
# Check and add a test group.
}
mdsrate_inodes_available () {
-	local min_inodes=$(inodes_available)
-	echo $((min_inodes * 99 / 100))
+	# leave 1% headroom below the reported available inode count
+	local min_inodes=$(inodes_available)
+	echo $((min_inodes * 99 / 100))
}
-# reset llite stat counters
-clear_llite_stats(){
-	lctl set_param -n llite.*.stats 0
+# clear_stats <param-pattern>: reset the given lctl stats counter(s)
+# (e.g. "llite.*.stats" or "osc.*.osc_stats") to zero; generalizes the
+# old clear_llite_stats/clear_osc_stats helpers
+clear_stats() {
+	local paramfile="$1"
+	lctl set_param -n $paramfile=0
}
-# sum llite stat items
-calc_llite_stats() {
-	local res=$(lctl get_param -n llite.*.stats |
-		awk '/^'"$1"'/ {sum += $2} END { printf("%0.0f", sum) }')
-	echo $((res))
-}
-
-# reset osc stat counters
-clear_osc_stats(){
-	lctl set_param -n osc.*.osc_stats 0
-}
-
-# sum osc stat items
-calc_osc_stats() {
-	local res=$(lctl get_param -n osc.*.osc_stats |
-		awk '/^'"$1"'/ {sum += $2} END { printf("%0.0f", sum) }')
-	echo $((res))
+# calc_stats <param-pattern> <stat-name>: sum the named stat item over
+# every stats file matching the pattern; replaces the duplicated
+# calc_llite_stats/calc_osc_stats helpers
+calc_stats() {
+	local paramfile="$1"
+	local stat="$2"
+	lctl get_param -n $paramfile |
+		awk '/^'$stat'/ { sum += $2 } END { printf("%0.0f", sum) }'
}
calc_sum () {
}
calc_osc_kbytes () {
+	# the df forces a statfs so the per-OSC counters are current
-	df $MOUNT > /dev/null
-	$LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
+	df $MOUNT > /dev/null
+	$LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
}
# save_lustre_params(comma separated facet list, parameter_mask)
done
}
-check_catastrophe() {
+# check_node_health [nodes]: read the "catastrophe" counter on each
+# (comma-separated) node and error() out when LBUG/LASSERT was hit.
+# Unreachable nodes are skipped instead of failing the check.
+check_node_health() {
	local nodes=${1:-$(comma_list $(nodes_list))}
-	do_nodes $nodes "rc=0;
-val=\\\$($LCTL get_param -n catastrophe 2>&1);
-if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then
-	echo \\\$(hostname -s): \\\$val;
-	rc=\\\$val;
-fi;
-exit \\\$rc"
+	local node
+
+	for node in ${nodes//,/ }; do
+		# run the probe inside "if" so a failed reachability check
+		# is not fatal under "set -e" and simply skips the node
+		if check_network "$node" 5; then
+			do_node $node "rc=0;
+			val=\\\$($LCTL get_param -n catastrophe 2>&1);
+			if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then
+				echo \\\$(hostname -s): \\\$val;
+				rc=\\\$val;
+			fi;
+			exit \\\$rc" || error "$node:LBUG/LASSERT detected"
+		fi
+	done
}
mdsrate_cleanup () {
if [[ $facet == client* ]]; then
# During setup time, the osc might not be setup, it need wait
- # until list_param can return valid value. And also if there
- # are mulitple osc entries we should list all of them before
- # go to wait.
+ # until list_param can return valid value.
params=$($LCTL list_param $param 2>/dev/null || true)
while [ -z "$params" ]; do
if [ $i -ge $maxtime ]; then
if ! do_rpc_nodes "$(facet_active_host $facet)" \
wait_import_state $expected "$params" $maxtime; then
- error "import is not in ${expected} state"
+ error "$facet: import is not in $expected state after $maxtime"
return 1
fi
fi
}
+# wait_osp_active <facet> <tgt_name> <tgt_idx> <expected>
+# Poll every MDS until the osp/osc "active" flag for the target reaches
+# the expected value, up to 30s per MDS.  For an MDS target, the MDT's
+# own OSP device is skipped (it has no connection to itself).
+wait_osp_active() {
+	local facet=$1
+	local tgt_name=$2
+	local tgt_idx=$3
+	local expected=$4
+	local num
+
+	# wait until all MDTs are in the expected state
+	for ((num = 1; num <= $MDSCOUNT; num++)); do
+		local mdtosp=$(get_mdtosc_proc_path mds${num} ${tgt_name})
+		local mproc
+
+		if [ $facet = "mds" ]; then
+			mproc="osp.$mdtosp.active"
+			[ $num -eq $((tgt_idx + 1)) ] && continue
+		else
+			mproc="osc.$mdtosp.active"
+		fi
+
+		echo "check $mproc"
+		# keep the timeout counters OUTSIDE the polling loop so the
+		# deadline actually advances instead of resetting each pass
+		local max=30
+		local wait=0
+		while true; do
+			sleep 5
+			local result=$(do_facet mds${num} "$LCTL get_param -n $mproc")
+
+			[ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc"
+			wait=$((wait + 5))
+			if [ $result -eq $expected ]; then
+				echo -n "target updated after"
+				echo "$wait sec (got $result)"
+				break
+			fi
+			if [ $wait -ge $max ]; then
+				error "$tgt_name: wanted $expected got $result"
+			fi
+			echo "Waiting $((max - wait)) secs for $tgt_name"
+		done
+	done
+}
+
oos_full() {
local -a AVAILA
local -a GRANTA
return $OSCFULL
}
-pool_list () {
-	do_facet mgs lctl pool_list $1
+# list_pool <fsname>[.<pool>]: print pool names (or pool members) as
+# seen by the first MDS, with the leading "Pool:" header line stripped
+list_pool() {
+	echo -e "$(do_facet $SINGLEMDS $LCTL pool_list $1 | sed '1d')"
+}
+
+# check_pool_not_exist <fsname>.<poolname>
+# Returns 0 when the pool does NOT exist (or the argument is malformed),
+# 1 when the pool is present in the MDS pool list.
+check_pool_not_exist() {
+	local fsname=${1%%.*}
+	local poolname=${1##$fsname.}
+	[[ $# -ne 1 ]] && return 0
+	[[ x$poolname = x ]] && return 0
+	list_pool $fsname | grep -w $1 && return 1
+	return 0
+}
create_pool() {
-	local fsname=${1%%.*}
-	local poolname=${1##$fsname.}
-
-	do_facet mgs lctl pool_new $1
-	local RC=$?
-	# get param should return err unless pool is created
-	[[ $RC -ne 0 ]] && return $RC
-
-	wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
-		2>/dev/null || echo foo" "" || RC=1
-	if [[ $RC -eq 0 ]]; then
-		add_pool_to_list $1
-	else
-		error "pool_new failed $1"
-	fi
-	return $RC
+	# create_pool <fsname>.<poolname>: create an OST pool on the MGS and
+	# wait until it is visible on every MDS (lod) and the client (lov)
+	local fsname=${1%%.*}
+	local poolname=${1##$fsname.}
+
+	# ensure test pools are cleaned up on exit
+	# NOTE(review): this replaces any previously installed EXIT trap --
+	# confirm no caller relies on an earlier trap surviving
+	trap "destroy_test_pools $fsname" EXIT
+	do_facet mgs lctl pool_new $1
+	local RC=$?
+	# get param should return err unless pool is created
+	[[ $RC -ne 0 ]] && return $RC
+
+	# the pool must propagate through the config log to each MDT's lod
+	for mds_id in $(seq $MDSCOUNT); do
+		local mdt_id=$((mds_id-1))
+		local lodname=$fsname-MDT$(printf "%04x" $mdt_id)-mdtlov
+		wait_update_facet mds$mds_id \
+			"lctl get_param -n lod.$lodname.pools.$poolname \
+				2>/dev/null || echo foo" "" ||
+			error "mds$mds_id: pool_new failed $1"
+	done
+	wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+		2>/dev/null || echo foo" "" || error "pool_new failed $1"
+
+	add_pool_to_list $1
+	return $RC
}
add_pool_to_list () {
}
destroy_pool_int() {
-	local ost
-	local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \
-		awk '$1 !~ /^Pool:/ {print $1}')
-	for ost in $OSTS; do
-		do_facet mgs lctl pool_remove $1 $ost
-	done
-	do_facet mgs lctl pool_destroy $1
+	# remove every member OST from pool $1, then destroy the pool itself
+	local ost
+	local OSTS=$(list_pool $1)
+	for ost in $OSTS; do
+		do_facet mgs lctl pool_remove $1 $ost
+	done
+	do_facet mgs lctl pool_destroy $1
}
# <fsname>.<poolname> or <poolname>
destroy_pool() {
-	local fsname=${1%%.*}
-	local poolname=${1##$fsname.}
+	# destroy an OST pool and wait for it to disappear from every MDS
+	# (lod) and from the client (lov) before deregistering it
+	local fsname=${1%%.*}
+	local poolname=${1##$fsname.}
+	local mds_id
-	[[ x$fsname = x$poolname ]] && fsname=$FSNAME
+	[[ x$fsname = x$poolname ]] && fsname=$FSNAME
-	local RC
+	local RC
-	pool_list $fsname.$poolname || return $?
+	# nothing to do when the pool is already gone; fusing the call with
+	# "&&" keeps the non-zero "pool exists" status from tripping "set -e"
+	check_pool_not_exist $fsname.$poolname && return 0
-	destroy_pool_int $fsname.$poolname
-	RC=$?
-	[[ $RC -ne 0 ]] && return $RC
+	destroy_pool_int $fsname.$poolname
+	RC=$?
+	[[ $RC -ne 0 ]] && return $RC
+	# wait for the removal to propagate to each MDT's lod device
+	for mds_id in $(seq $MDSCOUNT); do
+		local mdt_id=$((mds_id-1))
+		local lodname=$fsname-MDT$(printf "%04x" $mdt_id)-mdtlov
+		wait_update_facet mds$mds_id \
+			"lctl get_param -n lod.$lodname.pools.$poolname \
+				2>/dev/null || echo foo" "foo" ||
+			error "mds$mds_id: destroy pool failed $1"
+	done
+	wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+		2>/dev/null || echo foo" "foo" || error "destroy pool failed $1"
-	wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
-		2>/dev/null || echo foo" "foo" || RC=1
+	remove_pool_from_list $fsname.$poolname
-	if [[ $RC -eq 0 ]]; then
-		remove_pool_from_list $fsname.$poolname
-	else
-		error "destroy pool failed $1"
-	fi
-	return $RC
+	return $RC
}
destroy_pools () {
-	local fsname=${1:-$FSNAME}
-	local poolname
-	local listvar=${fsname}_CREATED_POOLS
-
-	pool_list $fsname
+	# destroy every pool registered for $fsname via add_pool_to_list
+	# (tracked in the per-filesystem ${fsname}_CREATED_POOLS variable)
+	local fsname=${1:-$FSNAME}
+	local poolname
+	local listvar=${fsname}_CREATED_POOLS
-	[ x${!listvar} = x ] && return 0
+	[ x${!listvar} = x ] && return 0
-	echo destroy the created pools: ${!listvar}
-	for poolname in ${!listvar//,/ }; do
-		destroy_pool $fsname.$poolname
-	done
+	echo "Destroy the created pools: ${!listvar}"
+	for poolname in ${!listvar//,/ }; do
+		destroy_pool $fsname.$poolname
+	done
}
-cleanup_pools () {
-	local fsname=${1:-$FSNAME}
-	trap 0
-	destroy_pools $fsname
+# destroy_test_pools [fsname]: EXIT-trap handler installed by
+# create_pool; clears the trap and best-effort destroys all test pools
+destroy_test_pools () {
+	trap 0
+	local fsname=${1:-$FSNAME}
+	destroy_pools $fsname || true
}
gather_logs () {
suffix="$ts.log"
echo "Dumping lctl log to ${prefix}.*.${suffix}"
- if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
+ if [ -n "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
echo "Dumping logs only on local client."
$LCTL dk > ${prefix}.debug_log.$(hostname -s).${suffix}
dmesg > ${prefix}.dmesg.$(hostname -s).${suffix}
}
min_ost_size () {
-	$LCTL get_param -n osc.*.kbytesavail | sort -n | head -n1
+	# smallest "Available" KB figure among all OSTs, taken from
+	# "lfs df" instead of the per-osc kbytesavail parameters
+	$LFS df | grep OST | awk '{print $4}' | sort -un | head -1
}
#
#
get_page_size() {
	local facet=$1
-	local size
+	# default to the local page size so client-only setups (where
+	# do_facet cannot reach a server) still get a sane value
+	local size=$(getconf PAGE_SIZE 2>/dev/null)
-	size=$(do_facet $facet getconf PAGE_SIZE)
-	[[ ${PIPESTATUS[0]} = 0 && -n "$size" ]] || size=4096
-	echo -n $size
+	[ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
+	echo -n ${size:-4096}
}
#
local device=$2
local count
- count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+ [ -z "$CLIENTONLY" ] && count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
awk '/^Block count:/ {print $3}')
- echo -n $count
+ echo -n ${count:-0}
}
# Get the block size of the filesystem.
get_block_size() {
-	local facet=$1
-	local device=$2
-	local size
+	local facet=$1
+	local device=$2
+	local size
+	# skip the server-side dumpe2fs query in client-only mode; callers
+	# receive 0 when the block size cannot be determined
-	size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
-		awk '/^Block size:/ {print $3}')
-	echo $size
+	[ -z "$CLIENTONLY" ] && size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+		awk '/^Block size:/ {print $3}')
+	echo -n ${size:-0}
}
# Check whether the "large_xattr" feature is enabled or not.
fi
}
-# find the smallest and not in use file descriptor
+# free_fd: find the smallest and not in use file descriptor [above @last_fd]
+#
+# If called many times, passing @last_fd will avoid repeated searching
+# already-open FDs repeatedly if we know they are still in use.
+#
+# usage: free_fd [last_fd]
free_fd()
{
-	local max_fd=$(ulimit -n)
-	local fd=3
-	while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do
-		((++fd))
-	done
-	[ $fd -lt $max_fd ] || error "finding free file descriptor failed"
-	echo $fd
+	local max_fd=$(ulimit -n)
+	# start scanning just above the caller-supplied fd
+	# (default: above stderr, i.e. fd 3)
+	local fd=$((${1:-2} + 1))
+
+	while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do
+		((++fd))
+	done
+	[ $fd -lt $max_fd ] || error "finding free file descriptor failed"
+	echo $fd
}
check_mount_and_prep()
local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done)
do_facet mgs $LCTL pool_add \
$FSNAME.$pool $FSNAME-OST[$first-$last/$step]
+
+ # wait for OSTs to be added to the pool
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id \
+ "lctl get_param -n lod.$lodname.pools.$pool |
+ sort -u | tr '\n' ' ' " "$t" || {
+ error_noexit "mds$mds_id: Add to pool failed"
+ return 3
+ }
+ done
wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$pool \
| sort -u | tr '\n' ' ' " "$t" || {
error_noexit "Add to pool failed"
local pname="lov.$FSNAME-*.pools.$pool"
local t=$($LCTL get_param -n $pname | head -1)
do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id \
+ "lctl get_param -n lod.$lodname.pools.$pool |
+ grep $t" "" || {
+ error_noexit "mds$mds_id: $t not removed from" \
+ "$FSNAME.$pool"
+ return 2
+ }
+ done
wait_update $HOSTNAME "lctl get_param -n $pname | grep $t" "" || {
error_noexit "$t not removed from $FSNAME.$pool"
return 1
do
do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
done
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id "lctl get_param -n \
+ lod.$lodname.pools.$pool" "" || {
+ error_noexit "mds$mds_id: Pool $pool not drained"
+ return 4
+ }
+ done
wait_update $HOSTNAME "lctl get_param -n $pname" "" || {
error_noexit "Pool $FSNAME.$pool cannot be drained"
return 1
do_nodes $clients "killall $signal $name"
}
+
+# Thin wrappers around "lctl snapshot_*" executed on the MGS for the
+# current filesystem ($FSNAME); any extra arguments (e.g. "-n <name>")
+# are passed straight through to lctl.
+lsnapshot_create()
+{
+	do_facet mgs "$LCTL snapshot_create -F $FSNAME $*"
+}
+
+lsnapshot_destroy()
+{
+	do_facet mgs "$LCTL snapshot_destroy -F $FSNAME $*"
+}
+
+lsnapshot_modify()
+{
+	do_facet mgs "$LCTL snapshot_modify -F $FSNAME $*"
+}
+
+lsnapshot_list()
+{
+	do_facet mgs "$LCTL snapshot_list -F $FSNAME $*"
+}
+
+lsnapshot_mount()
+{
+	do_facet mgs "$LCTL snapshot_mount -F $FSNAME $*"
+}
+
+lsnapshot_umount()
+{
+	do_facet mgs "$LCTL snapshot_umount -F $FSNAME $*"
+}
+
+# lss_err <msg>: dump the lsnapshot log from the MGS for diagnosis,
+# then fail the test with the given message
+lss_err()
+{
+	local msg=$1
+
+	do_facet mgs "cat $LSNAPSHOT_LOG"
+	error $msg
+}
+
+# lss_cleanup: force-destroy every leftover test snapshot (named lss_*),
+# one at a time
+lss_cleanup()
+{
+	echo "Cleaning test environment ..."
+
+	# Every lsnapshot command takes exclusive lock with others,
+	# so can NOT destroy the snapshot during list with 'xargs'.
+	while true; do
+		local ssname=$(lsnapshot_list | grep snapshot_name |
+			grep lss_ | awk '{ print $2 }' | head -n 1)
+		[ -z "$ssname" ] && break
+
+		lsnapshot_destroy -n $ssname -f ||
+			lss_err "Fail to destroy $ssname by force"
+	done
+}
+
+# lss_gen_conf_one <facet> <role> <idx>: append one ldev.conf entry
+# ("host - label zfs:dir/pool/fsname - -") for the given facet to the
+# snapshot configuration file on the MGS; the label is e.g. fsname-MDT0000
+lss_gen_conf_one()
+{
+	local facet=$1
+	local role=$2
+	local idx=$3
+
+	local host=$(facet_active_host $facet)
+	local dir=$(dirname $(facet_vdevice $facet))
+	local pool=$(zpool_name $facet)
+	local lfsname=$(zfs_local_fsname $facet)
+	local label=${FSNAME}-${role}$(printf '%04x' $idx)
+
+	do_facet mgs \
+		"echo '$host - $label zfs:${dir}/${pool}/${lfsname} - -' >> \
+		$LSNAPSHOT_CONF"
+}
+
+# lss_gen_conf: regenerate $LSNAPSHOT_CONF on the MGS, covering the MGS
+# itself (when not combined with an MDS), every MDT and every OST.
+# All targets must use the ZFS backend; otherwise the test is skipped.
+lss_gen_conf()
+{
+	local num
+
+	do_facet mgs "rm -f $LSNAPSHOT_CONF"
+	echo "Generating $LSNAPSHOT_CONF on MGS ..."
+
+	if ! combined_mgs_mds ; then
+		[ $(facet_fstype mgs) != zfs ] &&
+			skip "Lustre snapshot 1 only works for ZFS backend" &&
+			exit 0
+
+		local host=$(facet_active_host mgs)
+		local dir=$(dirname $(facet_vdevice mgs))
+		local pool=$(zpool_name mgs)
+		local lfsname=$(zfs_local_fsname mgs)
+
+		do_facet mgs \
+			"echo '$host - MGS zfs:${dir}/${pool}/${lfsname} - -' \
+			>> $LSNAPSHOT_CONF" || lss_err "generate lss conf (mgs)"
+	fi
+
+	for num in $(seq $MDSCOUNT); do
+		[ $(facet_fstype mds$num) != zfs ] &&
+			skip "Lustre snapshot 1 only works for ZFS backend" &&
+			exit 0
+
+		lss_gen_conf_one mds$num MDT $((num - 1)) ||
+			lss_err "generate lss conf (mds$num)"
+	done
+
+	for num in $(seq $OSTCOUNT); do
+		[ $(facet_fstype ost$num) != zfs ] &&
+			skip "Lustre snapshot 1 only works for ZFS backend" &&
+			exit 0
+
+		lss_gen_conf_one ost$num OST $((num - 1)) ||
+			lss_err "generate lss conf (ost$num)"
+	done
+
+	do_facet mgs "cat $LSNAPSHOT_CONF"
+}
+
+# parse_plain_param: translate one line of "lfs getstripe" plain-layout
+# output into the matching "lfs setstripe" option (-c/-S/-i);
+# unrecognized lines produce no output
+parse_plain_param()
+{
+	local line=$1
+	local val=$(awk '{print $2}' <<< $line)
+
+	if [[ $line =~ ^"lmm_stripe_count:" ]]; then
+		echo "-c $val"
+	elif [[ $line =~ ^"lmm_stripe_size:" ]]; then
+		echo "-S $val"
+	elif [[ $line =~ ^"lmm_stripe_offset:" ]]; then
+		echo "-i $val"
+	fi
+}
+
+# parse_layout_param: read "lfs getstripe" output on stdin and print
+# the equivalent "lfs setstripe" arguments.  The layout kind is detected
+# from the first recognized keyword: "stripe_count:" => plain directory,
+# "lmm_stripe_count:" => plain file, "lcm_layout_gen:" => composite (PFL).
+parse_layout_param()
+{
+	local mode=""
+	local val=""
+	local param=""
+
+	while read line; do
+		# first recognized keyword fixes the parsing mode
+		if [[ -z $mode ]]; then
+			if [[ $line =~ ^"stripe_count:" ]]; then
+				mode="plain_dir"
+			elif [[ $line =~ ^"lmm_stripe_count:" ]]; then
+				mode="plain_file"
+			elif [[ $line =~ ^"lcm_layout_gen:" ]]; then
+				mode="pfl"
+			fi
+		fi
+
+		if [[ $mode = "plain_dir" ]]; then
+			# single line: "stripe_count: N stripe_size: N stripe_offset: N"
+			param=$(echo $line |
+				awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}')
+		elif [[ $mode = "plain_file" ]]; then
+			val=$(parse_plain_param "$line")
+			[[ ! -z $val ]] && param="$param $val"
+		elif [[ $mode = "pfl" ]]; then
+			val=$(echo $line | awk '{print $2}')
+			if [[ $line =~ ^"lcme_extent.e_end:" ]]; then
+				# each component starts a new -E extent option
+				if [[ $val = "EOF" ]]; then
+					param="$param -E -1"
+				else
+					param="$param -E $val"
+				fi
+			elif [[ $line =~ ^"stripe_count:" ]]; then
+				# pfl dir
+				val=$(echo $line |
+				awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}')
+				param="$param $val"
+			else
+				#pfl file
+				val=$(parse_plain_param "$line")
+				[[ ! -z $val ]] && param="$param $val"
+			fi
+		fi
+	done
+	echo "$param"
+}
+
+# get_layout_param <path>: print the "lfs setstripe" arguments that
+# would reproduce the layout of <path>
+get_layout_param()
+{
+	local param=$($LFS getstripe -d $1 | parse_layout_param)
+	echo "$param"
+}