#set -x
export LANG=en_US
-export EJOURNAL=${EJOURNAL:-""}
export REFORMAT=${REFORMAT:-""}
export WRITECONF=${WRITECONF:-""}
export VERBOSE=${VERBOSE:-false}
# specify environment variable containing batch job name for server statistics
export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
-# LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or
-# LOAD_LLOOP is true. LOAD_LLOOP is false by default.
-export LOAD_LLOOP=${LOAD_LLOOP:-false}
-
#export PDSH="pdsh -S -Rssh -w"
export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
+export UMOUNT=${UMOUNT:-"umount -d"}
+# SLES 12's umount has an issue with the -d option
+[ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
+ export UMOUNT="umount"
+}
# function used by scripts run on remote nodes
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
MODPROBECONF=/etc/modprobe.conf
+# Normalize the test path variables: strip any trailing "/" characters
+# from DIR/DIR1/DIR2/MOUNT/MOUNT1/MOUNT2 (only when they name existing
+# directories) so that later prefix comparisons such as
+# [[ $DIR/ = $MOUNT/* ]] behave consistently.
+sanitize_parameters() {
+ for i in DIR DIR1 DIR2 MOUNT MOUNT1 MOUNT2
+ do
+ local path=${!i}
+ if [ -d "$path" ]; then
+ eval export $i=$(echo $path | sed -r 's/\/+$//g')
+ fi
+ done
+}
+# Sanity check: ensure DIR, DIR1 and DIR2 each live under their matching
+# mount point (MOUNT, MOUNT1, MOUNT2); exit 99 if any does not, so that
+# tests never operate on paths outside the Lustre mounts.
assert_DIR () {
- local failed=""
- [[ $DIR/ = $MOUNT/* ]] || \
- { failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
- [[ $DIR1/ = $MOUNT1/* ]] || \
- { failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
- [[ $DIR2/ = $MOUNT2/* ]] || \
- { failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting"; }
+ local failed=""
+ [[ $DIR/ = $MOUNT/* ]] ||
+ { failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
+ [[ $DIR1/ = $MOUNT1/* ]] ||
+ { failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
+ [[ $DIR2/ = $MOUNT2/* ]] ||
+ { failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting"; }
- [ -n "$failed" ] && exit 99 || true
+ [ -n "$failed" ] && exit 99 || true
}
usage() {
print_summary () {
trap 0
- [ "$TESTSUITE" == "lfsck" ] && return 0
+ [ -z "$DEFAULT_SUITES"] && return 0
[ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
local details
local form="%-13s %-17s %-9s %s %s\n"
export TEST_FAILED=false
export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
export RPC_MODE=${RPC_MODE:-false}
+ export DO_CLEANUP=${DO_CLEANUP:-true}
export MKE2FS=$MKE2FS
if [ -z "$MKE2FS" ]; then
fi
fi
- export LFSCK_BIN=${LFSCK_BIN:-lfsck}
- export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite
- export FSCK_MAX_ERR=4 # File system errors left uncorrected
+ export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after test suite
+ export FSCK_MAX_ERR=4 # File system errors left uncorrected
export ZFS=${ZFS:-zfs}
export ZPOOL=${ZPOOL:-zpool}
# Ubuntu, at least, has a truncate command in /usr/bin
# so fully path our truncate command.
export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate}
+ export FSX=${FSX:-$LUSTRE/tests/fsx}
export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"}
[ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
fi
export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"}
[ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid"
+ export LL_DECODE_LINKEA=${LL_DECODE_LINKEA:-"$LUSTRE/utils/ll_decode_linkea"}
+ [ ! -f "$LL_DECODE_LINKEA" ] && export LL_DECODE_LINKEA="ll_decode_linkea"
export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
[ ! -f "$MKFS" ] && export MKFS="mkfs.lustre"
export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate}
[ ! -f "$LFS_MIGRATE" ] &&
export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null)
+ export LR_READER=${LR_READER:-"$LUSTRE/utils/lr_reader"}
+ [ ! -f "$LR_READER" ] && export LR_READER=$(which lr_reader 2> /dev/null)
+ [ -z "$LR_READER" ] && export LR_READER="/usr/sbin/lr_reader"
export NAME=${NAME:-local}
export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
[ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \
export DIR2
export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests}
export AT_MAX_PATH
+ export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"}
+ [ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null)
if [ "$ACCEPTOR_PORT" ]; then
export PORT_OPT="--port $ACCEPTOR_PORT"
export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
- # command line
+ # command line
- while getopts "rvwf:" opt $*; do
- case $opt in
- f) CONFIG=$OPTARG;;
- r) REFORMAT=--reformat;;
- v) VERBOSE=true;;
- w) WRITECONF=writeconf;;
- \?) usage;;
- esac
- done
+ while getopts "rvwf:" opt $*; do
+ case $opt in
+ f) CONFIG=$OPTARG;;
+ r) REFORMAT=yes;;
+ v) VERBOSE=true;;
+ w) WRITECONF=writeconf;;
+ \?) usage;;
+ esac
+ done
- shift $((OPTIND - 1))
- ONLY=${ONLY:-$*}
+ shift $((OPTIND - 1))
+ ONLY=${ONLY:-$*}
# print the durations of each test if "true"
DDETAILS=${DDETAILS:-false}
if ! $RPC_MODE; then
rm -f $TMP/*active
fi
+
+ export TF_FAIL=${TF_FAIL:-$TMP/tf.fail}
}
check_cpt_number() {
fi
}
+# Return a numeric version code based on a version string. The version
+# code is useful for comparing two version strings to see which is newer.
version_code() {
- # split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3"
- eval set -- $(tr "[:punct:]" " " <<< $*)
+ # split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3"
+ eval set -- $(tr "[:punct:]" " " <<< $*)
- echo -n "$((($1 << 16) | ($2 << 8) | $3))"
+ # pack as (major << 16) | (minor << 8) | patch; extra fields ignored
+ echo -n "$((($1 << 16) | ($2 << 8) | $3))"
}
export LINUX_VERSION=$(uname -r | sed -e "s/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1/")
export LINUX_VERSION_CODE=$(version_code ${LINUX_VERSION//\./ })
+# Report the Lustre build version string (e.g. 1.8.7.3 or 2.4.1).
+#
+# usage: lustre_build_version
+#
+# All Lustre versions support "lctl get_param" to report the version of the
+# code running in the kernel (what our tests are interested in), but it
+# doesn't work without modules loaded. If that fails, use "lctl version"
+# instead, which is easy to parse and works without the kernel modules,
+# but was only added in 2.6.50. If that also fails, fall back to calling
+# "lctl lustre_build_version" which prints either (or both) the userspace
+# and kernel build versions, but is deprecated and should eventually be
+# removed.
+#
+# output: prints version string to stdout in dotted-decimal format
+lustre_build_version() {
+ local facet=${1:-client}
+
+ # lustre: 2.8.52
+ local VER=$(do_facet $facet $LCTL get_param -n version 2> /dev/null |
+ awk '/lustre: / { print $2 }')
+ # lctl 2.6.50
+ [ -z "$VER" ] && VER=$(do_facet $facet $LCTL --version 2>/dev/null |
+ awk '{ print $2 }')
+ # Lustre version: 2.5.3-gfcfd782-CHANGED-2.6.32.26-175.fc12.x86_64
+ # lctl version: 2.5.3-gfcfd782-CHANGED-2.6.32.26-175.fc12.x86_64
+ [ -z "$VER" ] && VER=$(do_facet $facet $LCTL lustre_build_version |
+ awk '/version:/ { print $3; exit; }')
+ # strip a leading "v", drop everything after the first "-", and
+ # map "_" to "." so the result is plain dotted-decimal
+ sed -e 's/^v//' -e 's/-.*//' -e 's/_/./g' <<<$VER
+}
+
+# Report the Lustre numeric build version code for the supplied facet.
+#
+# usage: lustre_version_code [facet]   (facet defaults to "client")
+lustre_version_code() {
+ version_code $(lustre_build_version $1)
+}
+
+# Succeed (return 0) iff kernel module $1 is currently loaded.
module_loaded () {
- /sbin/lsmod | grep -q "^\<$1\>"
+ /sbin/lsmod | grep -q "^\<$1\>"
}
# Load a module on the system where this is running.
#
-# Synopsis: load_module module_name [module arguments for insmod/modprobe]
+# usage: load_module module_name [module arguments for insmod/modprobe]
#
# If module arguments are not given but MODOPTS_<MODULE> is set, then its value
# will be used as the arguments. Otherwise arguments will be obtained from
EXT=".ko"
module=$1
shift
- BASE=`basename $module $EXT`
+ BASE=$(basename $module $EXT)
module_loaded ${BASE} && return
- # If no module arguments were passed, get them from $MODOPTS_<MODULE>, else from
- # modprobe.conf
+ # If no module arguments were passed, get them from $MODOPTS_<MODULE>,
+ # else from modprobe.conf
if [ $# -eq 0 ]; then
# $MODOPTS_<MODULE>; we could use associative arrays, but that's not in
# Bash until 4.x, so we resort to eval.
[ $# -gt 0 ] && echo "${module} options: '$*'"
- # Note that insmod will ignore anything in modprobe.conf, which is why we're
- # passing options on the command-line.
- if [ "$BASE" == "lnet_selftest" ] && \
- [ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]; then
- insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
- elif [ -f ${LUSTRE}/${module}${EXT} ]; then
- insmod ${LUSTRE}/${module}${EXT} "$@"
- else
- # must be testing a "make install" or "rpm" installation
- # note failed to load ptlrpc_gss is considered not fatal
- if [ "$BASE" == "ptlrpc_gss" ]; then
- modprobe $BASE "$@" 2>/dev/null || echo "gss/krb5 is not supported"
- else
- modprobe $BASE "$@"
- fi
- fi
-}
-
-llite_lloop_enabled() {
- local n1=$(uname -r | cut -d. -f1)
- local n2=$(uname -r | cut -d. -f2)
- local n3=$(uname -r | cut -d- -f1 | cut -d. -f3)
-
- # load the llite_lloop module for < 2.6.32 kernels
- if [[ $n1 -lt 2 ]] || [[ $n1 -eq 2 && $n2 -lt 6 ]] || \
- [[ $n1 -eq 2 && $n2 -eq 6 && $n3 -lt 32 ]] || \
- $LOAD_LLOOP; then
- return 0
- fi
- return 1
+ # Note that insmod will ignore anything in modprobe.conf, which is why
+ # we're passing options on the command-line.
+ if [[ "$BASE" == "lnet_selftest" ]] &&
+ [[ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]]; then
+ insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
+ elif [[ -f ${LUSTRE}/${module}${EXT} ]]; then
+ [[ "$BASE" != "ptlrpc_gss" ]] || modprobe sunrpc
+ insmod ${LUSTRE}/${module}${EXT} "$@"
+ else
+ # must be testing a "make install" or "rpm" installation
+ # note failed to load ptlrpc_gss is considered not fatal
+ if [[ "$BASE" == "ptlrpc_gss" ]]; then
+ modprobe $BASE "$@" 2>/dev/null ||
+ echo "gss/krb5 is not supported"
+ else
+ modprobe $BASE "$@"
+ fi
+ fi
}
load_modules_local() {
fi
load_module ../libcfs/libcfs/libcfs
-
- [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG"
- [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }"
- load_module ../lnet/lnet/lnet
+ # Prevent local MODOPTS_LIBCFS being passed as part of environment
+ # variable to remote nodes
+ unset MODOPTS_LIBCFS
+
+ set_default_debug
+ load_module ../lnet/lnet/lnet
+ case $NETTYPE in
+ o2ib)
+ LNETLND="o2iblnd/ko2iblnd"
+ ;;
+ *)
+ ;;
+ esac
LNETLND=${LNETLND:-"socklnd/ksocklnd"}
load_module ../lnet/klnds/$LNETLND
load_module obdclass/obdclass
load_module lov/lov
load_module mgc/mgc
load_module obdecho/obdecho
- if ! client_only; then
- SYMLIST=/proc/kallsyms
- grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
- grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
- grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+ if ! client_only; then
+ SYMLIST=/proc/kallsyms
+ grep -q crc16 $SYMLIST ||
+ { modprobe crc16 2>/dev/null || true; }
+ grep -q -w jbd2 $SYMLIST ||
+ { modprobe jbd2 2>/dev/null || true; }
load_module lfsck/lfsck
- [ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS
+ [ "$LQUOTA" != "no" ] &&
+ load_module quota/lquota $LQUOTAOPTS
if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
modprobe zfs
load_module osd-zfs/osd_zfs
if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
grep -q exportfs_decode_fh $SYMLIST ||
{ modprobe exportfs 2> /dev/null || true; }
+ grep -q -w mbcache $SYMLIST ||
+ { modprobe mbcache 2>/dev/null || true; }
load_module ../ldiskfs/ldiskfs
load_module osd-ldiskfs/osd_ldiskfs
fi
load_module osp/osp
load_module ofd/ofd
load_module osp/osp
- fi
+ fi
load_module llite/lustre
- llite_lloop_enabled && load_module llite/llite_lloop
[ -d /r ] && OGDB=${OGDB:-"/r/tmp"}
OGDB=${OGDB:-$TMP}
rm -f $OGDB/ogdb-$HOSTNAME
# 'mount' doesn't look in $PATH, just sbin
local mount_lustre=$LUSTRE/utils/mount.lustre
if [ -f $mount_lustre ]; then
- local sbin_mount=/sbin/mount.lustre
- if grep -qe "$sbin_mount " /proc/mounts; then
- cmp $mount_lustre $sbin_mount || umount $sbin_mount
+ local sbin_mount=$(readlink -f /sbin)/mount.lustre
+ if grep -qw "$sbin_mount" /proc/mounts; then
+ cmp -s $mount_lustre $sbin_mount || umount $sbin_mount
fi
- if ! grep -qe "$sbin_mount " /proc/mounts; then
+ if ! grep -qw "$sbin_mount" /proc/mounts; then
[ ! -f "$sbin_mount" ] && touch "$sbin_mount"
if [ ! -s "$sbin_mount" -a -w "$sbin_mount" ]; then
cat <<- EOF > "$sbin_mount"
fi
fi
- local sbin_mount=/sbin/mount.lustre
+ local sbin_mount=$(readlink -f /sbin)/mount.lustre
if grep -qe "$sbin_mount " /proc/mounts; then
umount $sbin_mount || true
[ -s $sbin_mount ] && ! grep -q "STUB MARK" $sbin_mount ||
echo -n $size
}
+# Print the approximate per-inode metadata size, in KiB, for the backend
+# filesystem type of the given facet (default: $SINGLEMDS).
+# Unknown fstypes report 0.
+fs_inode_ksize() {
+ local facet=${1:-$SINGLEMDS}
+ local fstype=$(facet_fstype $facet)
+ local size=0
+ case $fstype in
+ ldiskfs) size=4;; # ~4KB per inode
+ zfs) size=11;; # 10 to 11KB per inode
+ esac
+
+ echo -n $size
+}
+
check_gss_daemon_nodes() {
local list=$1
dname=$2
facet_number() {
local facet=$1
- if [ $facet == mgs ]; then
+ if [ $facet == mgs ] || [ $facet == client ]; then
return 1
fi
echo -n $device
}
+# Detect whether this node is a virtual machine. Prints the hypervisor
+# name (lower-cased) to stdout, or nothing on bare metal. virt-what is
+# tried first; if unavailable or silent, fall back to matching the DMI
+# system-product-name against a list of known hypervisor products.
+running_in_vm() {
+ local virt=$(virt-what 2> /dev/null)
+
+ [ $? -eq 0 ] && [ -n "$virt" ] && { echo $virt; return; }
+
+ virt=$(dmidecode -s system-product-name | awk '{print $1}')
+
+ case $virt in
+ VMware|KVM|VirtualBox|Parallels)
+ echo $virt | tr '[A-Z]' '[a-z]' ;;
+ *) ;;
+ esac
+}
+
#
# Re-read the partition table on failover partner host.
# After a ZFS storage pool is created on a shared device, the partition table
shift 3
local opts=${@:-"-o cachefile=none"}
- do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ do_facet $facet "modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL create -f $opts $poolname $vdev"
}
if [[ -n "$poolname" ]]; then
opts+=" -d $(dirname $(facet_vdevice $facet))"
- do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
+ do_facet $facet "modprobe zfs;
+ $ZPOOL list -H $poolname >/dev/null 2>&1 ||
$ZPOOL import -f $opts $poolname"
fi
}
local name=$3
do_nodes $nodes "$LCTL get_param -n obdfilter.$device.$name \
- osd-*.$device.$name 2>&1" | grep -v 'Found no match'
+ osd-*.$device.$name 2>&1" | grep -v 'error:'
}
set_osd_param() {
local value=$4
do_nodes $nodes "$LCTL set_param -n obdfilter.$device.$name=$value \
- osd-*.$device.$name=$value 2>&1" | grep -v 'Found no match'
+ osd-*.$device.$name=$value 2>&1" | grep -v 'error:'
}
set_debug_size () {
if [ -f /sys/devices/system/cpu/possible ]; then
local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
else
- local cpus=$(getconf _NPROCESSORS_CONF)
+ local cpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
fi
# bug 19944, adjust size to be -gt num_possible_cpus()
set_default_debug_nodes $node
}
+# Write /etc/hostid if it is missing or empty. $1 (default: output of
+# hostid(1)) is an 8-hex-digit id; the sed expression reverses the four
+# byte pairs so the id is stored as 4 binary bytes in little-endian
+# order. NOTE(review): presumably consumed by ZFS/SPL — confirm.
+set_hostid () {
+ local hostid=${1:-$(hostid)}
+
+ if [ ! -s /etc/hostid ]; then
+ printf $(echo -n $hostid |
+ sed 's/\(..\)\(..\)\(..\)\(..\)/\\x\4\\x\3\\x\2\\x\1/') >/etc/hostid
+ fi
+}
+
# Facet functions
mount_facets () {
local facets=${1:-$(get_facets)}
local opt=${facet}_opt
local mntpt=$(facet_mntpt $facet)
local opts="${!opt} $@"
+ local fstype=$(facet_fstype $facet)
+ local devicelabel
+
+ module_loaded lustre || load_modules
if [ $(facet_fstype $facet) == ldiskfs ] &&
! do_facet $facet test -b ${!dev}; then
import_zpool $facet || return ${PIPESTATUS[0]}
fi
+ case $fstype in
+ ldiskfs)
+ devicelabel=$(do_facet ${facet} "$E2LABEL ${!dev}");;
+ zfs)
+ devicelabel=$(do_facet ${facet} "$ZFS get -H -o value \
+ lustre:svname ${!dev}");;
+ *)
+ error "unknown fstype!";;
+ esac
+
echo "Starting ${facet}: $opts ${!dev} $mntpt"
# for testing LU-482 error handling in mount_facets() and test_0a()
if [ -f $TMP/test-lu482-trigger ]; then
${!dev} $mntpt"
RC=${PIPESTATUS[0]}
fi
+
if [ $RC -ne 0 ]; then
echo "Start of ${!dev} on ${facet} failed ${RC}"
- else
- set_default_debug_facet $facet
+ return $RC
+ fi
- label=$(devicelabel ${facet} ${!dev})
- [ -z "$label" ] && echo no label for ${!dev} && exit 1
- eval export ${facet}_svc=${label}
- echo Started ${label}
- fi
- return $RC
+ health=$(do_facet ${facet} "$LCTL get_param -n health_check")
+ if [[ "$health" != "healthy" ]]; then
+ error "$facet is in a unhealthy state"
+ fi
+
+ set_default_debug_facet $facet
+
+ if [[ $facet == mds* ]]; then
+ do_facet $facet \
+ lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 2>/dev/null
+ fi
+
+ if [[ $opts =~ .*nosvc.* ]]; then
+ echo "Start ${!dev} without service"
+ else
+
+ case $fstype in
+ ldiskfs)
+ wait_update_facet ${facet} "$E2LABEL ${!dev} \
+ 2>/dev/null | grep -E ':[a-zA-Z]{3}[0-9]{4}'" \
+ "" || error "${!dev} failed to initialize!";;
+ zfs)
+ wait_update_facet ${facet} "$ZFS get -H -o value \
+ lustre:svname ${!dev} 2>/dev/null | \
+ grep -E ':[a-zA-Z]{3}[0-9]{4}'" "" ||
+ error "${!dev} failed to initialize!";;
+
+ *)
+ error "unknown fstype!";;
+ esac
+ fi
+
+ # commit the device label change to disk
+ if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then
+ echo "Commit the device label on ${!dev}"
+ do_facet $facet "sync; sync; sync"
+ sleep 5
+ fi
+
+
+ label=$(devicelabel ${facet} ${!dev})
+ [ -z "$label" ] && echo no label for ${!dev} && exit 1
+ eval export ${facet}_svc=${label}
+ echo Started ${label}
+
+ return $RC
}
# start facet device options
start() {
- local facet=$1
- shift
- local device=$1
- shift
- eval export ${facet}_dev=${device}
- eval export ${facet}_opt=\"$@\"
+ local facet=$1
+ shift
+ local device=$1
+ shift
+ eval export ${facet}_dev=${device}
+ eval export ${facet}_opt=\"$@\"
- local varname=${facet}failover_dev
- if [ -n "${!varname}" ] ; then
- eval export ${facet}failover_dev=${!varname}
- else
- eval export ${facet}failover_dev=$device
- fi
+ local varname=${facet}failover_dev
+ if [ -n "${!varname}" ] ; then
+ eval export ${facet}failover_dev=${!varname}
+ else
+ eval export ${facet}failover_dev=$device
+ fi
local mntpt=$(facet_mntpt $facet)
do_facet ${facet} mkdir -p $mntpt
running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
if [ ${running} -ne 0 ]; then
echo "Stopping $mntpt (opts:$@) on $HOST"
- do_facet ${facet} umount -d $@ $mntpt
+ do_facet ${facet} $UMOUNT $@ $mntpt
fi
# umount should block, but we should wait for unrelated obd's
return $rc
}
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-# it should be removed whenever we drop the interoperability for such
-# server.
-restore_quota_old() {
- local mntpt=${1:-$MOUNT}
- local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2)
- if [ ! "$old_QUOTA_TYPE" ] ||
- [ "$quota_type" = "$old_QUOTA_TYPE" ]; then
- return
- fi
- quota_save_version $old_QUOTA_TYPE
-}
-
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-# it should be removed whenever we drop the interoperability for such
-# server.
-setup_quota_old(){
- local mntpt=$1
-
- # no quota enforcement for now and accounting works out of the box
- return
-
- # We need save the original quota_type params, and restore them after testing
-
- # Suppose that quota type the same on mds and ost
- local quota_type=$(quota_type | grep MDT | cut -d "=" -f2)
- [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!"
- echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]"
- if [ "$quota_type" != "$QUOTA_TYPE" ]; then
- export old_QUOTA_TYPE=$quota_type
- quota_save_version $QUOTA_TYPE
- else
- qtype=$(tr -c -d "ug" <<< $QUOTA_TYPE)
- $LFS quotacheck -$qtype $mntpt || error "quotacheck has failed for $type"
- fi
-
- local quota_usrs=$QUOTA_USERS
-
- # get_filesystem_size
- local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
- local blk_soft=$((disksz + 1024))
- local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
-
- local Inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
- local i_soft=$Inodes
- local i_hard=$((i_soft + i_soft / 20))
-
- echo "Total disk size: $disksz block-softlimit: $blk_soft block-hardlimit:
- $blk_hard inode-softlimit: $i_soft inode-hardlimit: $i_hard"
-
- local cmd
- for usr in $quota_usrs; do
- echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
- for type in u g; do
- cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt"
- echo "+ $cmd"
- eval $cmd || error "$cmd FAILED!"
- done
- # display the quota status
- echo "Quota settings for $usr : "
- $LFS quota -v -u $usr $mntpt || true
- done
-}
-
# get mdt quota type
mdt_quota_type() {
local varsvc=${SINGLEMDS}_svc
# restore old quota type settings
restore_quota() {
- if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
- restore_quota_old
- return
- fi
-
if [ "$old_MDT_QUOTA_TYPE" ]; then
do_facet mgs $LCTL conf_param \
$FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
}
setup_quota(){
- if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
- setup_quota_old $1
- return
- fi
-
local mntpt=$1
# save old quota type & set new quota type
}
zconf_mount() {
- local client=$1
- local mnt=$2
- local opts=${3:-$MOUNT_OPTS}
- opts=${opts:+-o $opts}
- local flags=${4:-$MOUNT_FLAGS}
-
- local device=$MGSNID:/$FSNAME
- if [ -z "$mnt" -o -z "$FSNAME" ]; then
- echo Bad zconf mount command: opt=$flags $opts dev=$device mnt=$mnt
- exit 1
- fi
-
- echo "Starting client: $client: $flags $opts $device $mnt"
- do_node $client mkdir -p $mnt
- do_node $client $MOUNT_CMD $flags $opts $device $mnt || return 1
-
- set_default_debug_nodes $client
+ local client=$1
+ local mnt=$2
+ local opts=${3:-$MOUNT_OPTS}
+ opts=${opts:+-o $opts}
+ local flags=${4:-$MOUNT_FLAGS}
+
+ local device=$MGSNID:/$FSNAME$FILESET
+ if [ -z "$mnt" -o -z "$FSNAME" ]; then
+ echo "Bad mount command: opt=$flags $opts dev=$device " \
+ "mnt=$mnt"
+ exit 1
+ fi
+
+ echo "Starting client: $client: $flags $opts $device $mnt"
+ do_node $client mkdir -p $mnt
+ if [ -n "$FILESET" -a -z "$SKIP_FILESET" ];then
+ do_node $client $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \
+ $mnt || return 1
+ #disable FILESET if not supported
+ do_nodes $client lctl get_param -n \
+ mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
+ device=$MGSNID:/$FSNAME
+ do_node $client mkdir -p $mnt/$FILESET
+ do_node $client "! grep -q $mnt' ' /proc/mounts ||
+ umount $mnt"
+ fi
+ do_node $client $MOUNT_CMD $flags $opts $device $mnt || return 1
+
+ set_default_debug_nodes $client
- return 0
+ return 0
}
zconf_umount() {
local client=$1
local mnt=$2
local force
- local busy
+ local busy
local need_kill
[ "$3" ] && force=-f
echo \\\$(hostname) env are INSANE!;
exit 1;
fi"
- [ $? -eq 0 ] || rc=1
+ [ $? -eq 0 ] || rc=1
done
return $rc
}
sanity_mount_check_servers () {
- [ "$CLIENTONLY" ] &&
+ [ -n "$CLIENTONLY" ] &&
{ echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
echo Checking servers environments
# mount clients if not mouted
zconf_mount_clients() {
- local clients=$1
- local mnt=$2
- local opts=${3:-$MOUNT_OPTS}
- opts=${opts:+-o $opts}
- local flags=${4:-$MOUNT_FLAGS}
-
- local device=$MGSNID:/$FSNAME
- if [ -z "$mnt" -o -z "$FSNAME" ]; then
- echo Bad zconf mount command: opt=$flags $opts dev=$device mnt=$mnt
- exit 1
- fi
-
- echo "Starting client $clients: $flags $opts $device $mnt"
-
- do_nodes $clients "
+ local clients=$1
+ local mnt=$2
+ local opts=${3:-$MOUNT_OPTS}
+ opts=${opts:+-o $opts}
+ local flags=${4:-$MOUNT_FLAGS}
+
+ local device=$MGSNID:/$FSNAME$FILESET
+ if [ -z "$mnt" -o -z "$FSNAME" ]; then
+ echo "Bad conf mount command: opt=$flags $opts dev=$device " \
+ "mnt=$mnt"
+ exit 1
+ fi
+
+ echo "Starting client $clients: $flags $opts $device $mnt"
+ if [ -n "$FILESET" -a ! -n "$SKIP_FILESET" ]; then
+ do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
+ umount $mnt"
+ do_nodes $clients $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \
+ $mnt || return 1
+ #disable FILESET if not supported
+ do_nodes $clients lctl get_param -n \
+ mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
+ device=$MGSNID:/$FSNAME
+ do_nodes $clients mkdir -p $mnt/$FILESET
+ do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
+ umount $mnt"
+ fi
+
+ do_nodes $clients "
running=\\\$(mount | grep -c $mnt' ');
rc=0;
if [ \\\$running -eq 0 ] ; then
fi;
exit \\\$rc" || return ${PIPESTATUS[0]}
- echo "Started clients $clients: "
- do_nodes $clients "mount | grep $mnt' '"
+ echo "Started clients $clients: "
+ do_nodes $clients "mount | grep $mnt' '"
- set_default_debug_nodes $clients
+ set_default_debug_nodes $clients
- return 0
+ return 0
}
zconf_umount_clients() {
wait_for_function --quiet "! ping -w 3 -c 1 $host" 5 1 && return 0
echo "waiting for $host to fail attempts=$attempts"
[ $i -lt $attempts ] || \
- { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
+ { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
done
}
# only for remote client
check_client_load () {
- local client=$1
- local var=$(node_var_name $client)_load
- local TESTLOAD=run_${!var}.sh
-
- ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1
-
- # bug 18914: try to connect several times not only when
- # check ps, but while check_catastrophe also
- local tries=3
- local RC=254
- while [ $RC = 254 -a $tries -gt 0 ]; do
- let tries=$tries-1
- # assume success
- RC=0
- if ! check_catastrophe $client; then
- RC=${PIPESTATUS[0]}
- if [ $RC -eq 254 ]; then
- # FIXME: not sure how long we shuold sleep here
- sleep 10
- continue
- fi
- echo "check catastrophe failed: RC=$RC "
- return $RC
- fi
- done
- # We can continue try to connect if RC=254
- # Just print the warning about this
- if [ $RC = 254 ]; then
- echo "got a return status of $RC from do_node while checking catastrophe on $client"
- fi
-
- # see if the load is still on the client
- tries=3
- RC=254
- while [ $RC = 254 -a $tries -gt 0 ]; do
- let tries=$tries-1
- # assume success
- RC=0
- if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then
- RC=${PIPESTATUS[0]}
- sleep 30
- fi
- done
- if [ $RC = 254 ]; then
- echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client"
- # see if we can diagnose a bit why this is
- fi
+ local client=$1
+ local var=$(node_var_name $client)_load
+ local testload=run_${!var}.sh
+
+ ps auxww | grep -v grep | grep $client | grep -q $testload || return 1
+
+ # bug 18914: try to connect several times not only when
+ # check ps, but while check_node_health also
+
+ local tries=3
+ local RC=254
+ while [ $RC = 254 -a $tries -gt 0 ]; do
+ let tries=$tries-1
+ # assume success
+ RC=0
+ if ! check_node_health $client; then
+ RC=${PIPESTATUS[0]}
+ if [ $RC -eq 254 ]; then
+ # FIXME: not sure how long we shuold sleep here
+ sleep 10
+ continue
+ fi
+ echo "check node health failed: RC=$RC "
+ return $RC
+ fi
+ done
+ # We can continue try to connect if RC=254
+ # Just print the warning about this
+ if [ $RC = 254 ]; then
+ echo "got a return status of $RC from do_node while checking " \
+ "node health on $client"
+ fi
+
+ # see if the load is still on the client
+ tries=3
+ RC=254
+ while [ $RC = 254 -a $tries -gt 0 ]; do
+ let tries=$tries-1
+ # assume success
+ RC=0
+ if ! do_node $client \
+ "ps auxwww | grep -v grep | grep -q $testload"; then
+ RC=${PIPESTATUS[0]}
+ sleep 30
+ fi
+ done
+ if [ $RC = 254 ]; then
+ echo "got a return status of $RC from do_node while checking " \
+ "(node health and 'ps') the client load on $client"
+ # see if we can diagnose a bit why this is
+ fi
- return $RC
+ return $RC
}
check_client_loads () {
local clients=${1//,/ }
}
+# Run wait_update on whichever host is currently serving the facet.
+# An optional leading --verbose flag is forwarded to wait_update.
wait_update_facet() {
+ local verbose=
+ [ "$1" = "--verbose" ] && verbose="$1" && shift
+
local facet=$1
shift
- wait_update $(facet_active_host $facet) "$@"
+ wait_update $verbose $(facet_active_host $facet) "$@"
}
+# Force a sync on all MDT and OST OSD devices via the force_sync
+# parameter, filtering harmless "Found no match" noise from the OST pass.
sync_all_data() {
do_nodes $(comma_list $(mdts_nodes)) \
- "lctl set_param -n osd*.*MDT*.force_sync 1"
+ "lctl set_param -n osd*.*MDT*.force_sync=1"
do_nodes $(comma_list $(osts_nodes)) \
- "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
+ "lctl set_param -n osd*.*OS*.force_sync=1" 2>&1 |
grep -v 'Found no match'
}
+wait_zfs_commit() {
+ # the occupied disk space will be released
+ # only after DMUs are committed
+ if [[ $(facet_fstype $1) == zfs ]]; then
+ echo "sleep $2 for ZFS OSD"
+ sleep $2
+ fi
+}
+
wait_delete_completed_mds() {
local MAX_WAIT=${1:-20}
+ # for ZFS, waiting more time for DMUs to be committed
+ local ZFS_WAIT=${2:-5}
local mds2sync=""
local stime=$(date +%s)
local etime
mds2sync="$mds2sync $node"
done
if [ -z "$mds2sync" ]; then
+ wait_zfs_commit $SINGLEMDS $ZFS_WAIT
return
fi
mds2sync=$(comma_list $mds2sync)
"$LCTL get_param -n osc.*MDT*.sync_*" | calc_sum)
#echo "$node: $changes changes on all"
if [[ $changes -eq 0 ]]; then
- etime=$(date +%s)
- #echo "delete took $((etime - stime)) seconds"
+ wait_zfs_commit $SINGLEMDS $ZFS_WAIT
return
fi
sleep 1
return 1
}
+# Wait OSTs to be active on both client and MDT side.
+wait_osts_up() {
+ local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
+ awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
+ wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
+ error "wait_update OSTs up on client failed"
+
+ cmd="$LCTL get_param -n lod.$FSNAME-MDT*-*.target_obd | sort -u |
+ awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
+ wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
+ error "wait_update OSTs up on MDT failed"
+}
+
wait_destroy_complete () {
echo "Waiting for local destroys to complete"
# MAX value shouldn't be big as this mean server responsiveness
return $rc
}
+# Lightweight liveness probe of $MOUNT via "lfs df": run locally when no
+# client list is given (default: $CLIENTS), otherwise via pdsh on every
+# listed client with the output discarded.
+lfs_df_check() {
+ local clients=${1:-$CLIENTS}
+
+ if [ -z "$clients" ]; then
+ $LFS df $MOUNT
+ else
+ $PDSH $clients "$LFS df $MOUNT" > /dev/null
+ fi
+}
+
+
+# Check that all clients can reach the filesystem.
clients_up() {
- # not every config has many clients
- sleep 1
- if [ ! -z "$CLIENTS" ]; then
- $PDSH $CLIENTS "stat -f $MOUNT" > /dev/null
- else
- stat -f $MOUNT > /dev/null
- fi
+ # not every config has many clients
+ sleep 1
+ # no argument: lfs_df_check covers $CLIENTS, or the local mount
+ lfs_df_check
}
+# Check that a single client ($1, or the local node when empty) can
+# reach the filesystem.
client_up() {
- local client=$1
- # usually checked on particular client or locally
- sleep 1
- if [ ! -z "$client" ]; then
- $PDSH $client "stat -f $MOUNT" > /dev/null
- else
- stat -f $MOUNT > /dev/null
- fi
+ # usually checked on particular client or locally
+ sleep 1
+ lfs_df_check $1
}
client_evicted() {
}
+# Poke the filesystem from every client so disconnected imports get a
+# chance to reconnect: each client runs "lfs df" and appends its
+# hostname to $MOUNT/recon, which is then reported and removed.
client_reconnect_try() {
- uname -n >> $MOUNT/recon
- if [ -z "$CLIENTS" ]; then
- df $MOUNT; uname -n >> $MOUNT/recon
- else
- do_nodes $CLIENTS "df $MOUNT; uname -n >> $MOUNT/recon" > /dev/null
- fi
- echo Connected clients:
- cat $MOUNT/recon
- ls -l $MOUNT/recon > /dev/null
- rm $MOUNT/recon
+ local f=$MOUNT/recon
+
+ uname -n >> $f
+ if [ -z "$CLIENTS" ]; then
+ $LFS df $MOUNT; uname -n >> $f
+ else
+ do_nodes $CLIENTS "$LFS df $MOUNT; uname -n >> $f" > /dev/null
+ fi
+ echo "Connected clients: $(cat $f)"
+ ls -l $f > /dev/null
+ rm $f
}
client_reconnect() {
}
facet_failover() {
+ local E2FSCK_ON_MDT0=false
+ if [ "$1" == "--fsck" ]; then
+ shift
+ [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ E2FSCK_ON_MDT0=true
+ fi
+
local facets=$1
local sleep_time=$2
local -a affecteds
shutdown_facet $facet
done
+ $E2FSCK_ON_MDT0 && (run_e2fsck $(facet_active_host $SINGLEMDS) \
+ $(mdsdevname 1) "-n" || error "Running e2fsck")
+
for ((index=0; index<$total; index++)); do
facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
echo reboot facets: ${affecteds[index]}
replay_barrier() {
local facet=$1
do_facet $facet "sync; sync; sync"
- df $MOUNT
+ $LFS df $MOUNT
# make sure there will be no seq change
local clients=${CLIENTS:-$HOSTNAME}
facet_failover $* || error "failover: $?"
wait_clients_import_state "$clients" "$facets" FULL
- clients_up || error "post-failover df: $?"
+ clients_up || error "post-failover stat: $?"
}
fail_nodf() {
change_active $facet
wait_for_facet $facet
mount_facet $facet -o abort_recovery
- clients_up || echo "first df failed: $?"
- clients_up || error "post-failover df: $?"
+ clients_up || echo "first stat failed: $?"
+ clients_up || error "post-failover stat: $?"
}
do_lmc() {
done
myList="${myList%* }";
- # We can select an object at a offset in the list
+ # We can select an object at an offset in the list
[ $# -eq 2 ] && {
cnt=0
for item in $myList; do
}
+# Print the host currently serving facet $1: resolve the active
+# (primary or failover) instance, then map it to its host.
facet_active_host() {
- local facet=$1
- local active=`facet_active $facet`
- if [ "$facet" == client ]; then
- echo $HOSTNAME
- else
- echo `facet_host $active`
- fi
+ facet_host $(facet_active $1)
}
# Get the passive failover partner host of facet.
fi
done
+ for var in VERBOSE; do
+ if [ -n "${!var}" ]; then
+ echo -n " $var=${!var}"
+ fi
+ done
+
if [ -n "$FSTYPE" ]; then
echo -n " FSTYPE=$FSTYPE"
fi
+
+ for var in LNETLND NETTYPE; do
+ if [ -n "${!var}" ]; then
+ echo -n " $var=${!var}"
+ fi
+ done
}
do_nodes() {
return ${PIPESTATUS[0]}
}
+##
+# Execute commands on a single service's host
+#
+# The \a facet (service) may be on a local or remote node, which is
+# determined at the time the command is run.
+#
+# usage: do_facet $facet command [arg ...]
do_facet() {
- local facet=$1
- shift
- local HOST=`facet_active_host $facet`
- [ -z $HOST ] && echo No host defined for facet ${facet} && exit 1
- do_node $HOST "$@"
+ local facet=$1
+ shift
+ local HOST=$(facet_active_host $facet)
+ [ -z $HOST ] && echo "No host defined for facet ${facet}" && exit 1
+ do_node $HOST "$@"
}
# Function: do_facet_random_file $FACET $FILE $SIZE
eval VDEVPTR="";;
zfs )
#if $OSTDEVn isn't defined, default is $OSTDEVBASE{n}
- # Device formated by zfs
+ # Device formatted by zfs
DEVNAME=OSTDEV$num
eval VDEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
* )
echo -n $VDEVPTR
}
-# Logical device formated for lustre
+# Logical device formatted for lustre
mdsdevname() {
local num=$1
local DEVNAME=MDSDEV$num
eval VDEVPTR="";;
zfs )
# if $MDSDEVn isn't defined, default is $MDSDEVBASE{n}
- # Device formated by ZFS
+ # Device formatted by ZFS
local DEVNAME=MDSDEV$num
eval VDEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
* )
[[ $facet = mgs ]] && combined_mgs_mds && facet="mds1"
local var=${facet}_MOUNT
- eval mntpt=${!var:-${MOUNT%/*}/$facet}
+ eval mntpt=${!var:-${MOUNT}-$facet}
echo -n $mntpt
}
mount_ldiskfs() {
local facet=$1
local dev=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local opts
if ! do_facet $facet test -b $dev; then
unmount_ldiskfs() {
local facet=$1
local dev=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
- do_facet $facet umount -d $mnt
+ do_facet $facet $UMOUNT $mnt
}
var_name() {
mount_zfs() {
local facet=$1
local ds=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local canmnt
local mntpt
unmount_zfs() {
local facet=$1
local ds=$(facet_device $facet)
- local mnt=$(facet_mntpt $facet)
+ local mnt=${2:-$(facet_mntpt $facet)}
local var_mntpt=mz_$(var_name ${facet}_$ds)_mountpoint
local var_canmnt=mz_$(var_name ${facet}_$ds)_canmount
local mntpt=${!var_mntpt}
mount_fstype() {
local facet=$1
+ local mnt=$2
local fstype=$(facet_fstype $facet)
- mount_$fstype $facet
+ mount_$fstype $facet $mnt
}
unmount_fstype() {
local facet=$1
+ local mnt=$2
local fstype=$(facet_fstype $facet)
- unmount_$fstype $facet
+ unmount_$fstype $facet $mnt
}
########
zconf_umount_clients $clients $MOUNT "$*" || true
[ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
- [ "$CLIENTONLY" ] && return
+ [ -n "$CLIENTONLY" ] && return
+
# The add fn does rm ${facet}active file, this would be enough
# if we use do_facet <facet> only after the facet added, but
# currently we use do_facet mds in local.sh
}
cleanup_echo_devs () {
- local devs=$($LCTL dl | grep echo | awk '{print $4}')
+ trap 0
+ local dev
+ local devs=$($LCTL dl | grep echo | awk '{print $4}')
- for dev in $devs; do
- $LCTL --device $dev cleanup
- $LCTL --device $dev detach
- done
+ for dev in $devs; do
+ $LCTL --device $dev cleanup
+ $LCTL --device $dev detach
+ done
}
cleanupall() {
nfs_client_mode && return
+ cifs_client_mode && return
stopall $*
cleanup_echo_devs
local fs_mkfs_opts
local var
- if [ $type == MGS ] && combined_mgs_mds; then
- return 1
- fi
-
if [ $type == MGS ] || ( [ $type == MDS ] &&
[ "$dev" == $(mgsdevname) ] &&
[ "$host" == "$(facet_host mgs)" ] ); then
opts+=${LDLM_TIMEOUT:+" --param=sys.ldlm_timeout=$LDLM_TIMEOUT"}
if [ $type == MDS ]; then
- opts+=${SECLEVEL:+" --param=mdt.sec_level"}
opts+=${MDSCAPA:+" --param-mdt.capa=$MDSCAPA"}
opts+=${STRIPE_BYTES:+" --param=lov.stripesize=$STRIPE_BYTES"}
opts+=${STRIPES_PER_OBJ:+" --param=lov.stripecount=$STRIPES_PER_OBJ"}
fs_mkfs_opts+="-O large_xattr"
fi
- fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"}
- if [ ! -z $EJOURNAL ]; then
- fs_mkfs_opts+=${MDSJOURNALSIZE:+" device=$EJOURNAL"}
+ var=${facet}_JRN
+ if [ -n "${!var}" ]; then
+ fs_mkfs_opts+=" -J device=${!var}"
+ else
+ fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"}
fi
fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"}
fi
fi
if [ $type == OST ]; then
- opts+=${SECLEVEL:+" --param=ost.sec_level"}
opts+=${OSSCAPA:+" --param=ost.capa=$OSSCAPA"}
if [ $fstype == ldiskfs ]; then
- fs_mkfs_opts+=${OSTJOURNALSIZE:+" -J size=$OSTJOURNALSIZE"}
+ var=${facet}_JRN
+ if [ -n "${!var}" ]; then
+ fs_mkfs_opts+=" -J device=${!var}"
+ else
+ fs_mkfs_opts+=${OSTJOURNALSIZE:+" -J size=$OSTJOURNALSIZE"}
+ fi
fi
fi
done
}
-formatall() {
+format_mgs() {
local quiet
if ! $VERBOSE; then
quiet=yes
fi
+ echo "Format mgs: $(mgsdevname)"
+ reformat_external_journal mgs
+ add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \
+ $(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} || exit 10
+}
- stopall
- # We need ldiskfs here, may as well load them all
- load_modules
- [ "$CLIENTONLY" ] && return
- echo Formatting mgs, mds, osts
- if ! combined_mgs_mds ; then
- echo "Format mgs: $(mgsdevname)"
- add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \
- $(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} ||
- exit 10
+format_mdt() {
+ local num=$1
+ local quiet
+
+ if ! $VERBOSE; then
+ quiet=yes
fi
+ echo "Format mds$num: $(mdsdevname $num)"
+ reformat_external_journal mds$num
+ add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \
+ --reformat $(mdsdevname $num) $(mdsvdevname $num) \
+ ${quiet:+>/dev/null} || exit 10
+}
- for num in $(seq $MDSCOUNT); do
- echo "Format mds$num: $(mdsdevname $num)"
- add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \
- --reformat $(mdsdevname $num) $(mdsvdevname $num) \
- ${quiet:+>/dev/null} || exit 10
- done
+format_ost() {
+ local num=$1
- export OST_INDICES=($(hostlist_expand "$OST_INDEX_LIST"))
- check_ost_indices
- for num in $(seq $OSTCOUNT); do
- echo "Format ost$num: $(ostdevname $num)"
- add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \
- --reformat $(ostdevname $num) $(ostvdevname ${num}) \
- ${quiet:+>/dev/null} || exit 10
+ if ! $VERBOSE; then
+ quiet=yes
+ fi
+ echo "Format ost$num: $(ostdevname $num)"
+ reformat_external_journal ost$num
+ add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \
+ --reformat $(ostdevname $num) $(ostvdevname ${num}) \
+ ${quiet:+>/dev/null} || exit 10
+}
+
+formatall() {
+ stopall
+ # Set hostid for ZFS/SPL zpool import protection
+ # (Assumes MDS version is also OSS version)
+ if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.8.54) ];
+ then
+ do_rpc_nodes "$(comma_list $(remote_nodes_list))" set_hostid
+ fi
+
+ # We need ldiskfs here, may as well load them all
+ load_modules
+ [ -n "$CLIENTONLY" ] && return
+ echo Formatting mgs, mds, osts
+ if ! combined_mgs_mds ; then
+ format_mgs
+ fi
+
+ for num in $(seq $MDSCOUNT); do
+ format_mdt $num
+ done
+
+ export OST_INDICES=($(hostlist_expand "$OST_INDEX_LIST"))
+ check_ost_indices
+ for num in $(seq $OSTCOUNT); do
+ format_ost $num
done
}
}
setupall() {
- nfs_client_mode && return
+ nfs_client_mode && return
+ cifs_client_mode && return
- sanity_mount_check ||
- error "environments are insane!"
+ sanity_mount_check || error "environments are insane!"
- load_modules
+ load_modules
- if [ -z "$CLIENTONLY" ]; then
- echo Setup mgs, mdt, osts
- echo $WRITECONF | grep -q "writeconf" && \
- writeconf_all
- if ! combined_mgs_mds ; then
+ if [ -z "$CLIENTONLY" ]; then
+ echo Setup mgs, mdt, osts
+ echo $WRITECONF | grep -q "writeconf" && writeconf_all
+ if ! combined_mgs_mds ; then
start mgs $(mgsdevname) $MGS_MOUNT_OPTS
- fi
+ fi
for num in `seq $MDSCOUNT`; do
DEVNAME=$(mdsdevname $num)
}
init_facet_vars () {
- [ "$CLIENTONLY" ] && return 0
+ [ -n "$CLIENTONLY" ] && return 0
local facet=$1
shift
local device=$1
do_facet mgs "$LCTL conf_param $PARAM='$FINAL'" ||
error "conf_param $PARAM failed"
- wait_update $(facet_host $myfacet) "$TEST" "$FINAL" ||
+ wait_update_facet $myfacet "$TEST" "$FINAL" ||
error "check $PARAM failed!"
}
init_param_vars () {
- remote_mds_nodsh ||
- TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
+ TIMEOUT=$(lctl get_param -n timeout)
+ TIMEOUT=${TIMEOUT:-20}
+ remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
+
+ TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
log "Using TIMEOUT=$TIMEOUT"
osc_ensure_active $SINGLEMDS $TIMEOUT
return 1
}
+cifs_client_mode () {
+ [ x$CIFSCLIENT = xyes ] &&
+ echo "CIFSCLIENT=$CIFSCLIENT mode: setup, cleanup, check config skipped"
+}
+
check_config_client () {
local mntpt=$1
local mounted=$(mount | grep " $mntpt ")
- if [ "$CLIENTONLY" ]; then
+ if [ -n "$CLIENTONLY" ]; then
# bug 18021
# CLIENTONLY should not depend on *_HOST settings
local mgc=$($LCTL device_list | awk '/MGC/ {print $4}')
# in theory someone could create a new,
# client-only config file that assumed lustre was already
# configured and didn't set the MGSNID. If MGSNID is not set,
- # then we should use the mgs nid currently being used
+ # then we should use the mgs nid currently being used
# as the default value. bug 18021
[[ x$MGSNID = x ]] &&
MGSNID=${mgc//MGC/}
return 0
fi
- local myMGS_host=$mgs_HOST
+ local myMGS_host=$mgs_HOST
if [ "$NETTYPE" = "ptl" ]; then
- myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
+ myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
fi
echo Checking config lustre mounted on $mntpt
local mntpt=$1
nfs_client_mode && return
+ cifs_client_mode && return
do_rpc_nodes "$clients" check_config_client $mntpt
}
check_and_setup_lustre() {
- nfs_client_mode && return
+ sanitize_parameters
+ nfs_client_mode && return
+ cifs_client_mode && return
- local MOUNTED=$(mounted_lustre_filesystems)
-
- local do_check=true
- # 1.
- # both MOUNT and MOUNT2 are not mounted
- if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
- [ "$REFORMAT" ] && formatall
- # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
- setupall
- is_mounted $MOUNT || error "NAME=$NAME not mounted"
- export I_MOUNTED=yes
- do_check=false
+ local MOUNTED=$(mounted_lustre_filesystems)
+
+ local do_check=true
+ # 1.
+ # both MOUNT and MOUNT2 are not mounted
+ if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
+ [ "$REFORMAT" = "yes" ] && formatall
+ # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
+ setupall
+ is_mounted $MOUNT || error "NAME=$NAME not mounted"
+ export I_MOUNTED=yes
+ do_check=false
# 2.
# MOUNT2 is mounted
elif is_mounted $MOUNT2; then
restore_mount $MOUNT2
export I_MOUNTED2=yes
fi
- fi
+ fi
# 5.
# MOUNT is mounted MOUNT2 is not mounted
set_default_debug_nodes $(comma_list $(nodes_list))
fi
- if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then
+ if [ -z "$CLIENTONLY" -a $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ]; then
local facets=""
[ "$(facet_fstype ost1)" = "ldiskfs" ] &&
facets="$(get_facets OST)"
set_flavor_all $SEC
fi
- #Enable remote MDT create for testing
- for num in $(seq $MDSCOUNT); do
- do_facet mds$num \
- lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
- 2>/dev/null
- done
+ if [ -z "$CLIENTONLY" ]; then
+ # Enable remote MDT create for testing
+ for num in $(seq $MDSCOUNT); do
+ do_facet mds$num \
+ lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
+ 2>/dev/null
+ done
+ fi
if [ "$ONLY" == "setup" ]; then
exit 0
return 0
}
-# Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
-generate_db() {
- local i
- local ostidx
- local dev
- local node
-
- [[ $(lustre_version_code $SINGLEMDS) -ne $(version_code 2.2.0) ]] ||
- { skip "Lustre 2.2.0 lacks the patch for LU-1255"; exit 0; }
-
- check_shared_dir $SHARED_DIRECTORY ||
- error "$SHARED_DIRECTORY isn't a shared directory"
-
- export MDSDB=$SHARED_DIRECTORY/mdsdb
- export OSTDB=$SHARED_DIRECTORY/ostdb
-
- # DNE is not supported, so when running e2fsck on a DNE filesystem,
- # we only pass master MDS parameters.
- run_e2fsck $MDTNODE $MDTDEV "-n --mdsdb $MDSDB"
-
- i=0
- ostidx=0
- OSTDB_LIST=""
- for node in $(osts_nodes); do
- for dev in ${OSTDEVS[i]}; do
- run_e2fsck $node $dev "-n --mdsdb $MDSDB --ostdb $OSTDB-$ostidx"
- OSTDB_LIST="$OSTDB_LIST $OSTDB-$ostidx"
- ostidx=$((ostidx + 1))
- done
- i=$((i + 1))
- done
-}
-
-# Run lfsck on server node if lfsck can't be found on client (LU-2571)
-run_lfsck_remote() {
- local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
- local client=$1
- local mounted=true
- local rc=0
-
- #Check if lustre is already mounted
- do_rpc_nodes $client is_mounted $MOUNT || mounted=false
- if ! $mounted; then
- zconf_mount $client $MOUNT ||
- error "failed to mount Lustre on $client"
- fi
- #Run lfsck
- echo $cmd
- do_node $client $cmd || rc=$?
- #Umount if necessary
- if ! $mounted; then
- zconf_umount $client $MOUNT ||
- error "failed to unmount Lustre on $client"
- fi
-
- [ $rc -le $FSCK_MAX_ERR ] ||
- error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
- echo "lfsck finished with rc=$rc"
-
- return $rc
-}
-
run_lfsck() {
- local facets="client $SINGLEMDS"
- local found=false
- local facet
- local node
- local rc=0
-
- for facet in $facets; do
- node=$(facet_active_host $facet)
- if check_progs_installed $node $LFSCK_BIN; then
- found=true
- break
- fi
+ do_nodes $(comma_list $(mdts_nodes) $(osts_nodes)) \
+ $LCTL set_param printk=+lfsck
+ do_facet $SINGLEMDS "$LCTL lfsck_start -M $FSNAME-MDT0000 -r -A -t all"
+
+ for k in $(seq $MDSCOUNT); do
+ # wait up to 10+1 minutes for LFSCK to complete
+ wait_update_facet --verbose mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_layout |
+ awk '/^status/ { print \\\$2 }'" "completed" 600 ||
+ error "MDS${k} layout isn't the expected 'completed'"
+ wait_update_facet --verbose mds${k} "$LCTL get_param -n \
+ mdd.$(facet_svc mds${k}).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed" 60 ||
+ error "MDS${k} namespace isn't the expected 'completed'"
done
- ! $found && error "None of \"$facets\" supports lfsck"
-
- run_lfsck_remote $node || rc=$?
-
- rm -rvf $MDSDB* $OSTDB* || true
- return $rc
+ local rep_mdt=$(do_nodes $(comma_list $(mdts_nodes)) \
+ $LCTL get_param -n mdd.$FSNAME-*.lfsck_* |
+ awk '/repaired/ { print $2 }' | calc_sum)
+ local rep_ost=$(do_nodes $(comma_list $(osts_nodes)) \
+ $LCTL get_param -n obdfilter.$FSNAME-*.lfsck_* |
+ awk '/repaired/ { print $2 }' | calc_sum)
+ local repaired=$((rep_mdt + rep_ost))
+ [ $repaired -eq 0 ] ||
+ error "lfsck repaired $rep_mdt MDT and $rep_ost OST errors"
}
dump_file_contents() {
}
check_and_cleanup_lustre() {
- if [ "$LFSCK_ALWAYS" = "yes" -a "$TESTSUITE" != "lfsck" ]; then
- get_svr_devs
- generate_db
- run_lfsck
- fi
+ if [ "$LFSCK_ALWAYS" = "yes" -a "$TESTSUITE" != "sanity-lfsck" -a \
+ "$TESTSUITE" != "sanity-scrub" ]; then
+ run_lfsck
+ fi
if is_mounted $MOUNT; then
- [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* ||
- error "remove sub-test dirs failed"
+ if $DO_CLEANUP; then
+ [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* ||
+ error "remove sub-test dirs failed"
+ else
+ echo "skip cleanup"
+ fi
[ "$ENABLE_QUOTA" ] && restore_quota || true
fi
}
drop_update_reply() {
-# OBD_FAIL_UPDATE_OBJ_NET_REP
+# OBD_FAIL_OUT_UPDATE_NET_REP
local index=$1
shift 1
RC=0
}
drop_bl_callback_once() {
- rc=0
+ local rc=0
do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=0
#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
do_facet client lctl set_param fail_loc=0x80000305
do_facet client "$@" || rc=$?
do_facet client lctl set_param fail_loc=0
+ do_facet client lctl set_param fail_val=0
do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=1
return $rc
}
do_facet client lctl set_param fail_loc=0x305
do_facet client "$@" || rc=$?
do_facet client lctl set_param fail_loc=0
+ do_facet client lctl set_param fail_val=0
do_facet client lctl set_param ldlm.namespaces.*.early_lock_cancel=1
return $rc
}
}
set_nodes_failloc () {
- do_nodes $(comma_list $1) lctl set_param fail_val=0 fail_loc=$2
+ local fv=${3:-0}
+ do_nodes $(comma_list $1) lctl set_param fail_val=$fv fail_loc=$2
}
cancel_lru_locks() {
- $LCTL mark "cancel_lru_locks $1 start"
-
- if [ $1 != "MGC" ]; then
- for d in $(lctl get_param -N ldlm.namespaces.*.lru_size |
- egrep -i $1); do
- $LCTL set_param -n $d=clear
- done
- $LCTL get_param ldlm.namespaces.*.lock_unused_count | egrep -i $1 |
- grep -v '=0'
- else
- for d in $(find \
- /{proc,sys}/fs/lustre/ldlm/namespaces/*$1*/lru_size \
- 2> /dev/null); do
- echo "clear" > $d
- done
-
- for d in $(find \
- /{proc,sys}/fs/lustre/ldlm/namespaces/*$1*/lock_unused_count \
- 2> /dev/null); do
- if [ $(cat $d) != 0 ]; then
- echo "ldlm.namespaces.$(echo "$d" |
- cut -f 7 -d'/').lock_unused_count=$(cat $d)"
- fi
- done
- fi
-
- $LCTL mark "cancel_lru_locks $1 stop"
+ #$LCTL mark "cancel_lru_locks $1 start"
+ $LCTL set_param -n ldlm.namespaces.*$1*.lru_size=clear
+ $LCTL get_param ldlm.namespaces.*$1*.lock_unused_count | grep -v '=0'
+ #$LCTL mark "cancel_lru_locks $1 stop"
}
default_lru_size()
}
debugsave() {
- DEBUGSAVE="$(lctl get_param -n debug)"
+ DEBUGSAVE="$(lctl get_param -n debug)"
+ DEBUGSAVE_SERVER=$(do_facet $SINGLEMDS "$LCTL get_param -n debug")
}
debugrestore() {
- [ -n "$DEBUGSAVE" ] && \
- do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=\\\"${DEBUGSAVE}\\\";"
- DEBUGSAVE=""
+ [ -n "$DEBUGSAVE" ] &&
+ do_nodes $CLIENTS "$LCTL set_param debug=\\\"${DEBUGSAVE}\\\""||
+ true
+ DEBUGSAVE=""
+
+ [ -n "$DEBUGSAVE_SERVER" ] &&
+ do_nodes $(comma_list $(all_server_nodes)) \
+ "$LCTL set_param debug=\\\"${DEBUGSAVE_SERVER}\\\"" ||
+ true
+ DEBUGSAVE_SERVER=""
}
debug_size_save() {
}
# prints bash call stack
-log_trace_dump() {
+print_stack_trace() {
+ local skip=${1:-1}
echo " Trace dump:"
- for (( i=1; i < ${#BASH_LINENO[*]} ; i++ )) ; do
- local s=${BASH_SOURCE[$i]}
- local l=${BASH_LINENO[$i-1]}
- local f=${FUNCNAME[$i]}
- echo " = $s:$l:$f()"
+ for (( i=$skip; i < ${#BASH_LINENO[*]} ; i++ )) ; do
+ local src=${BASH_SOURCE[$i]}
+ local lineno=${BASH_LINENO[$i-1]}
+ local funcname=${FUNCNAME[$i]}
+ echo " = $src:$lineno:$funcname()"
done
}
-##################################
-# Test interface
-##################################
-
-error_noexit() {
+report_error() {
local TYPE=${TYPE:-"FAIL"}
local dump=true
dump=false
fi
-
log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
- log_trace_dump
-
+ (print_stack_trace 2) >&2
mkdir -p $LOGDIR
# We need to dump the logs on all nodes
if $dump; then
echo "$@" > $LOGDIR/err
fi
fi
+
+ # cleanup the env for failed tests
+ reset_fail_loc
+}
+
+##################################
+# Test interface
+##################################
+
+error_noexit() {
+ report_error "$@"
}
exit_status () {
}
error() {
- error_noexit "$@"
+ report_error "$@"
exit 1
}
error_exit() {
- error "$@"
+ report_error "$@"
+ exit 1
}
# use only if we are ignoring failures for this test, bugno required.
error_ignore() {
local TYPE="IGNORE ($1)"
shift
- error_noexit "$@"
+ report_error "$@"
}
error_and_remount() {
- error_noexit "$@"
+ report_error "$@"
remount_client $MOUNT
exit 1
}
+# Throw an error if it's not running in vm - usually for performance
+# verification
+error_not_in_vm() {
+ local virt=$(running_in_vm)
+ if [[ -n "$virt" ]]; then
+ echo "running in VM '$virt', ignore error"
+ error_ignore env=$virt "$@"
+ else
+ error "$@"
+ fi
+}
+
skip_env () {
$FAIL_ON_SKIP_ENV && error false $@ || skip $@
}
}
log() {
- echo "$*"
- module_loaded lnet || load_modules
+ echo "$*" >&2
+ load_module ../libcfs/libcfs/libcfs
local MSG="$*"
# Get rid of '
}
reset_fail_loc () {
- echo -n "Resetting fail_loc on all nodes..."
- do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 \
- fail_val=0 2>/dev/null || true"
- echo done.
+ echo -n "Resetting fail_loc on all nodes..."
+ do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 \
+ fail_val=0 2>/dev/null" || true
+ echo done.
}
#
-# Log a message (on all nodes) padded with "=" before and after.
+# Log a message (on all nodes) padded with "=" before and after.
# Also appends a timestamp and prepends the testsuite name.
-#
+#
EQUALS="===================================================================================================="
banner() {
log "$msg== $(date +"%H:%M:%S (%s)")"
}
+check_dmesg_for_errors() {
+ local res
+ local errors="VFS: Busy inodes after unmount of\|\
+ldiskfs_check_descriptors: Checksum for group 0 failed\|\
+group descriptors corrupted"
+
+ res=$(do_nodes $(comma_list $(nodes_list)) "dmesg" | grep "$errors")
+ [ -z "$res" ] && return 0
+ echo "Kernel error detected: $res"
+ return 1
+}
+
#
# Run a single test function and cleanup after it.
#
local SAVE_UMASK=`umask`
umask 0022
+ if ! grep -q "$DIR" /proc/mounts; then
+ $SETUP
+ fi
+
banner "test $testnum: $message"
test_${testnum} || error "test_$testnum failed with $?"
cd $SAVE_PWD
reset_fail_loc
check_grant ${testnum} || error "check_grant $testnum failed with $?"
- check_catastrophe || error "LBUG/LASSERT detected"
+ check_node_health
+ check_dmesg_for_errors || error "Error in dmesg detected"
if [ "$PARALLEL" != "yes" ]; then
ps auxww | grep -v grep | grep -q multiop &&
error "multiop still running"
unset tdir
unset tfile
umask $SAVE_UMASK
+ $CLEANUP
return 0
}
remote_mds_nodsh()
{
- [ "$CLIENTONLY" ] && return 0 || true
- remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
+ [ -n "$CLIENTONLY" ] && return 0 || true
+ remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
}
require_dsh_mds()
{
- remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" && \
- MSKIPPED=1 && return 1
- return 0
+ remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" &&
+ MSKIPPED=1 && return 1
+ return 0
}
remote_ost ()
remote_ost_nodsh()
{
- [ "$CLIENTONLY" ] && return 0 || true
- remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+ [ -n "$CLIENTONLY" ] && return 0 || true
+ remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
require_dsh_ost()
remote_mgs_nodsh()
{
- local MGS
- MGS=$(facet_host mgs)
- remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+ [ -n "$CLIENTONLY" ] && return 0 || true
+ local MGS
+ MGS=$(facet_host mgs)
+ remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
local_mode ()
rnodes=${rnodes//,/ }
local -a nodes=($rnodes)
- local num=${#nodes[@]}
+ local num=${#nodes[@]}
local i=$((RANDOM * num * 2 / 65536))
echo ${nodes[i]}
}
client_only () {
- [ "$CLIENTONLY" ] || [ "$CLIENTMODSONLY" = yes ]
-}
-
-is_patchless ()
-{
- lctl get_param version | grep -q patchless
+ [ -n "$CLIENTONLY" ] || [ "x$CLIENTMODSONLY" = "xyes" ]
}
check_versions () {
done
}
-check_catastrophe() {
- local rnodes=${1:-$(comma_list $(remote_nodes_list))}
- VAR=$(lctl get_param -n catastrophe 2>&1)
- if [ $? = 0 ] ; then
- if [ $VAR != 0 ]; then
- return 1
- fi
- fi
-
- [ -z "$rnodes" ] && return 0
-
- local data
- data=$(do_nodes "$rnodes" "rc=\\\$(lctl get_param -n catastrophe);
- if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
- exit \\\$rc")
- local rc=$?
- if [ -n "$data" ]; then
- echo $data
- return $rc
- fi
- return 0
-}
-
-# CMD: determine mds index where directory inode presents
-get_mds_dir() {
- local dir=$1
- local SEQ
+check_node_health() {
+ local nodes=${1:-$(comma_list $(nodes_list))}
- SEQ=$(lfs path2fid $dir | tr '[:]' ' '|cut -f2 -d ' ')
- if [ "$SEQ" == "" ]; then
- error "can't get sequence for $dir"
- return 1
- fi
- export SEQ
-
- do_facet mds1 "cat /proc/fs/lustre/fld/srv-*-MDT0000/fldb" | \
- tr '[)]:-' ' ' | \
- while read SS EE IDX TYP; do \
- if let "SEQ >= SS && SEQ < EE"; then \
- echo $IDX; \
- fi; \
+ for node in ${nodes//,/ }; do
+ check_network "$node" 5
+ if [ $? -eq 0 ]; then
+ do_node $node "rc=0;
+ val=\\\$($LCTL get_param -n catastrophe 2>&1);
+ if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then
+ echo \\\$(hostname -s): \\\$val;
+ rc=\\\$val;
+ fi;
+ exit \\\$rc" || error "$node:LBUG/LASSERT detected"
+ fi
done
}
}
get_clientosc_proc_path() {
- echo "${1}-osc-*"
-}
-
-get_lustre_version () {
- local facet=${1:-"$SINGLEMDS"}
- do_facet $facet $LCTL get_param -n version | awk '/^lustre:/ {print $2}'
-}
-
-lustre_version_code() {
- local facet=${1:-"$SINGLEMDS"}
- version_code $(get_lustre_version $1)
+ echo "${1}-osc-*"
}
# If the 2.0 MDS was mounted on 1.8 device, then the OSC and LOV names
}
get_mdtosc_proc_path() {
- local mds_facet=$1
- local ost_label=${2:-"*OST*"}
-
- [ "$mds_facet" = "mds" ] && mds_facet=$SINGLEMDS
- local mdt_label=$(convert_facet2label $mds_facet)
- local mdt_index=$(echo $mdt_label | sed -e 's/^.*-//')
-
- if [ $(lustre_version_code $mds_facet) -le $(version_code 1.8.0) ] ||
- mds_on_old_device $mds_facet; then
- echo "${ost_label}-osc"
- else
- echo "${ost_label}-osc-${mdt_index}"
- fi
+ local mds_facet=$1
+ local ost_label=${2:-"*OST*"}
+
+ [ "$mds_facet" = "mds" ] && mds_facet=$SINGLEMDS
+ local mdt_label=$(convert_facet2label $mds_facet)
+ local mdt_index=$(echo $mdt_label | sed -e 's/^.*-//')
+
+ if [ $(lustre_version_code $mds_facet) -le $(version_code 1.8.0) ] ||
+ mds_on_old_device $mds_facet; then
+ echo "${ost_label}-osc"
+ elif [[ $ost_label = *OST* ]]; then
+ echo "${ost_label}-osc-${mdt_index}"
+ else
+ echo "${ost_label}-osp-${mdt_index}"
+ fi
}
get_osc_import_name() {
local expected=$1
local CONN_PROC=$2
local maxtime=${3:-$(max_recovery_time)}
+ local error_on_failure=${4:-1}
local CONN_STATE
local i=0
# reconnect timeout and test can't see real disconnect
[ "${CONN_STATE}" == "CONNECTING" ] && return 0
fi
- [ $i -ge $maxtime ] && \
- error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \
+ if [ $i -ge $maxtime ]; then
+ [ $error_on_failure -ne 0 ] && \
+ error "can't put import for $CONN_PROC into ${expected}" \
+ "state after $i sec, have ${CONN_STATE}"
return 1
+ fi
sleep 1
# Add uniq for multi-mount case
CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
local state=$1
local params=$2
local maxtime=${3:-$(max_recovery_time)}
+ local error_on_failure=${4:-1}
local param
for param in ${params//,/ }; do
- _wait_import_state $state $param $maxtime || return
+ _wait_import_state $state $param $maxtime $error_on_failure || return
done
}
local facet=$1
local ost_facet=$2
local expected=$3
- local ost=$(get_osc_import_name $facet $ost_facet)
- local param="osc.${ost}.ost_server_uuid"
+ local target=$(get_osc_import_name $facet $ost_facet)
+ local param="osc.${target}.ost_server_uuid"
local params=$param
local i=0
params=$($LCTL list_param $param 2>/dev/null || true)
done
fi
+
+ if [[ $ost_facet = mds* ]]; then
+ # no OSP connection to itself
+ if [[ $facet = $ost_facet ]]; then
+ return 0
+ fi
+ param="osp.${target}.mdt_server_uuid"
+ params=$param
+ fi
+
if ! do_rpc_nodes "$(facet_active_host $facet)" \
wait_import_state $expected "$params" $maxtime; then
error "import is not in ${expected} state"
fi
}
+_wait_mgc_import_state() {
+ local facet=$1
+ local expected=$2
+ local error_on_failure=${3:-1}
+ local param="mgc.*.mgs_server_uuid"
+ local params=$param
+ local i=0
+
+ # 1. wait the deadline of client 1st request (it could be skipped)
+ # 2. wait the deadline of client 2nd request
+ local maxtime=$(( 2 * $(request_timeout $facet)))
+
+ if [[ $facet == client* ]]; then
+ # During setup time, the osc might not be set up, it needs to wait
+ # until list_param can return valid value. And also if there
+ # are multiple osc entries we should list all of them before
+ # go to wait.
+ params=$($LCTL list_param $param 2>/dev/null || true)
+ while [ -z "$params" ]; do
+ if [ $i -ge $maxtime ]; then
+ echo "can't get $param in $maxtime secs"
+ return 1
+ fi
+ sleep 1
+ i=$((i + 1))
+ params=$($LCTL list_param $param 2>/dev/null || true)
+ done
+ fi
+ if ! do_rpc_nodes "$(facet_active_host $facet)" \
+ wait_import_state $expected "$params" $maxtime \
+ $error_on_failure; then
+ if [ $error_on_failure -ne 0 ]; then
+ error "import is not in ${expected} state"
+ fi
+ return 1
+ fi
+
+ return 0
+}
+
+wait_mgc_import_state() {
+ local facet=$1
+ local expected=$2
+ local error_on_failure=${3:-1}
+ local num
+
+ if [[ $facet = mds ]]; then
+ for num in $(seq $MDSCOUNT); do
+ _wait_mgc_import_state mds$num "$expected" \
+ $error_on_failure || return
+ done
+ else
+ _wait_mgc_import_state "$facet" "$expected" \
+ $error_on_failure || return
+ fi
+}
+
+wait_dne_interconnect() {
+ local num
+
+ if [ $MDSCOUNT -gt 1 ]; then
+ for num in $(seq $MDSCOUNT); do
+ wait_osc_import_state mds mds$num FULL
+ done
+ fi
+}
+
get_clientmdc_proc_path() {
echo "${1}-mdc-*"
}
+get_clientmgc_proc_path() {
+ echo "*"
+}
+
do_rpc_nodes () {
local list=$1
shift
}
wait_clients_import_state () {
- local list=$1
- local facet=$2
- local expected=$3
+ local list=$1
+ local facet=$2
+ local expected=$3
- local facets=$facet
+ local facets=$facet
- if [ "$FAILURE_MODE" = HARD ]; then
- facets=$(facets_on_host $(facet_active_host $facet))
- fi
+ if [ "$FAILURE_MODE" = HARD ]; then
+ facets=$(facets_on_host $(facet_active_host $facet))
+ fi
- for facet in ${facets//,/ }; do
- local label=$(convert_facet2label $facet)
- local proc_path
- case $facet in
- ost* ) proc_path="osc.$(get_clientosc_proc_path $label).ost_server_uuid" ;;
- mds* ) proc_path="mdc.$(get_clientmdc_proc_path $label).mds_server_uuid" ;;
- *) error "unknown facet!" ;;
- esac
- local params=$(expand_list $params $proc_path)
- done
+ for facet in ${facets//,/ }; do
+ local label=$(convert_facet2label $facet)
+ local proc_path
+ case $facet in
+ ost* ) proc_path="osc.$(get_clientosc_proc_path \
+ $label).ost_server_uuid" ;;
+ mds* ) proc_path="mdc.$(get_clientmdc_proc_path \
+ $label).mds_server_uuid" ;;
+ mgs* ) proc_path="mgc.$(get_clientmgc_proc_path \
+ $label).mgs_server_uuid" ;;
+ *) error "unknown facet!" ;;
+ esac
+
+ local params=$(expand_list $params $proc_path)
+ done
- if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params; then
+ if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params;
+ then
error "import is not in ${expected} state"
return 1
fi
return $OSCFULL
}
-pool_list () {
- do_facet mgs lctl pool_list $1
+list_pool() {
+ echo -e "$(do_facet $SINGLEMDS $LCTL pool_list $1 | sed '1d')"
+}
+
+check_pool_not_exist() {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+ [[ $# -ne 1 ]] && return 0
+ [[ x$poolname = x ]] && return 0
+ list_pool $fsname | grep -w $1 && return 1
+ return 0
}
create_pool() {
- local fsname=${1%%.*}
- local poolname=${1##$fsname.}
-
- do_facet mgs lctl pool_new $1
- local RC=$?
- # get param should return err unless pool is created
- [[ $RC -ne 0 ]] && return $RC
-
- wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
- 2>/dev/null || echo foo" "" || RC=1
- if [[ $RC -eq 0 ]]; then
- add_pool_to_list $1
- else
- error "pool_new failed $1"
- fi
- return $RC
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ do_facet mgs lctl pool_new $1
+ local RC=$?
+ # get param should return err unless pool is created
+ [[ $RC -ne 0 ]] && return $RC
+
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$fsname-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id \
+ "lctl get_param -n lod.$lodname.pools.$poolname \
+ 2>/dev/null || echo foo" "" ||
+ error "mds$mds_id: pool_new failed $1"
+ done
+ wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+ 2>/dev/null || echo foo" "" || error "pool_new failed $1"
+
+ add_pool_to_list $1
+ return $RC
}
add_pool_to_list () {
- local fsname=${1%%.*}
- local poolname=${1##$fsname.}
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
- local listvar=${fsname}_CREATED_POOLS
- eval export ${listvar}=$(expand_list ${!listvar} $poolname)
+ local listvar=${fsname}_CREATED_POOLS
+ local temp=${listvar}=$(expand_list ${!listvar} $poolname)
+ eval export $temp
}
remove_pool_from_list () {
- local fsname=${1%%.*}
- local poolname=${1##$fsname.}
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
- local listvar=${fsname}_CREATED_POOLS
- eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+ local listvar=${fsname}_CREATED_POOLS
+ local temp=${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+ eval export $temp
}
destroy_pool_int() {
- local ost
- local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \
- awk '$1 !~ /^Pool:/ {print $1}')
- for ost in $OSTS; do
- do_facet mgs lctl pool_remove $1 $ost
- done
- do_facet mgs lctl pool_destroy $1
+ local ost
+ local OSTS=$(list_pool $1)
+ for ost in $OSTS; do
+ do_facet mgs lctl pool_remove $1 $ost
+ done
+ do_facet mgs lctl pool_destroy $1
}
# <fsname>.<poolname> or <poolname>
destroy_pool() {
- local fsname=${1%%.*}
- local poolname=${1##$fsname.}
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
- [[ x$fsname = x$poolname ]] && fsname=$FSNAME
+ [[ x$fsname = x$poolname ]] && fsname=$FSNAME
- local RC
+ local RC
- pool_list $fsname.$poolname || return $?
+ check_pool_not_exist $fsname.$poolname
+ [[ $? -eq 0 ]] && return 0
- destroy_pool_int $fsname.$poolname
- RC=$?
- [[ $RC -ne 0 ]] && return $RC
+ destroy_pool_int $fsname.$poolname
+ RC=$?
+ [[ $RC -ne 0 ]] && return $RC
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$fsname-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id \
+ "lctl get_param -n lod.$lodname.pools.$poolname \
+ 2>/dev/null || echo foo" "foo" ||
+ error "mds$mds_id: destroy pool failed $1"
+ done
+ wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+ 2>/dev/null || echo foo" "foo" || error "destroy pool failed $1"
- wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
- 2>/dev/null || echo foo" "foo" || RC=1
+ remove_pool_from_list $fsname.$poolname
- if [[ $RC -eq 0 ]]; then
- remove_pool_from_list $fsname.$poolname
- else
- error "destroy pool failed $1"
- fi
- return $RC
+ return $RC
}
destroy_pools () {
local poolname
local listvar=${fsname}_CREATED_POOLS
- pool_list $fsname
-
[ x${!listvar} = x ] && return 0
echo destroy the created pools: ${!listvar}
suffix="$ts.log"
echo "Dumping lctl log to ${prefix}.*.${suffix}"
- if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
+ if [ -n "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
echo "Dumping logs only on local client."
$LCTL dk > ${prefix}.debug_log.$(hostname -s).${suffix}
dmesg > ${prefix}.dmesg.$(hostname -s).${suffix}
return $rc
}
-# target_start_and_reset_recovery_timer()
-# service_time = at_est2timeout(service_time);
-# service_time += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC +
-# INITIAL_CONNECT_TIMEOUT);
-# CONNECTION_SWITCH_MAX : min(25U, max(CONNECTION_SWITCH_MIN,obd_timeout))
-#define CONNECTION_SWITCH_INC 1
-#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/20)
-#define CONNECTION_SWITCH_MIN 5U
+# check_and_start_recovery_timer()
+# service_time = at_est2timeout(service_time);
+# service_time += 2 * INITIAL_CONNECT_TIMEOUT;
+# service_time += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC);
-max_recovery_time () {
- local init_connect_timeout=$(( TIMEOUT / 20 ))
- [[ $init_connect_timeout -ge 5 ]] || init_connect_timeout=5
+#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20)
+#define CONNECTION_SWITCH_MAX min(50, max(CONNECTION_SWITCH_MIN, obd_timeout))
+#define CONNECTION_SWITCH_MIN 5
+#define CONNECTION_SWITCH_INC 5
+max_recovery_time() {
+ local init_connect_timeout=$((TIMEOUT / 20))
+ ((init_connect_timeout >= 5)) || init_connect_timeout=5
+
+ local service_time=$(($(at_max_get client) * 9 / 4 + 5))
+ service_time=$((service_time + 2 * (init_connect_timeout + 50 + 5)))
+
+ echo -n $service_time
+}
- local service_time=$(( $(at_max_get client) + $(( 2 * $(( 25 + 1 + init_connect_timeout)) )) ))
+recovery_time_min() {
+ local connection_switch_min=5
+ local connection_switch_inc=5
+ local connection_switch_max
+ local reconnect_delay_max
+ local initial_connect_timeout
+ local max
+ local timeout_20
- echo $service_time
+ #connection_switch_max=min(50, max($connection_switch_min, $TIMEOUT))
+ (($connection_switch_min > $TIMEOUT)) &&
+ max=$connection_switch_min || max=$TIMEOUT
+ (($max < 50)) && connection_switch_max=$max || connection_switch_max=50
+
+ #initial_connect_timeout = max(connection_switch_min, obd_timeout/20)
+ timeout_20=$((TIMEOUT/20))
+ (($connection_switch_min > $timeout_20)) &&
+ initial_connect_timeout=$connection_switch_min ||
+ initial_connect_timeout=$timeout_20
+
+ reconnect_delay_max=$((connection_switch_max + connection_switch_inc + \
+ initial_connect_timeout))
+ echo $((2 * reconnect_delay_max))
}
get_clients_mount_count () {
}
init_logging() {
- if [[ -n $YAML_LOG ]]; then
- return
- fi
- local SAVE_UMASK=`umask`
- umask 0000
-
- export YAML_LOG=${LOGDIR}/results.yml
- mkdir -p $LOGDIR
- init_clients_lists
-
- if [ ! -f $YAML_LOG ]; then # If the yaml log already exists then we will just append to it
- if check_shared_dir $LOGDIR; then
- touch $LOGDIR/shared
- echo "Logging to shared log directory: $LOGDIR"
- else
- echo "Logging to local directory: $LOGDIR"
- fi
+ [[ -n $YAML_LOG ]] && return
+ local save_umask=$(umask)
+ umask 0000
- yml_nodes_file $LOGDIR >> $YAML_LOG
- yml_results_file >> $YAML_LOG
- fi
+ export YAML_LOG=${LOGDIR}/results.yml
+ mkdir -p $LOGDIR
+ init_clients_lists
+
+ # If the yaml log already exists then we will just append to it
+ if [ ! -f $YAML_LOG ]; then
+ if check_shared_dir $LOGDIR; then
+ touch $LOGDIR/shared
+ echo "Logging to shared log directory: $LOGDIR"
+ else
+ echo "Logging to local directory: $LOGDIR"
+ fi
- umask $SAVE_UMASK
+ yml_nodes_file $LOGDIR >> $YAML_LOG
+ yml_results_file >> $YAML_LOG
+ fi
+
+ umask $save_umask
+
+ # If modules are not yet loaded then older "lctl lustre_build_version"
+ # will fail. Use lctl build version instead.
+ log "Client: $($LCTL lustre_build_version)"
+ log "MDS: $(do_facet $SINGLEMDS $LCTL lustre_build_version 2>/dev/null||
+ do_facet $SINGLEMDS $LCTL --version)"
+ log "OSS: $(do_facet ost1 $LCTL lustre_build_version 2> /dev/null ||
+ do_facet ost1 $LCTL --version)"
}
log_test() {
#
get_page_size() {
local facet=$1
- local size
+ local size=$(getconf PAGE_SIZE 2>/dev/null)
- size=$(do_facet $facet getconf PAGE_SIZE)
- [[ ${PIPESTATUS[0]} = 0 && -n "$size" ]] || size=4096
- echo -n $size
+ [ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
+ echo -n ${size:-4096}
}
#
local device=$2
local count
- count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+ [ -z "$CLIENTONLY" ] && count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
awk '/^Block count:/ {print $3}')
- echo -n $count
+ echo -n ${count:-0}
}
# Get the block size of the filesystem.
get_block_size() {
- local facet=$1
- local device=$2
- local size
+ local facet=$1
+ local device=$2
+ local size
- size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
- awk '/^Block size:/ {print $3}')
- echo $size
+ [ -z "$CLIENTONLY" ] && size=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+ awk '/^Block size:/ {print $3}')
+ echo -n ${size:-0}
}
# Check whether the "large_xattr" feature is enabled or not.
reformat_external_journal() {
local facet=$1
+ local var
- if [ ! -z ${EJOURNAL} ]; then
+ var=${facet}_JRN
+ if [ -n "${!var}" ]; then
local rcmd="do_facet $facet"
- echo "reformat external journal on $facet:${EJOURNAL}"
- ${rcmd} mke2fs -O journal_dev ${EJOURNAL} || return 1
+ echo "reformat external journal on $facet:${!var}"
+ ${rcmd} mke2fs -O journal_dev ${!var} || return 1
fi
}
echo "backup data"
${rcmd} tar zcf $metadata -C $mntpt/ . > /dev/null 2>&1 || return 3
# step 6: umount
- ${rcmd} umount -d $mntpt || return 4
- # step 7: reformat external journal if needed
- reformat_external_journal $facet || return 5
+ ${rcmd} $UMOUNT $mntpt || return 4
# step 8: reformat dev
echo "reformat new device"
- add $facet $(mkfs_opts $facet ${devname}) --backfstype ldiskfs \
- --reformat ${devname} $(mdsvdevname $(facet_number $facet)) \
- > /dev/null || exit 6
+ format_mdt $(facet_number $facet)
# step 9: mount dev
${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 7
# step 10: restore metadata
echo "remove recovery logs"
${rcmd} rm -fv $mntpt/OBJECTS/* $mntpt/CATALOGS
# step 13: umount dev
- ${rcmd} umount -d $mntpt || return 10
+ ${rcmd} $UMOUNT $mntpt || return 10
# step 14: cleanup tmp backup
${rcmd} rm -f $metaea $metadata
# step 15: reset device label - it's not virgin on
done
fi
# step 4: umount
- ${rcmd} umount -d $mntpt || return 2
+ ${rcmd} $UMOUNT $mntpt || return 2
# OI files will be recreated when mounted as lustre next time.
}
# make directory on different MDTs
test_mkdir() {
- local option
- local parent
- local child
local path
local p_option
- local option2
local stripe_count=2
- local rc=0
-
- case $# in
- 1) path=$1;;
- 2) option=$1
- path=$2;;
- 3) option=$1
- option2=$2
- path=$3;;
- *) error "Only creating single directory is supported";;
- esac
-
- child=$(basename $path)
- parent=$(dirname $path)
-
- if [ "$option" == "-p" -o "$option2" == "-p" ]; then
- if [ -d $parent/$child ]; then
- return $rc
- fi
- p_option="-p"
- fi
+ local stripe_index=-1
+ local OPTIND=1
+
+ while getopts "c:i:p" opt; do
+ case $opt in
+ c) stripe_count=$OPTARG;;
+ i) stripe_index=$OPTARG;;
+ p) p_option="-p";;
+ \?) error "only support -i -c -p";;
+ esac
+ done
- if [ "${option:0:2}" == "-c" ]; then
- stripe_count=$(echo $option | sed 's/^-c//')
- fi
+ shift $((OPTIND - 1))
+ [ $# -eq 1 ] || error "Only creating single directory is supported"
+ path="$*"
- if [ "${option2:0:2}" == "-c" ]; then
- stripe_count=$(echo $option2 | sed 's/^-c//')
- fi
+ if [ "$p_option" == "-p" ]; then
+ local parent=$(dirname $path)
- if [ ! -d ${parent} ]; then
- if [ "$p_option" == "-p" ]; then
- mkdir -p ${parent}
- else
- return 1
- fi
+ [ -d $path ] && return 0
+ [ ! -d ${parent} ] && mkdir -p ${parent}
fi
if [ $MDSCOUNT -le 1 ]; then
- mkdir $p_option $parent/$child || rc=$?
+ mkdir $path
else
- local mdt_idx=$($LFS getstripe -M $parent)
local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
+ local mdt_index
- mdt_idx=$((test_num % MDSCOUNT))
- echo "striped dir -i$mdt_idx -c$stripe_count $path"
- $LFS setdirstripe -i$mdt_idx -c$stripe_count $path || rc=$?
+ if [ $stripe_index -eq -1 ]; then
+ mdt_index=$((test_num % MDSCOUNT))
+ else
+ mdt_index=$stripe_index
+ fi
+ echo "striped dir -i$mdt_index -c$stripe_count $path"
+ $LFS setdirstripe -i$mdt_index -c$stripe_count $path
fi
- return $rc
}
# find the smallest and not in use file descriptor
local mdt_name="MDT$(printf '%04x' $mdt_idx)"
local ost_name="OST$(printf '%04x' $ost_idx)"
local proc_path="${FSNAME}-${ost_name}-osc-${mdt_name}"
- local last_id=$(do_facet mds${mdt_idx} lctl get_param -n \
+ local last_id=$(do_facet mds$((mdt_idx + 1)) lctl get_param -n \
osp.$proc_path.prealloc_last_id)
- local next_id=$(do_facet mds${mdt_idx} lctl get_param -n \
+ local next_id=$(do_facet mds$((mdt_idx + 1)) lctl get_param -n \
osp.$proc_path.prealloc_next_id)
-
echo $((last_id - next_id + 1))
}
create_pool $FSNAME.$pool ||
{ error_noexit "No pool created, result code $?"; return 1; }
- [ $($LFS pool_list $FSNAME | grep -c $pool) -eq 1 ] ||
+ [ $($LFS pool_list $FSNAME | grep -c "$FSNAME.${pool}\$") -eq 1 ] ||
{ error_noexit "$pool not in lfs pool_list"; return 2; }
}
local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done)
do_facet mgs $LCTL pool_add \
$FSNAME.$pool $FSNAME-OST[$first-$last/$step]
+
+ # wait for OSTs to be added to the pool
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id \
+ "lctl get_param -n lod.$lodname.pools.$pool |
+ sort -u | tr '\n' ' ' " "$t" || {
+ error_noexit "mds$mds_id: Add to pool failed"
+ return 3
+ }
+ done
wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$pool \
| sort -u | tr '\n' ' ' " "$t" || {
error_noexit "Add to pool failed"
local pname="lov.$FSNAME-*.pools.$pool"
local t=$($LCTL get_param -n $pname | head -1)
do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id \
+ "lctl get_param -n lod.$lodname.pools.$pool |
+ grep $t" "" || {
+ error_noexit "mds$mds_id: $t not removed from" \
+ "$FSNAME.$pool"
+ return 2
+ }
+ done
wait_update $HOSTNAME "lctl get_param -n $pname | grep $t" "" || {
error_noexit "$t not removed from $FSNAME.$pool"
return 1
do
do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
done
+ for mds_id in $(seq $MDSCOUNT); do
+ local mdt_id=$((mds_id-1))
+ local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov
+ wait_update_facet mds$mds_id "lctl get_param -n \
+ lod.$lodname.pools.$pool" "" || {
+ error_noexit "mds$mds_id: Pool $pool not drained"
+ return 4
+ }
+ done
wait_update $HOSTNAME "lctl get_param -n $pname" "" || {
error_noexit "Pool $FSNAME.$pool cannot be drained"
return 1
error "OST index of the first stripe on $file is" \
"$start_ost_idx, should be $expected"
}
+
+killall_process () {
+ local clients=${1:-$(hostname)}
+ local name=$2
+ local signal=$3
+ local rc=0
+
+ do_nodes $clients "killall $signal $name"
+}