[ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey)
export MCREATE=${MCREATE:-mcreate}
export MULTIOP=${MULTIOP:-multiop}
+ export MMAP_CAT=${MMAP_CAT:-mmap_cat}
+ export STATX=${STATX:-statx}
# Ubuntu, at least, has a truncate command in /usr/bin
# so fully path our truncate command.
export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate}
# if there is more than 4 CPU cores, libcfs should create multiple CPU
# partitions. So we just force libcfs to create 2 partitions for
# system with 2 or 4 cores
+ local saved_opts="$MODOPTS_LIBCFS"
if [ $ncpus -le 4 ] && [ $ncpus -gt 1 ]; then
# force to enable multiple CPU partitions
echo "Force libcfs to create 2 CPU partitions"
load_module ../libcfs/libcfs/libcfs
# Prevent local MODOPTS_LIBCFS being passed as part of environment
# variable to remote nodes
- unset MODOPTS_LIBCFS
+ MODOPTS_LIBCFS=$saved_opts
set_default_debug
load_module ../lnet/lnet/lnet
load_module fid/fid
load_module lmv/lmv
load_module osc/osc
- load_module mdc/mdc
load_module lov/lov
+ load_module mdc/mdc
load_module mgc/mgc
load_module obdecho/obdecho
if ! client_only; then
}
check_mem_leak () {
	# Scan recent kernel messages for Lustre/libcfs memory-leak reports.
	# Returns 1 (after saving the debug log) if a leak was reported,
	# unless IGNORE_LEAK is set, in which case leaks only warn.
	LEAK_LUSTRE=$(dmesg | tail -n 30 | grep "obd_memory.*leaked" || true)
	LEAK_PORTALS=$(dmesg | tail -n 20 |
		       grep -Ei "libcfs.*memory leaked" || true)
	if [ -n "$LEAK_LUSTRE" ] || [ -n "$LEAK_PORTALS" ]; then
		echo "$LEAK_LUSTRE" 1>&2
		echo "$LEAK_PORTALS" 1>&2
		# keep the debug log around for post-mortem analysis
		mv $TMP/debug $TMP/debug-leak.$(date +%s) || true
		echo "Memory leaks detected"
		[ -n "$IGNORE_LEAK" ] &&
			{ echo "ignoring leaks" && return 0; } || true
		return 1
	fi
}
unload_modules() {
start_gss_daemons || error_exit "start gss daemon failed! rc=$?"
fi
+ if $GSS_SK && ! $SK_NO_KEY; then
+ echo "Loading basic SSK keys on all servers"
+ do_nodes $(comma_list $(all_server_nodes)) \
+ "lgss_sk -t server -l $SK_PATH/$FSNAME.key || true"
+ do_nodes $(comma_list $(all_server_nodes)) \
+ "keyctl show | grep lustre | cut -c1-11 |
+ sed -e 's/ //g;' |
+ xargs -IX keyctl setperm X 0x3f3f3f3f"
+ fi
+
if $GSS_SK && $SK_NO_KEY; then
local numclients=${1:-$CLIENTCOUNT}
local clients=${CLIENTS:-$HOSTNAME}
set_default_debug_nodes $node "$debug" "$subsys" $debug_size
}
set_params_nodes () {
	# Run "lctl set_param <params...>" on the listed nodes.  Silently a
	# no-op unless both a node list and at least one parameter are given.
	(( $# >= 2 )) || return 0

	local nodes=$1

	shift
	do_nodes $nodes $LCTL set_param $@
}
+
set_params_clients () {
	# Apply lctl parameters on the client nodes.  Defaults: all clients,
	# parameters from $CLIENT_LCTL_SETPARAM_PARAM.  No-op without params.
	local nodes=${1:-$CLIENTS}
	local params=${2:-$CLIENT_LCTL_SETPARAM_PARAM}

	if [[ -z $params ]]; then
		return 0
	fi
	set_params_nodes $nodes $params
}
+
set_hostid () {
local hostid=${1:-$(hostid)}
ost_dev_status() {
	# Print the status column (field 7 of "lfs df" output) for the OST
	# with the given index, as seen from mount point $mnt_pnt.
	# Extra options for lfs_df (e.g. -l) may be passed as $3.
	local ost_idx=$1
	local mnt_pnt=${2:-$MOUNT}
	local opts=$3
	local ost_uuid

	ost_uuid=$(ostuuid_from_index $ost_idx $mnt_pnt)
	lfs_df $opts $mnt_pnt | awk '/'$ost_uuid'/ { print $7 }'
}
setup_quota(){
fi
set_default_debug_nodes $client
+ set_params_clients $client
return 0
}
mount_mds_client() {
	# Mount the second client mount point $MOUNT2 directly on the MDS.
	# NOTE(review): mds_HOST holds the variable *name* (e.g. "mds1_HOST"),
	# not its value, and zconf_mount uses $mds1_HOST directly — confirm
	# this behaves as intended when SINGLEMDS is not mds1.
	local mds_HOST=${SINGLEMDS}_HOST

	echo $mds_HOST
	zconf_mount $mds1_HOST $MOUNT2 $MOUNT_OPTS ||
		error "unable to mount $MOUNT2 on MDS"
}
umount_mds_client() {
	# Unmount $MOUNT2 from the MDS node.
	local mds_HOST=${SINGLEMDS}_HOST

	zconf_umount $mds1_HOST $MOUNT2
	# rmdir (not "rm -rf") so a still-mounted or non-empty mount point
	# is never destroyed by accident
	do_facet $SINGLEMDS "rmdir $MOUNT2"
}
# nodes is comma list
do_nodes $clients "mount | grep $mnt' '"
set_default_debug_nodes $clients
+ set_params_clients $clients
return 0
}
reboot_facet() {
	# Reboot the node backing a facet when FAILURE_MODE is HARD;
	# otherwise just sleep to let in-flight state settle.
	local facet=$1
	local node=$(facet_active_host $facet)

	if [ "$FAILURE_MODE" = HARD ]; then
		# boot_node also waits for the host and reloads modules
		boot_node $node
	else
		sleep 10
	fi
}
boot_node() {
	# Reboot a node (HARD failure mode only), wait until it is reachable
	# again, and reload Lustre modules on it when LOAD_MODULES_REMOTE
	# is true.
	local node=$1

	if [ "$FAILURE_MODE" = HARD ]; then
		reboot_node $node
		wait_for_host $node
		if $LOAD_MODULES_REMOTE; then
			# NOTE(review): $facet is not a parameter here — it
			# leaks in from the caller's scope; confirm intended.
			echo "loading modules on $node: $facet"
			do_rpc_nodes $node load_modules_local
		fi
	fi
}
facets_hosts () {
return 0
}
##
# wait for a command to return the expected result
#
# This will run @check on @node repeatedly until the output matches @expect
# based on the supplied condition, or until @max_wait seconds have elapsed,
# whichever comes first.  @cond may be one of the normal bash operators,
# "-gt", "-ge", "-eq", "-le", "-lt", "==", "!=", or "=~", and must be quoted
# in the caller to avoid unintentional evaluation by the shell in the caller.
#
# If @max_wait is not specified, the condition will be checked for up to 90s.
#
# If --verbose is passed as the first argument, the result is printed on each
# value change, otherwise it is only printed after every 10s interval.
#
# Using wait_update_cond() or related helper function is preferable to adding
# a "long enough" wait for some state to change in the background, since
# "long enough" may be too short due to tunables, system config, or running in
# a VM, and must by necessity wait too long for most cases or risk failure.
#
# usage: wait_update_cond [--verbose] node check cond expect [max_wait]
wait_update_cond() {
	local verbose=false
	[[ "$1" == "--verbose" ]] && verbose=true && shift

	local node=$1
	local check="$2"
	local cond="$3"
	local expect="$4"
	local max_wait=${5:-90}
	local result
	local prev_result
	local waited=0
	local begin=$SECONDS
	local sleep=1
	local print=10

	while (( $waited <= $max_wait )); do
		result=$(do_node $node "$check")

		# single quotes keep eval from re-splitting multi-word
		# output before [[ ]] applies the comparison operator
		eval [[ "'$result'" $cond "'$expect'" ]]
		if [[ $? == 0 ]]; then
			[[ -z "$result" || $waited -le $sleep ]] ||
				echo "Updated after ${waited}s: want '$expect' got '$result'"
			return 0
		fi
		if $verbose && [[ "$result" != "$prev_result" ]]; then
			[[ -n "$prev_result" ]] &&
				echo "Changed after ${waited}s: from '$prev_result' to '$result'"
			prev_result="$result"
		fi
		(( $waited % $print == 0 )) &&
			echo "Waiting $((max_wait - waited))s for '$expect'"
		sleep $sleep
		# recompute from $SECONDS so slow do_node calls are counted
		waited=$((SECONDS - begin))
	done
	echo "Update not seen after ${max_wait}s: want '$expect' got '$result'"
	return 3
}
# usage: wait_update [--verbose] node check expect [max_wait]
wait_update() {
	local verbose=
	if [ "$1" = "--verbose" ]; then
		verbose="$1"
		shift
	fi

	# equality-only wrapper around wait_update_cond()
	wait_update_cond $verbose $1 "$2" "==" "$3" $4
}
+
# usage: wait_update_facet_cond [--verbose] facet check cond expect [max_wait]
wait_update_facet_cond() {
	local verbose=
	if [ "$1" = "--verbose" ]; then
		verbose="$1"
		shift
	fi

	# resolve the facet to its currently-active host, then delegate
	local host=$(facet_active_host $1)

	wait_update_cond $verbose $host "$2" "$3" "$4" $5
}
+
# usage: wait_update_facet [--verbose] facet check expect [max_wait]
wait_update_facet() {
	local verbose=
	[ "$1" = "--verbose" ] && verbose="$1" && shift

	# equality-only variant of wait_update_facet_cond()
	local node=$(facet_active_host $1)
	local check="$2"
	local expect="$3"
	local max_wait=$4

	wait_update_cond $verbose $node "$check" "==" "$expect" $max_wait
}
sync_all_data() {
}
fail_nodf() {
	# Fail over a facet without the usual client df check afterwards.
	local target=$1

	facet_failover $target
}
fail_abort() {
	# Restart a facet, aborting recovery on remount.  The abort type
	# defaults to "abort_recovery" but may be overridden via $2.
	local facet=$1
	local abort_type=${2:-"abort_recovery"}

	stop $facet
	change_active $facet
	wait_for_facet $facet
	mount_facet $facet -o $abort_type
	# first stat may legitimately fail while clients reconnect
	clients_up || echo "first stat failed: $?"
	clients_up || error "post-failover stat: $?"
}
local nodes=$1
local net=${2:-"."}
- do_nodes $nodes "$LCTL list_nids | grep $net | cut -f 1 -d @"
+ do_nodes $nodes "$LCTL list_nids | grep -w $net | cut -f 1 -d @"
}
h2name_or_ip() {
var=${type}_FS_MKFS_OPTS
fs_mkfs_opts+=${!var:+" ${!var}"}
+ [[ "$QUOTA_TYPE" =~ "p" ]] && fs_mkfs_opts+=" -O project"
+
[ $fstype == ldiskfs ] && fs_mkfs_opts=$(squash_opt $fs_mkfs_opts)
if [ -n "${fs_mkfs_opts## }" ]; then
export I_MOUNTED2=yes
fi
- if $do_check; then
- # FIXME: what to do if check_config failed?
- # i.e. if:
- # 1) remote client has mounted other Lustre fs?
- # 2) lustre is mounted on remote_clients atall ?
- check_config_clients $MOUNT
- init_facets_vars
- init_param_vars
+ if $do_check; then
+ # FIXME: what to do if check_config failed?
+ # i.e. if:
+ # 1) remote client has mounted other Lustre fs?
+ # 2) lustre is mounted on remote_clients atall ?
+ check_config_clients $MOUNT
+ init_facets_vars
+ init_param_vars
- set_default_debug_nodes $(comma_list $(nodes_list))
- fi
+ set_default_debug_nodes $(comma_list $(nodes_list))
+ set_params_clients
+ fi
if [ -z "$CLIENTONLY" -a $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ]; then
local facets=""
local log=$TMP/e2fsck.log
local rc=0
+ # turn on pfsck if it is supported
+ do_node $node $E2FSCK -h 2>&1 | grep -qw -- -m && cmd+=" -m8"
echo $cmd
do_node $node $cmd 2>&1 | tee $log
rc=${PIPESTATUS[0]}
default_lru_size()
{
	# Default LDLM LRU size scales with CPU count: 100 locks per CPU.
	# Declaration is separate from assignment so a grep failure is not
	# masked by "local".
	local nr_cpu

	nr_cpu=$(grep -c "processor" /proc/cpuinfo)
	echo $((100 * nr_cpu))
}
lru_resize_enable()
lru_resize_disable()
{
	# Disable dynamic LRU resizing by pinning lru_size to a fixed value
	# (default: default_lru_size()) on namespaces matching $dev.
	local dev=${1}
	local lru_size=${2:-$(default_lru_size)}

	$LCTL set_param ldlm.namespaces.*$dev*.lru_size=$lru_size
}
flock_is_enabled()
exit_status () {
	# Exit 0 unless $TESTSUITELOG or any log passed in $1 records a FAIL.
	local status=0
	local logs="$TESTSUITELOG $1"
	local log

	for log in $logs; do
		if [ -f "$log" ]; then
			grep -qw FAIL "$log" && status=1
		fi
	done

	exit $status
}
local testmsg=$2
export tfile=f${testnum}.${TESTSUITE}
export tdir=d${testnum}.${TESTSUITE}
- local name=$TESTSUITE.$TESTNAME.test_log.$(hostname -s).log
- local test_log=$LOGDIR/$name
- local zfs_log_name=$TESTSUITE.$TESTNAME.zfs_log
- local zfs_debug_log=$LOGDIR/$zfs_log_name
+ local test_log=$TESTLOG_PREFIX.$TESTNAME.test_log.$(hostname -s).log
+ local zfs_debug_log=$TESTLOG_PREFIX.$TESTNAME.zfs_log
local SAVE_UMASK=$(umask)
local rc=0
umask 0022
}
do_and_time () {
	# Run a command, print its elapsed wall time in seconds, and return
	# non-zero if the command (or the first stage of its pipeline) failed.
	local cmd="$1"
	local start
	local rc=0

	start=$SECONDS
	eval '$cmd'
	[ ${PIPESTATUS[0]} -eq 0 ] || rc=1
	echo $((SECONDS - start))
	return $rc
}
inodes_available () {
[ $# -eq 1 ] || error "Only creating single directory is supported"
path="$*"
+ local parent=$(dirname $path)
if [ "$p_option" == "-p" ]; then
- local parent=$(dirname $path)
-
[ -d $path ] && return 0
if [ ! -d ${parent} ]; then
mkdir -p ${parent} ||
fi
fi
- if [ $MDSCOUNT -le 1 ]; then
+ if [ $MDSCOUNT -le 1 ] || ! is_lustre ${parent}; then
mkdir $path || error "mkdir '$path' failed"
else
local mdt_index
pool_remove_first_target() {
	# Index -1 tells pool_remove_target to drop the pool's first member.
	echo "Removing first target from a pool"
	pool_remove_target $1 -1
}
+
+pool_remove_target() {
local pool=$1
+ local index=$2
local pname="lov.$FSNAME-*.pools.$pool"
- local t=$($LCTL get_param -n $pname | head -1)
+ if [ $index -eq -1 ]; then
+ local t=$($LCTL get_param -n $pname | head -1)
+ else
+ local t=$(printf "$FSNAME-OST%04x_UUID" $index)
+ fi
+
+ echo "Removing $t from $pool"
do_facet mgs $LCTL pool_remove $FSNAME.$pool $t
for mds_id in $(seq $MDSCOUNT); do
local mdt_id=$((mds_id-1))
}
changelog_dump() {
	# Dump the changelog of every MDT, prefixing each record with the MDT
	# name.  Returns the first non-zero status from "lfs changelog", or 0.
	local rc

	for M in $(seq $MDSCOUNT); do
		local facet=mds$M
		local mdt="$(facet_svc $facet)"
		local output
		local ret

		output=$($LFS changelog $mdt)
		ret=$?
		if [ $ret -ne 0 ]; then
			# remember only the first failure
			rc=${rc:-$ret}
		elif [ -n "$output" ]; then
			echo "$output" | sed -e 's/^/'$mdt'./'
		fi
	done

	return ${rc:-0}
}
changelog_extract_field() {
export HSMTOOL_UPDATE_INTERVAL=${HSMTOOL_UPDATE_INTERVAL:=""}
export HSMTOOL_EVENT_FIFO=${HSMTOOL_EVENT_FIFO:=""}
export HSMTOOL_TESTDIR
- export HSMTOOL_BASE=$(basename "$HSMTOOL" | cut -f1 -d" ")
HSM_ARCHIVE_NUMBER=2
done
}
copytool_continue() {
	# Send SIGCONT to copytool processes on the agent hosts so any
	# stopped instance resumes; quietly succeed if none were found.
	local agents=${1:-$(facet_active_host $SINGLEAGT)}

	do_nodesv $agents "libtool execute pkill -CONT -x $HSMTOOL" || return 0
	echo "Copytool is continued on $agents"
}
kill_copytools() {
	# Kill any copytools on the agent hosts, then SIGCONT them so a
	# stopped copytool actually receives the pending termination signal.
	local hosts=${1:-$(facet_active_host $SINGLEAGT)}

	echo "Killing existing copytools on $hosts"
	do_nodesv $hosts "libtool execute killall -q $HSMTOOL" || true
	copytool_continue "$hosts"
}
copytool_monitor_cleanup() {
return $rc
}
-wait_result() {
- local facet=$1
- shift
- wait_update --verbose $(facet_active_host $facet) "$@"
-}
-
mdts_check_param() {
local key="$1"
local target="$2"
local timeout="$3"
local mdtno
+
for mdtno in $(seq 1 $MDSCOUNT); do
local idx=$(($mdtno - 1))
- wait_result mds${mdtno} \
+ wait_update_facet --verbose mds${mdtno} \
"$LCTL get_param -n $MDT_PREFIX${idx}.$key" "$target" \
$timeout ||
error "$key state is not '$target' on mds${mdtno}"
local cmd="$LCTL get_param -n ${MDT_PREFIX}${mdtidx}.hsm.actions"
cmd+=" | awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
- wait_result $mds "$cmd" "$state" 200 ||
+ wait_update_facet --verbose $mds "$cmd" "$state" 200 ||
error "request on $fid is not $state on $mds"
}
check_component_count $file $comp_cnt
}
statx_supported() {
	# Probe whether the statx test helper runs on this system; the
	# function's status is simply the helper's exit status.
	$STATX --quiet --version
}
+
+#
+# wrappers for createmany and unlinkmany
+# to set debug=0 if number of creates is high enough
+# this is to speedup testing
+#
#
# Wrapper for lustre/tests/createmany: when creating more than 100 files,
# temporarily disable debug logging on all nodes to speed up the run, then
# restore the saved debug mask afterwards.
#
function createmany() {
	local count=${!#}
	local list
	local saved_debug

	if (( count > 100 )); then
		saved_debug=$($LCTL get_param -n debug)
		list=$(comma_list $(all_nodes))
		do_nodes $list $LCTL set_param debug=0
	fi
	# "$@" (not $*) preserves each caller argument as one word
	$LUSTRE/tests/createmany "$@"
	local rc=$?

	(( count > 100 )) &&
		do_nodes $list "$LCTL set_param debug=\\\"$saved_debug\\\""
	return $rc
}
+
#
# Wrapper for lustre/tests/unlinkmany: when unlinking more than 100 files,
# temporarily disable debug logging on all nodes to speed up the run, then
# restore the saved debug mask afterwards.
#
function unlinkmany() {
	local count=${!#}
	local list
	local saved_debug

	if (( count > 100 )); then
		saved_debug=$($LCTL get_param -n debug)
		list=$(comma_list $(all_nodes))
		do_nodes $list $LCTL set_param debug=0
	fi
	# "$@" (not $*) preserves each caller argument as one word
	$LUSTRE/tests/unlinkmany "$@"
	local rc=$?

	(( count > 100 )) &&
		do_nodes $list "$LCTL set_param debug=\\\"$saved_debug\\\""
	return $rc
}