. $EXCEPT_LIST_FILE
fi
-[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf
-[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] && MODPROBECONF=/etc/modprobe.d/Lustre
+# check config files for options in decreasing order of preference
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/lustre.conf ] &&
+ MODPROBECONF=/etc/modprobe.d/lustre.conf
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] &&
+ MODPROBECONF=/etc/modprobe.d/Lustre
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
+ MODPROBECONF=/etc/modprobe.conf
assert_DIR () {
local failed=""
export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite
export FSCK_MAX_ERR=4 # File system errors left uncorrected
- # This is used by a small number of tests to share state between the client
- # running the tests, or in some cases between the servers (e.g. lfsck.sh).
- # It needs to be a non-lustre filesystem that is available on all the nodes.
- export SHARED_DIRECTORY=${SHARED_DIRECTORY:-"/tmp"}
- export MDSDB=${MDSDB:-$SHARED_DIRECTORY/mdsdb}
- export OSTDB=${OSTDB:-$SHARED_DIRECTORY/ostdb}
-
#[ -d /r ] && export ROOT=${ROOT:-/r}
export TMP=${TMP:-$ROOT/tmp}
export TESTSUITELOG=${TMP}/${TESTSUITE}.log
export LOGDIR=${LOGDIR:-${TMP}/test_logs/}/$(date +%s)
export LOGDIRSET=true
fi
- export HOSTNAME=${HOSTNAME:-`hostname`}
+ export HOSTNAME=${HOSTNAME:-$(hostname -s)}
if ! echo $PATH | grep -q $LUSTRE/utils; then
export PATH=$LUSTRE/utils:$PATH
fi
fi
}
llite_lloop_enabled() {
    # Decide whether the llite_lloop module should be loaded.
    # Returns 0 (yes) for kernels older than 2.6.32, or when the
    # caller explicitly requests it via LOAD_LLOOP=true.
    local n1=$(uname -r | cut -d. -f1)
    local n2=$(uname -r | cut -d. -f2)
    local n3=$(uname -r | cut -d- -f1 | cut -d. -f3)

    # load the llite_lloop module for < 2.6.32 kernels.
    # Default LOAD_LLOOP to "false" when unset: executing an empty
    # variable as a command succeeds (exit 0), which silently made
    # this function always return true.
    if [[ $n1 -lt 2 ]] || [[ $n1 -eq 2 && $n2 -lt 6 ]] || \
       [[ $n1 -eq 2 && $n2 -eq 6 && $n3 -lt 32 ]] || \
       ${LOAD_LLOOP:-false}; then
        return 0
    fi
    return 1
}
+
load_modules_local() {
if [ -n "$MODPROBE" ]; then
# use modprobe
load_module lov/lov
load_module mgc/mgc
if ! client_only; then
- grep -q crc16 /proc/kallsyms || { modprobe crc16 2>/dev/null || true; }
- grep -q -w jbd /proc/kallsyms || { modprobe jbd 2>/dev/null || true; }
- grep -q -w jbd2 /proc/kallsyms || { modprobe jbd2 2>/dev/null || true; }
- [ "$FSTYPE" = "ldiskfs" ] && load_module ../ldiskfs/ldiskfs/ldiskfs
+ SYMLIST=/proc/kallsyms
+ grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
+ grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
+ grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
+ if [ "$FSTYPE" = "ldiskfs" ]; then
+ grep -q exportfs_decode_fh $SYMLIST ||
+ { modprobe exportfs 2> /dev/null || true; }
+ load_module ../ldiskfs/ldiskfs/ldiskfs
+ fi
load_module mgs/mgs
load_module mds/mds
load_module mdd/mdd
load_module obdfilter/obdfilter
fi
- load_module_llite_lloop() {
- local n1=$(uname -r | cut -d. -f1)
- local n2=$(uname -r | cut -d. -f2)
- local n3=$(uname -r | cut -d- -f1 | cut -d. -f3)
-
- # load the llite_lloop module for < 2.6.32 kernels
- if [[ $n1 -lt 2 ]] || [[ $n1 -eq 2 && $n2 -lt 6 ]] || \
- [[ $n1 -eq 2 && $n2 -eq 6 && $n3 -lt 32 ]] || \
- $LOAD_LLOOP; then
- load_module llite/llite_lloop
- fi
- }
load_module llite/lustre
- load_module_llite/llite_lloop
+ llite_lloop_enabled && load_module llite/llite_lloop
[ -d /r ] && OGDB=${OGDB:-"/r/tmp"}
OGDB=${OGDB:-$TMP}
rm -f $OGDB/ogdb-$HOSTNAME
echo -n $label
}
set_debug_size () {
    # Set the Lustre debug buffer size (MB).
    # $1 - requested size; defaults to $DEBUG_SIZE.
    local size=${1:-$DEBUG_SIZE}
    local ncpus
    ncpus=$(getconf _NPROCESSORS_CONF)

    # bug 19944: the buffer must be larger than num_possible_cpus();
    # guarantee at least 2MB per CPU.
    if [ -n "$ncpus" ] && [ $((ncpus * 2)) -gt $size ]; then
        size=$((ncpus * 2))
    fi
    lctl set_param debug_mb=$size
}
+
set_default_debug () {
    # Apply default Lustre debug settings on the local node.
    # $1 - debug mask (defaults to $PTLDEBUG)
    # $2 - subsystem mask (defaults to $SUBSYSTEM)
    # $3 - debug buffer size in MB (defaults to $DEBUG_SIZE)
    local dbg=${1:-"$PTLDEBUG"}
    local subsys=${2:-"$SUBSYSTEM"}
    local dbg_size=${3:-$DEBUG_SIZE}

    lctl set_param debug="$dbg"
    # strip a single leading space, if any, from the subsystem mask
    lctl set_param subsystem_debug="${subsys# }"

    set_debug_size $dbg_size
    sync
}
+
set_default_debug_nodes () {
    # Apply default debug settings on every node in $1 (comma list).
    # The local host, if listed, is handled directly; the remaining
    # nodes are reached through do_rpc_nodes.
    local node_list=$1

    case ",$node_list," in
    *,$HOSTNAME,*)
        node_list=$(exclude_items_from_list "$node_list" "$HOSTNAME")
        set_default_debug
        ;;
    esac

    if [[ -n $node_list ]]; then
        do_rpc_nodes $node_list set_default_debug \
            \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
    fi
}
+
set_default_debug_facet () {
    # Resolve facet $1 to its active host and apply default debug
    # settings there; aborts the run if the facet has no host.
    local facet=$1
    local host
    host=$(facet_active_host $facet)

    if [ -z "$host" ]; then
        echo "No host defined for facet $facet"
        exit 1
    fi

    set_default_debug_nodes $host
}
+
# Facet functions
mount_facets () {
local facets=${1:-$(get_facets)}
echo "mount -t lustre $@ ${!dev} $mntpt"
echo "Start of ${!dev} on ${facet} failed ${RC}"
else
- do_facet ${facet} "lctl set_param debug=\\\"$PTLDEBUG\\\"; \
- lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; \
- lctl set_param debug_mb=${DEBUG_SIZE}; \
- sync"
+ set_default_debug_facet $facet
label=$(do_facet ${facet} "$E2LABEL ${!dev}")
[ -z "$label" ] && echo no label for ${!dev} && exit 1
local mntpt=$(facet_mntpt $facet)
running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
if [ ${running} -ne 0 ]; then
- echo "Stopping $mntpt (opts:$@)"
+ echo "Stopping $mntpt (opts:$@) on $HOST"
do_facet ${facet} umount -d $@ $mntpt
fi
do_node $client mkdir -p $mnt
do_node $client mount -t lustre $OPTIONS $device $mnt || return 1
- do_node $client "lctl set_param debug=\\\"$PTLDEBUG\\\";
- lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
- lctl set_param debug_mb=${DEBUG_SIZE}"
+ set_default_debug_nodes $client
return 0
}
echo "Started clients $clients: "
do_nodes $clients "mount | grep -w $mnt"
- do_nodes $clients "lctl set_param debug=\\\"$PTLDEBUG\\\";
- lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
- lctl set_param debug_mb=${DEBUG_SIZE};"
+ set_default_debug_nodes $clients
return 0
}
while [ true ]; do
RESULT=$(do_node $node "$TEST")
if [ "$RESULT" == "$FINAL" ]; then
- echo "Updated after $WAIT sec: wanted '$FINAL' got '$RESULT'"
+ [ -z "$RESULT" -o $WAIT -le $sleep ] ||
+ echo "Updated after ${WAIT}s: wanted '$FINAL' got '$RESULT'"
return 0
fi
[ $WAIT -ge $MAX ] && break
WAIT=$((WAIT + sleep))
sleep $sleep
done
- echo "Update not seen after $MAX sec: wanted '$FINAL' got '$RESULT'"
+ echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
return 3
}
echo "Failing $facet on node $host"
+ # Make sure the client data is synced to disk. LU-924
+ #
+	# We don't write client data synchronously (to avoid flooding sync writes
+	# when there are many clients connecting), so if the server reboots before
+	# the client data reaches disk, the client data will be lost and the client
+ # will be evicted after recovery, which is not what we expected.
+ do_facet $facet "sync; sync; sync"
+
local affected=$(affected_facets $facet)
shutdown_facet $facet
replay_barrier() {
local facet=$1
- do_facet $facet sync
+ do_facet $facet "sync; sync; sync"
df $MOUNT
# make sure there will be no seq change
replay_barrier_nodf() {
local facet=$1 echo running=${running}
- do_facet $facet sync
+ do_facet $facet "sync; sync; sync"
local svc=${facet}_svc
echo Replay barrier on ${!svc}
do_facet $facet $LCTL --device %${!svc} notransno
local facet=$1
stop $facet
change_active $facet
+ wait_for_facet $facet
mount_facet $facet -o abort_recovery
clients_up || echo "first df failed: $?"
clients_up || error "post-failover df: $?"
exit 1
}
host_nids_address() {
    # Print the address part (text before '@') of the LNET NIDs on
    # the given nodes.
    # $1 - comma-separated node list
    # $2 - optional network kind (e.g. tcp, o2ib); when given, only
    #      NIDs of that kind are reported.
    local nodes=$1
    local kind=$2
    # nids was not declared local and leaked into the caller's scope
    local nids

    if [ -n "$kind" ]; then
        nids=$(do_nodes $nodes "$LCTL list_nids | grep $kind | cut -f 1 -d '@'")
    else
        nids=$(do_nodes $nodes "$LCTL list_nids all | cut -f 1 -d '@'")
    fi
    echo $nids
}
+
h2name_or_ip() {
if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
echo $1"@$2"
}
declare -fx h2o2ib
# This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST
# expressions format. As a bonus we can then just pass in those variables
# to pdsh. What this function does is take a HOSTLIST type string and
# expand it into a space delimited list for us.
#
# $1 - HOSTLIST expression, e.g. "node[1-3,5],other[01-02]"
# $2 - optional 1-based offset; when given, only the item at that
#      position in the expanded list is printed (or "" if ambiguous).
#
# NOTE(review): front/new/old/name/back/group/range/begin/end/padlen/
# padlen2/num/value/cnt are not declared local and leak into the
# caller's scope -- confirm whether any caller depends on that.
hostlist_expand() {
    local hostlist=$1
    local offset=$2
    local myList
    local item
    local list

    [ -z "$hostlist" ] && return

    # Translate the case of [..],..,[..] to [..] .. [..]
    list="${hostlist/],/] }"
    # Split any top-level commas that appear before the first '['
    # (plain host names mixed with bracket expressions).
    front=${list%%[*}
    [[ "$front" == *,* ]] && {
        new="${list%,*} "
        old="${list%,*},"
        list=${list/${old}/${new}}
    }

    for item in $list; do
        # Test if we have any []'s at all
        if [ "$item" != "${item/\[/}" ]; then {
            # Expand the [*] into list
            name=${item%%[*}
            back=${item#*]}

            if [ "$name" != "$item" ]; then
                # group is the comma-separated content of the first [...]
                group=${item#$name[*}
                group=${group%%]*}

                for range in ${group//,/ }; do
                    # A range is "begin-end" (a single value yields
                    # begin == end since %-* / #*- find no '-').
                    begin=${range%-*}
                    end=${range#*-}

                    # Number of leading zeros
                    padlen=${#begin}
                    padlen2=${#end}
                    # sed strips the leading zeros so seq can count;
                    # the original width is preserved via padlen.
                    end=$(echo $end | sed 's/0*//')
                    [[ -z "$end" ]] && end=0
                    [[ $padlen2 -gt $padlen ]] && {
                        [[ $padlen2 -eq ${#end} ]] && padlen2=0
                        padlen=$padlen2
                    }
                    begin=$(echo $begin | sed 's/0*//')
                    [ -z $begin ] && begin=0

                    # Re-pad with seq's format, then recurse in case
                    # the remainder still contains bracket expressions.
                    for num in $(seq -f "%0${padlen}g" $begin $end); do
                        value="${name#*,}${num}${back}"
                        [ "$value" != "${value/\[/}" ] && {
                            value=$(hostlist_expand "$value")
                        }
                        myList="$myList $value"
                    done
                done
            fi
        } else {
            myList="$myList $item"
        } fi
    done
    myList=${myList//,/ }
    myList=${myList:1} # Remove first character which is a space

    # Filter any duplicates without sorting
    list="$myList "
    myList="${list%% *}"

    while [[ "$list" != ${myList##* } ]]; do
        # Drop every occurrence of the current head from list, then
        # append the next distinct head to myList.
        list=${list//${list%% *} /}
        myList="$myList ${list%% *}"
    done
    myList="${myList%* }";

    # We can select an object at a offset in the list
    [ $# -eq 2 ] && {
        cnt=0
        for item in $myList; do
            let cnt=cnt+1
            [ $cnt -eq $offset ] && {
                myList=$item
            }
        done
        # An offset past the end leaves multiple items; report "".
        [ $(get_node_count $myList) -ne 1 ] && myList=""
    }
    echo $myList
}
+
facet_host() {
local facet=$1
# save the active host for this facet
local activevar=${facet}active
echo "$activevar=${!activevar}" > $TMP/$activevar
+ [[ $facet = mds1 ]] && combined_mgs_mds && \
+ echo "mgsactive=${!activevar}" > $TMP/mgsactive
local TO=`facet_active_host $facet`
echo "Failover $facet to $TO"
done
# make sure its not already running
stop ${facet} -f
rm -f $TMP/${facet}active
+ [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
do_facet ${facet} $MKFS $*
}
facet_mntpt () {
local facet=$1
+ [[ $facet = mgs ]] && combined_mgs_mds && facet="mds1"
+
local var=${facet}_MOUNT
eval mntpt=${!var:-${MOUNT%/*}/$facet}
stop mds$num -f
rm -f ${TMP}/mds${num}active
done
+ combined_mgs_mds && rm -f $TMP/mgsactive
for num in `seq $OSTCOUNT`; do
stop ost$num -f
init_facets_vars
init_param_vars
- do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=\\\"$PTLDEBUG\\\";
- lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
- lctl set_param debug_mb=${DEBUG_SIZE};
- sync"
+ set_default_debug_nodes $(comma_list $(nodes_list))
fi
init_gss
return 0
}
# verify a directory is shared among nodes.
check_shared_dir() {
    # $1 - directory to verify.
    # Returns 0 when every test node can see the directory and it is
    # writable from all of them; non-zero otherwise.
    local dir=$1

    [ -z "$dir" ] && return 1
    # Fail if any node cannot see the directory. The RPC status was
    # previously discarded, letting a non-shared dir pass this check.
    do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $dir ||
        return 1
    check_write_access $dir || return 1
    return 0
}
+
# Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
generate_db() {
local i
local ostidx
local dev
- local tmp_file
- [ $MDSCOUNT -eq 1 ] || error "CMD is not supported"
- tmp_file=$(mktemp -p $SHARED_DIRECTORY ||
- error "fail to create file in $SHARED_DIRECTORY")
+ check_shared_dir $SHARED_DIRECTORY ||
+ error "$SHARED_DIRECTORY isn't a shared directory"
- # make sure everything gets to the backing store
- local list=$(comma_list $CLIENTS $(facet_host $SINGLEMDS) $(osts_nodes))
- do_nodes $list "sync; sleep 2; sync"
+ export MDSDB=$SHARED_DIRECTORY/mdsdb
+ export OSTDB=$SHARED_DIRECTORY/ostdb
- do_nodes $list ls $tmp_file || \
- error "$SHARED_DIRECTORY is not a shared directory"
- rm $tmp_file
+ [ $MDSCOUNT -eq 1 ] || error "CMD is not supported"
run_e2fsck $(mdts_nodes) $MDTDEV "--mdsdb $MDSDB"
fi
}
at_get() {
    # Read the adaptive-timeout parameter $2 (e.g. at_max, at_min)
    # from facet $1. The generic "ost" facet maps to ost1, on the
    # assumption that all OSTs share the same setting.
    local facet=$1
    local at=$2

    if [ $facet == "ost" ]; then
        facet=ost1
    fi

    do_facet $facet "lctl get_param -n $at"
}

at_max_get() {
    # Convenience wrapper: fetch the at_max setting from facet $1.
    at_get $1 at_max
}
at_max_set() {
shift
local facet
+ local hosts
for facet in $@; do
if [ $facet == "ost" ]; then
- for i in `seq $OSTCOUNT`; do
- do_facet ost$i "lctl set_param at_max=$at_max"
-
- done
+ facet=$(get_facets OST)
elif [ $facet == "mds" ]; then
- for i in `seq $MDSCOUNT`; do
- do_facet mds$i "lctl set_param at_max=$at_max"
- done
- else
- do_facet $facet "lctl set_param at_max=$at_max"
+ facet=$(get_facets MDS)
fi
+ hosts=$(expand_list $hosts $(facets_hosts $facet))
done
+
+ do_nodes $hosts lctl set_param at_max=$at_max
}
##################################
# We need to dump the logs on all nodes
if $dump; then
- gather_logs $(comma_list $(nodes_list))
+ gather_logs $(comma_list $(nodes_list)) 0
fi
debugrestore
run_one_logged() {
local BEFORE=`date +%s`
local TEST_ERROR
- local name=${TESTSUITE}.test_${1}.test_log.$(hostname).log
+ local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log
local test_log=$LOGDIR/$name
rm -rf $LOGDIR/err
+ local SAVE_UMASK=`umask`
+ umask 0022
echo
log_sub_test_begin test_${1}
- (run_one $1 "$2") 2>&1 | tee $test_log
+ (run_one $1 "$2") 2>&1 | tee -i $test_log
local RC=${PIPESTATUS[0]}
[ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \
$FAIL_ON_ERROR && exit $RC
fi
+ umask $SAVE_UMASK
+
return 0
}
ostuuid_from_index()
{
    # Print the UUID of the OST with index $1, as reported by
    # "lfs osts" for mountpoint $2 (line format "IDX: UUID STATUS").
    local idx=$1
    local mntpt=$2

    $LFS osts $mntpt | sed -ne "/^${idx}: /s/.* \(.*\) .*$/\1/p"
}
+
ostname_from_index() {
    # The OST name is its UUID with the "_UUID" marker removed.
    local uuid
    uuid=$(ostuuid_from_index $1)
    echo ${uuid/_UUID/}
}
+
index_from_ostuuid()
{
    # Print the index of the OST whose UUID is $1, as reported by
    # "lfs osts" for mountpoint $2.
    local uuid=$1
    local mntpt=$2

    $LFS osts $mntpt | sed -ne "/${uuid}/s/\(.*\): .* .*$/\1/p"
}
+
mdtuuid_from_index()
{
    # Print the UUID of the MDT with index $1, as reported by
    # "lfs mdts" for mountpoint $2.
    # Pass the index to awk with -v instead of splicing it into the
    # program text: interpolating $1 into the /^.../ pattern was
    # fragile (regex metacharacters or an empty index would change
    # the program).
    $LFS mdts $2 | awk -v idx="$1" '$0 ~ "^" idx { print $2 }'
}
remote_node () {
init_clients_lists () {
# Sanity check: exclude the local client from RCLIENTS
- local rclients=$(echo " $RCLIENTS " | sed -re "s/\s+$HOSTNAME\s+/ /g")
+ local clients=$(hostlist_expand "$RCLIENTS")
+ local rclients=$(exclude_items_from_list "$clients" $HOSTNAME)
# Sanity check: exclude the dup entries
- rclients=$(for i in $rclients; do echo $i; done | sort -u)
+ RCLIENTS=$(for i in ${rclients//,/ }; do echo $i; done | sort -u)
- local clients="$SINGLECLIENT $HOSTNAME $rclients"
+ clients="$SINGLECLIENT $HOSTNAME $RCLIENTS"
# Sanity check: exclude the dup entries from CLIENTS
# for those configs which has SINGLCLIENT set to local client
clients=$(for i in $clients; do echo $i; done | sort -u)
- CLIENTS=`comma_list $clients`
- local -a remoteclients=($rclients)
+ CLIENTS=$(comma_list $clients)
+ local -a remoteclients=($RCLIENTS)
for ((i=0; $i<${#remoteclients[@]}; i++)); do
varname=CLIENT$((i + 2))
eval $varname=${remoteclients[i]}
}
mdsrate_inodes_available () {
    # Report 99% of the available inode count, leaving headroom so
    # mdsrate runs do not exhaust the filesystem.
    local avail
    avail=$(inodes_available)
    echo $((avail * 99 / 100))
}
# reset llite stat counters
}
get_clientosc_proc_path() {
    # Pattern matching the client-side OSC proc entries for fs $1.
    # NOTE(review): "[^M]" presumably filters out server-side
    # ${1}-osc-MDT* entries -- confirm against lctl get_param usage.
    echo "${1}-osc-[^M]*"
}
get_lustre_version () {
_wait_import_state () {
local expected=$1
local CONN_PROC=$2
- local maxtime=${3:-max_recovery_time}
+ local maxtime=${3:-$(max_recovery_time)}
local CONN_STATE
local i=0
CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
while [ "${CONN_STATE}" != "${expected}" ]; do
+ if [ "${expected}" == "DISCONN" ]; then
+ # for disconn we can check after proc entry is removed
+ [ "x${CONN_STATE}" == "x" ] && return 0
+ # with AT enabled, we can have connect request timeout near of
+ # reconnect timeout and test can't see real disconnect
+ [ "${CONN_STATE}" == "CONNECTING" ] && return 0
+ fi
[ $i -ge $maxtime ] && \
error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \
return 1
wait_import_state() {
    # Wait until every import listed in $2 (comma-separated proc
    # parameter names) reaches state $1, allowing up to $3 seconds
    # for each (default: max_recovery_time). Propagates the first
    # failure.
    local state=$1
    local params=$2
    local maxtime=${3:-$(max_recovery_time)}
    local p

    for p in ${params//,/ }; do
        _wait_import_state $state $p $maxtime || return
    done
}
+
+# One client request could be timed out because server was not ready
+# when request was sent by client.
+# The request timeout calculation details :
+# ptl_send_rpc ()
+# /* We give the server rq_timeout secs to process the req, and
+# add the network latency for our local timeout. */
+# request->rq_deadline = request->rq_sent + request->rq_timeout +
+# ptlrpc_at_get_net_latency(request) ;
+#
+# ptlrpc_connect_import ()
+# request->rq_timeout = INITIAL_CONNECT_TIMEOUT
+#
+# init_imp_at () ->
+# -> at_init(&at->iat_net_latency, 0, 0) -> iat_net_latency=0
+# ptlrpc_at_get_net_latency(request) ->
+# at_get (max (iat_net_latency=0, at_min)) = at_min
+#
+# i.e.:
+# request->rq_timeout + ptlrpc_at_get_net_latency(request) =
+# INITIAL_CONNECT_TIMEOUT + at_min
+#
+# We will use obd_timeout instead of INITIAL_CONNECT_TIMEOUT
+# because we can not get this value in runtime,
+# the value depends on configure options, and it is not stored in /proc.
+# obd_support.h:
+# #define CONNECTION_SWITCH_MIN 5U
+# #ifndef CRAY_XT3
+# #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/20)
+# #else
+# #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/2)
+
request_timeout () {
    # Deadline of a single client request sent to facet $1:
    # INITIAL_CONNECT_TIMEOUT + at_min (see the derivation in the
    # comment block above). INITIAL_CONNECT_TIMEOUT is approximated
    # by TIMEOUT, clamped to the CONNECTION_SWITCH_MIN floor of 5s.
    local facet=$1
    local connect_timeout=$TIMEOUT
    local at_min

    [[ $connect_timeout -ge 5 ]] || connect_timeout=5
    at_min=$(at_get $facet at_min)

    echo $((connect_timeout + at_min))
}
+
wait_osc_import_state() {
    # Wait until the OSC import from facet $1 to OST facet $2
    # reaches state $3, checking via an RPC on the facet's host.
    local facet=$1
    local ost_facet=$2
    local expected=$3
    local ost=$(get_osc_import_name $facet $ost_facet)
    local param="osc.${ost}.ost_server_uuid"
    local maxtime

    # Allow for two request deadlines:
    # 1. the client's first request (which may be skipped)
    # 2. the client's second request
    maxtime=$((2 * $(request_timeout $facet)))

    if ! do_rpc_nodes $(facet_host $facet) \
            _wait_import_state $expected $param $maxtime; then
        error "import is not in ${expected} state"
        return 1
    fi

    return 0
}
+
get_clientmdc_proc_path() {
    # Pattern matching the client-side MDC proc entries for fs $1.
    echo "${1}-mdc-*"
}
gather_logs () {
local list=$1
+ local tar_logs=$2
local ts=$(date +%s)
local docp=true
if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
echo "Dumping logs only on local client."
- $LCTL dk > ${prefix}.debug_log.$(hostname).${suffix}
- dmesg > ${prefix}.dmesg.$(hostname).${suffix}
+ $LCTL dk > ${prefix}.debug_log.$(hostname -s).${suffix}
+ dmesg > ${prefix}.dmesg.$(hostname -s).${suffix}
return
fi
do_nodesv $list \
- "$LCTL dk > ${prefix}.debug_log.\\\$(hostname).${suffix};
- dmesg > ${prefix}.dmesg.\\\$(hostname).${suffix}"
+ "$LCTL dk > ${prefix}.debug_log.\\\$(hostname -s).${suffix};
+ dmesg > ${prefix}.dmesg.\\\$(hostname -s).${suffix}"
if [ ! -f $LOGDIR/shared ]; then
do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR
fi
- local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2
- tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}*
+ if [ $tar_logs == 1 ]; then
+ local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2
+ tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}*
- echo $archive
+ echo $archive
+ fi
}
cleanup_logs () {
max_recovery_time () {
local init_connect_timeout=$(( TIMEOUT / 20 ))
- [[ $init_connect_timeout > 5 ]] || init_connect_timeout=5
+ [[ $init_connect_timeout -ge 5 ]] || init_connect_timeout=5
local service_time=$(( $(at_max_get client) + $(( 2 * $(( 25 + 1 + init_connect_timeout)) )) ))
echo -n "checking $dir..."
res=$(do_check_flavor $dir $flavor)
echo "found $res/$expect $flavor connections"
- [ $res -eq $expect ] && return 0
+ [ $res -ge $expect ] && return 0
sleep 4
done
# Not found. Create local logdir
mkdir -p $dir
else
- touch $dir/node.$(hostname).yml
+ touch $dir/node.$(hostname -s).yml
fi
return 0
}
check_write_access() {
    # Verify that every node in nodes_list has created its
    # node.<host>.yml marker file in $1, i.e. the directory is
    # visible and writable from all nodes, then remove the markers.
    local dir=$1
    # node was not declared local and leaked into the caller's scope
    local node

    for node in $(nodes_list); do
        if [ ! -f "$dir/node.$(short_hostname ${node}).yml" ]; then
            # Logdir not accessible/writable from this node.
            return 1
        fi
    done
    rm -f "$dir"/node.*.yml
    return 0
}
if [[ -n $YAML_LOG ]]; then
return
fi
+ local SAVE_UMASK=`umask`
+ umask 0000
+
export YAML_LOG=${LOGDIR}/results.yml
mkdir -p $LOGDIR
init_clients_lists
- do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $LOGDIR
- if check_write_access $LOGDIR; then
+ if check_shared_dir $LOGDIR; then
touch $LOGDIR/shared
echo "Logging to shared log directory: $LOGDIR"
else
yml_nodes_file $LOGDIR >> $YAML_LOG
yml_results_file >> $YAML_LOG
+
+ umask $SAVE_UMASK
}
log_test() {
run_llverdev()
{
local dev=$1
+ local llverdev_opts=$2
local devname=$(basename $1)
local size=$(grep "$devname"$ /proc/partitions | awk '{print $3}')
# loop devices aren't in /proc/partitions
local partial_arg=""
# Run in partial (fast) mode if the size
- # of a partition > 10 GB
- [ $size -gt 10 ] && partial_arg="-p"
+ # of a partition > 1 GB
+ [ $size -gt 1 ] && partial_arg="-p"
- llverdev --force $partial_arg $dev
+ llverdev --force $partial_arg $llverdev_opts $dev
+}
+
run_llverfs()
{
    # Run llverfs on directory $1 with extra options $2. Partial
    # (fast) mode is enabled when the filesystem is larger than 1 GB.
    local dir=$1
    local llverfs_opts=$2
    local partial_arg=""
    local size

    # size of the filesystem holding $dir, in GB
    size=$(df -B G $dir | tail -n 1 | awk '{print $2}' | sed 's/G//')
    if [ $size -gt 1 ]; then
        partial_arg="-p"
    fi

    llverfs $partial_arg $llverfs_opts $dir
}
remove_mdt_files() {
local mdtdev=$2
shift 2
local files="$@"
- local mntpt=${MOUNT%/*}/$facet
+ local mntpt=$(facet_mntpt $facet)
echo "removing files from $mdtdev on $facet: $files"
mount -t $FSTYPE $MDS_MOUNT_OPTS $mdtdev $mntpt || return $?
local mdtdev=$2
shift 2
local files="$@"
- local mntpt=${MOUNT%/*}/$facet
+ local mntpt=$(facet_mntpt $facet)
echo "duplicating files on $mdtdev on $facet: $files"
mkdir -p $mntpt || return $?
$LCTL get_param -n osc.*.kbytesavail | sort -n | head -n1
}
# Get the block size of the filesystem.
get_block_size() {
    # $1 - facet hosting the device; $2 - block device path.
    # Parses the "Block size:" line of the dumpe2fs superblock dump.
    local facet=$1
    local device=$2
    local bs

    bs=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
        awk '/^Block size:/ {print $3}')
    echo $bs
}
+
# Check whether the "large_xattr" feature is enabled or not.
large_xattr_enabled() {
    local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})

    do_facet $SINGLEMDS \
        "$DUMPE2FS -h $mds_dev 2>&1 | grep -q large_xattr"
    # status of the do_facet invocation above
    return ${PIPESTATUS[0]}
}
+
# Get the maximum xattr size supported by the filesystem.
max_xattr_size() {
    if large_xattr_enabled; then
        # include/linux/limits.h: #define XATTR_SIZE_MAX 65536
        echo 65536
        return
    fi

    local dev=$(mdsdevname ${SINGLEMDS//mds/})
    local bs=$(get_block_size $SINGLEMDS $dev)

    # maximum xattr size = size of block - size of header -
    # size of 1 entry - 4 null bytes
    echo $((bs - 32 - 32 - 4))
}
+
# Dump the value of the named xattr from a file.
get_xattr_value() {
    # $1 - xattr name; $2 - file path.
    # The command substitution + echo normalizes the trailing newline.
    local name=$1
    local path=$2

    echo "$(getfattr -n $name --absolute-names --only-values $path)"
}
+
# Generate a string with size of $size bytes.
generate_string() {
    # Emit a deterministic string of $1 bytes (default 1024),
    # composed entirely of the character 'y'.
    local len=${1:-1024} # in bytes

    echo "$(tr '\0' y < /dev/zero | head -c $len)"
}