[ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
rm -f $TMP/*active
-
}
case `uname -r` in
# add an additional parameter if mountpoint is ever different from $MOUNT
quota_save_version() {
local fsname=${2:-$FSNAME}
+ local spec=$1
+ local ver=$(tr -c -d "123" <<< $spec)
+ local type=$(tr -c -d "ug" <<< $spec)
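+ # e.g. spec="ug3" gives ver="3", type="ug"; spec="u" gives ver="", type="u"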
+
+ [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
- $LFS quotaoff -ug $MOUNT # just in case
- [ -n "$1" ] && { $LFS quotacheck -$1 $MOUNT || error "quotacheck has failed"; }
+ [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
- do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$1"
+ do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec"
local varsvc
local osts=$(get_facets OST)
for ost in ${osts//,/ }; do
varsvc=${ost}_svc
- do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$1"
+ do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
done
}
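+# Illustrative usage:
+#   quota_save_version ug3          # user+group quota, version 3
+#   quota_save_version u testfs     # user quota only, explicit fsname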
# Assume that the quota type is the same on the MDS and OSTs
local quota_type=$(quota_type | grep MDT | cut -d "=" -f2)
[ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!"
+ echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]"
if [ "$quota_type" != "$QUOTA_TYPE" ]; then
export old_QUOTA_TYPE=$quota_type
quota_save_version $QUOTA_TYPE
local cmd
for usr in $quota_usrs; do
- echo "Setting up quota on $client:$mntpt for $usr..."
+ echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
for type in u g; do
cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt"
echo "+ $cmd"
local rc=0
for mnt in $mnts ; do
- do_nodes $nodes "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts);
+ do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts);
mpts=\\\$(mount | grep -w -c $mnt);
if [ \\\$running -ne \\\$mpts ]; then
echo \\\$(hostname) env are INSANE!;
}
sanity_mount_check_servers () {
+ [ "$CLIENTONLY" ] &&
+ { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
echo Checking servers environments
# FIXME: modify get_facets to display all facets w/o params
- local facets="$(get_facets OST),$(get_facets MDS)"
+ local facets="$(get_facets OST),$(get_facets MDS),mgs"
local node
local mnt
local facet
echo "Starting client $clients: $OPTIONS $device $mnt"
- do_nodes $clients "set -x;
+ do_nodes $clients "
running=\\\$(mount | grep -c $mnt' ');
rc=0;
if [ \\\$running -eq 0 ] ; then
mkdir -p $mnt;
mount -t lustre $OPTIONS $device $mnt;
- rc=$?;
+ rc=\\\$?;
fi;
-exit $rc"
+exit \\\$rc" || return ${PIPESTATUS[0]}
echo "Started clients $clients: "
do_nodes $clients "mount | grep -w $mnt"
[ "$3" ] && force=-f
echo "Stopping clients: $clients $mnt (opts:$force)"
- do_nodes $clients "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts);
+ do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts);
if [ \\\$running -ne 0 ] ; then
echo Stopping client \\\$(hostname) client $mnt opts:$force;
lsof -t $mnt || need_kill=no;
local node=$1
if [ "$FAILURE_MODE" = HARD ]; then
$POWER_UP $node
+ wait_for_host $node
fi
}
shift
local progs=$@
- do_nodes $clients "set -x ; PATH=:$PATH status=true; for prog in $progs; do
- which \\\$prog || { echo \\\$prog missing on \\\$(hostname) && status=false; }
- done;
- eval \\\$status"
+ do_nodes $clients "PATH=:$PATH; status=true;
+for prog in $progs; do
+ if ! [ \\\"\\\$(which \\\$prog)\\\" -o \\\"\\\${!prog}\\\" ]; then
+ echo \\\$prog missing on \\\$(hostname);
+ status=false;
+ fi
+done;
+eval \\\$status"
}
client_var_name() {
local TESTLOAD=run_${!var}.sh
ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1
-
- check_catastrophe $client || return 2
-
- # see if the load is still on the client
+
+ # bug 18914: retry the connection several times, not only for the
+ # ps check but also while running check_catastrophe
local tries=3
local RC=254
while [ $RC = 254 -a $tries -gt 0 ]; do
let tries=$tries-1
# assume success
RC=0
+ if ! check_catastrophe $client; then
+ RC=${PIPESTATUS[0]}
+ if [ $RC -eq 254 ]; then
+ # FIXME: not sure how long we should sleep here
+ sleep 10
+ continue
+ fi
+ echo "check catastrophe failed: RC=$RC "
+ return $RC
+ fi
+ done
+ # We can keep trying to connect even if RC=254;
+ # just print a warning about it
+ if [ $RC = 254 ]; then
+ echo "got a return status of $RC from do_node while checking catastrophe on $client"
+ fi
+
+ # see if the load is still on the client
+ tries=3
+ RC=254
+ while [ $RC = 254 -a $tries -gt 0 ]; do
+ let tries=$tries-1
+ # assume success
+ RC=0
if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then
RC=${PIPESTATUS[0]}
sleep 30
fi
done
if [ $RC = 254 ]; then
- echo "got a return status of $RC from do_node while checking (i.e. with 'ps') the client load on the remote system"
+ echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client"
# see if we can diagnose a bit why this is
fi
check_client_load $client
rc=${PIPESTATUS[0]}
if [ "$rc" != 0 -a "$expectedfail" ]; then
- start_client_load $client
- echo "Restarted client load: on $client. Checking ..."
- check_client_load $client
+ local var=$(client_var_name $client)_load
+ start_client_load $client ${!var}
+ echo "Restarted client load ${!var}: on $client. Checking ..."
+ check_client_load $client
rc=${PIPESTATUS[0]}
if [ "$rc" != 0 ]; then
log "Client load failed to restart on node $client, rc=$rc"
wait_for_host() {
local host=$1
check_network "$host" 900
- while ! do_node $host "ls -d $LUSTRE " > /dev/null; do sleep 5; done
+ while ! do_node $host hostname > /dev/null; do sleep 5; done
}
wait_for() {
shutdown_facet $facet
[ -n "$sleep_time" ] && sleep $sleep_time
reboot_facet $facet
- client_df &
- DFPID=$!
- echo "df pid is $DFPID"
change_active $facet
local TO=`facet_active_host $facet`
echo "Failover $facet to $TO"
fail() {
facet_failover $* || error "failover: $?"
- df $MOUNT || error "post-failover df: $?"
+ client_df || error "post-failover df: $?"
}
fail_nodf() {
stop $facet
change_active $facet
mount_facet $facet -o abort_recovery
- df $MOUNT || echo "first df failed: $?"
+ client_df || echo "first df failed: $?"
sleep 1
- df $MOUNT || error "post-failover df: $?"
+ client_df || error "post-failover df: $?"
}
do_lmc() {
}
do_node() {
- HOST=$1
+ local verbose=false
+ # do not strip off the hostname if verbose, bug 19215
+ if [ x$1 = x--verbose ]; then
+ shift
+ verbose=true
+ fi
+
+ local HOST=$1
shift
local myPDSH=$PDSH
if [ "$HOST" = "$HOSTNAME" ]; then
[ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
return 0
fi
- $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //"
+
+ if $verbose ; then
+ # print HOSTNAME for myPDSH="no_dsh"
+ if [[ $myPDSH = no_dsh ]]; then
+ $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /"
+ else
+ $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
+ fi
+ else
+ $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //"
+ fi
return ${PIPESTATUS[0]}
}
}
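+# e.g. do_node --verbose $HOST "lctl get_param -n at_max" keeps the
+# "<host>: " output prefix instead of stripping it (bug 19215)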
do_nodes() {
+ local verbose=false
+ # do not strip off the hostname if verbose, bug 19215
+ if [ x$1 = x--verbose ]; then
+ shift
+ verbose=true
+ fi
+
local rnodes=$1
shift
if $(single_local_node $rnodes); then
- do_node $rnodes $@
+ if $verbose; then
+ do_node --verbose $rnodes $@
+ else
+ do_node $rnodes $@
+ fi
return $?
fi
$myPDSH $rnodes $LCTL mark "$@" > /dev/null 2>&1 || :
fi
- $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g"
+ if $verbose ; then
+ $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
+ else
+ $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g"
+ fi
return ${PIPESTATUS[0]}
}
}
cleanupall() {
+ nfs_client_mode && return
+
stopall $*
unload_modules
cleanup_gss
# We need ldiskfs here, may as well load them all
load_modules
[ "$CLIENTONLY" ] && return
- echo "Formatting mdts, osts"
+ echo Formatting mgs, mds, osts
+ if [[ $MDSDEV1 != $MGSDEV ]] || [[ $mds1_HOST != $mgs_HOST ]]; then
+ add mgs $mgs_MKFS_OPTS $FSTYPE_OPT --reformat $MGSDEV || exit 10
+ fi
+
for num in `seq $MDSCOUNT`; do
echo "Format mds$num: $(mdsdevname $num)"
if $VERBOSE; then
}
setupall() {
+ nfs_client_mode && return
+
sanity_mount_check ||
error "environments are insane!"
load_modules
init_gss
if [ -z "$CLIENTONLY" ]; then
- echo "Setup mdts, osts"
+ echo Setup mgs, mdt, osts
echo $WRITECONF | grep -q "writeconf" && \
writeconf_all
+ if [[ $mds1_HOST != $mgs_HOST ]] || [[ $MDSDEV1 != $MGSDEV ]]; then
+ start mgs $MGSDEV $mgs_MOUNT_OPTS
+ fi
+
for num in `seq $MDSCOUNT`; do
DEVNAME=$(mdsdevname $num)
start mds$num $DEVNAME $MDS_MOUNT_OPTS
}
init_facet_vars () {
+ [ "$CLIENTONLY" ] && return 0
local facet=$1
shift
local device=$1
done
}
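+# Poll (up to $3 seconds) until no ${FSNAME}-OST*-osc-$2 devices on
+# facet $1 remain in the inactive ('IN') state; see init_param_vars for callers.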
+osc_ensure_active () {
+ local facet=$1
+ local type=$2
+ local timeout=$3
+ local period=0
+
+ while [ $period -lt $timeout ]; do
+ count=$(do_facet $facet "lctl dl | grep '${FSNAME}-OST.*-osc-${type}' | grep ' IN ' 2>/dev/null | wc -l")
+ if [ $count -eq 0 ]; then
+ break
+ fi
+
+ echo "There are $count OST are inactive, wait $period seconds, and try again"
+ sleep 3
+ period=$((period+3))
+ done
+
+ [ $period -lt $timeout ] || log "$count OSTs are still inactive after $timeout seconds, giving up"
+}
+
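+# Echo the SOM (size-on-MDS) state of the first MDT on $SINGLEMDS,
+# e.g. "enabled"; init_param_vars disables quota testing when SOM is on.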
+som_check() {
+ SOM_ENABLED=$(do_facet $SINGLEMDS "$LCTL get_param mdt.*.som" | awk -F= ' {print $2}' | head -n 1)
+ echo $SOM_ENABLED
+}
+
init_param_vars () {
if ! remote_ost_nodsh && ! remote_mds_nodsh; then
export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2)
log "Using TIMEOUT=$TIMEOUT"
+ osc_ensure_active $SINGLEMDS M $TIMEOUT
+ osc_ensure_active client c $TIMEOUT
+
+ if [ x"$(som_check)" = x"enabled" ]; then
+ ENABLE_QUOTA=""
+ fi
if [ "$ENABLE_QUOTA" ]; then
setup_quota $MOUNT || return 2
fi
}
+nfs_client_mode () {
+ if [ "$NFSCLIENT" ]; then
+ echo "NFSCLIENT mode: setup, cleanup, check config skipped"
+ local clients=$CLIENTS
+ [ -z "$clients" ] && clients=$(hostname)
+
+ # FIXME: remove the explicit hostname echo once bug 19215 is fixed
+ do_nodes $clients "echo \\\$(hostname); grep ' '$MOUNT' ' /proc/mounts"
+ declare -a nfsexport=(`grep ' '$MOUNT' ' /proc/mounts | awk '{print $1}' | awk -F: '{print $1 " " $2}'`)
+ do_nodes ${nfsexport[0]} "echo \\\$(hostname); df -T ${nfsexport[1]}"
+ return
+ fi
+ return 1
+}
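+# Callers use "nfs_client_mode && return" to skip setup/cleanup/config
+# checks when $NFSCLIENT is set (see setupall, cleanupall, check_config).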
+
check_config () {
+ nfs_client_mode && return
+
local mntpt=$1
+
+ local mounted=$(mount | grep " $mntpt ")
+ if [ "$CLIENTONLY" ]; then
+ # bug 18021
+ # CLIENTONLY should not depend on *_HOST settings
+ local mgc=$($LCTL device_list | awk '/MGC/ {print $4}')
+ # in theory someone could create a new,
+ # client-only config file that assumed lustre was already
+ # configured and didn't set the MGSNID. If MGSNID is not set,
+ # then we should use the mgs nid currently being used
+ # as the default value. bug 18021
+ [[ x$MGSNID = x ]] &&
+ MGSNID=${mgc//MGC/}
+
+ if [[ x$mgc != xMGC$MGSNID ]]; then
+ if [ "$mgs_HOST" ]; then
+ local mgc_ip=$(ping -q -c1 -w1 $mgs_HOST | grep PING | awk '{print $3}' | sed -e "s/(//g" -e "s/)//g")
+ [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] ||
+ error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc"
+ fi
+ fi
+ return 0
+ fi
+
local myMGS_host=$mgs_HOST
if [ "$NETTYPE" = "ptl" ]; then
myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}')
mgshost=$(echo $mgshost | awk -F: '{print $1}')
- if [ "$mgshost" != "$myMGS_host" ]; then
- FAIL_ON_ERROR=true \
- error "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
- Please use correct config or set mds_HOST correctly!"
- fi
+# if [ "$mgshost" != "$myMGS_host" ]; then
+# error_exit "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
+# Please use correct config or set mds_HOST correctly!"
+# fi
sanity_mount_check ||
error "environments are insane!"
}
check_and_setup_lustre() {
+ nfs_client_mode && return
+
local MOUNTED=$(mounted_lustre_filesystems)
if [ -z "$MOUNTED" ] || ! $(echo $MOUNTED | grep -w -q $MOUNT); then
[ "$REFORMAT" ] && formatall
check_config $MOUNT
init_facets_vars
init_param_vars
+
+ do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=\\\"$PTLDEBUG\\\";
+ lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
+ lctl set_param debug_mb=${DEBUG_SIZE};
+ sync"
fi
if [ "$ONLY" == "setup" ]; then
exit 0
local WAIT=0
local MAX=$2
while [ $NETWORK -eq 0 ]; do
- ping -c 1 -w 3 $1 > /dev/null
- if [ $? -eq 0 ]; then
+ if ping -c 1 -w 3 $1 > /dev/null; then
NETWORK=1
else
WAIT=$((WAIT + 5))
##################################
# Adaptive Timeouts funcs
-at_is_valid() {
- if [ -z "$AT_MAX_PATH" ]; then
- AT_MAX_PATH=$(do_facet $SINGLEMDS "find /sys/ -name at_max")
- [ -z "$AT_MAX_PATH" ] && echo "missing /sys/.../at_max " && return 1
- fi
- return 0
-}
-
at_is_enabled() {
- at_is_valid || error "invalid call"
-
# only check mds, we assume at_max is the same on all nodes
- local at_max=$(do_facet $SINGLEMDS "cat $AT_MAX_PATH")
+ local at_max=$(do_facet $SINGLEMDS "lctl get_param -n at_max")
if [ $at_max -eq 0 ]; then
return 1
else
at_max_get() {
local facet=$1
- at_is_valid || error "invalid call"
-
# assume that all OSTs have the same at_max setting
if [ $facet == "ost" ]; then
- do_facet ost1 "cat $AT_MAX_PATH"
+ do_facet ost1 "lctl get_param -n at_max"
else
- do_facet $facet "cat $AT_MAX_PATH"
+ do_facet $facet "lctl get_param -n at_max"
fi
}
local at_max=$1
shift
- at_is_valid || error "invalid call"
-
local facet
for facet in $@; do
if [ $facet == "ost" ]; then
for i in `seq $OSTCOUNT`; do
- do_facet ost$i "echo $at_max > $AT_MAX_PATH"
+ do_facet ost$i "lctl set_param at_max=$at_max"
+
done
elif [ $facet == "mds" ]; then
for i in `seq $MDSCOUNT`; do
- do_facet mds$i "echo $at_max > $AT_MAX_PATH"
+ do_facet mds$i "lctl set_param at_max=$at_max"
done
else
- do_facet $facet "echo $at_max > $AT_MAX_PATH"
+ do_facet $facet "lctl set_param at_max=$at_max"
fi
done
}
}
set_nodes_failloc () {
- local nodes=$1
- local node
-
- for node in $nodes ; do
- do_node $node lctl set_param fail_loc=$2
- done
+ do_nodes $(comma_list $1) lctl set_param fail_loc=$2
}
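+# e.g. set_nodes_failloc "$(nodes_list)" 0   # clear fail_loc everywhere (illustrative)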
cancel_lru_locks() {
return 1
fi
done
+ if [[ $MDSDEV1 != $MGSDEV ]]; then
+ stop mgs
+ fi
+
return 0
}
ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s)
echo "Dumping lctl log to $ERRLOG"
# We need to dump the logs on all nodes
- local NODES=${NODES:-$(nodes_list)}
- for NODE in $NODES; do
- do_node $NODE $LCTL dk $ERRLOG
- done
+ do_nodes $(comma_list $(nodes_list)) $LCTL dk $ERRLOG
debugrestore
[ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG
TEST_FAILED=true
done
}
-_basetest() {
- echo $*
-}
-
basetest() {
- IFS=abcdefghijklmnopqrstuvwxyz _basetest $1
+ echo ${1%%[a-z]*}
}
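+# e.g. basetest 17a prints "17" (strips the alphabetic subtest suffix)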
# print a newline if the last test was skipped
MSG=${MSG//\>/\\\>}
MSG=${MSG//\</\\\<}
MSG=${MSG//\//\\\/}
- local NODES=$(nodes_list)
- for NODE in $NODES; do
- do_node $NODE $LCTL mark "$MSG" 2> /dev/null || true
- done
+ do_nodes $(comma_list $(nodes_list)) $LCTL mark "$MSG" 2> /dev/null || true
}
trace() {
}
reset_fail_loc () {
- local myNODES=$(nodes_list)
- local NODE
-
echo -n "Resetting fail_loc on all nodes..."
- for NODE in $myNODES; do
- do_node $NODE "lctl set_param -n fail_loc=0 2>/dev/null || true"
- done
+ do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 2>/dev/null || true"
echo done.
}
remote_mds_nodsh()
{
+ [ "$CLIENTONLY" ] && return 0 || true
remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
}
remote_ost_nodsh()
{
+ [ "$CLIENTONLY" ] && return 0 || true
remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
add user $myRUNAS_UID:$myRUNAS_GID on these nodes."
}
+# obtain the UID/GID for MPI_USER
+get_mpiuser_id() {
+ local mpi_user=$1
+
+ MPI_USER_UID=$(do_facet client "getent passwd $mpi_user | cut -d: -f3;
+exit \\\${PIPESTATUS[0]}") || error_exit "failed to get the UID for $mpi_user"
+
+ MPI_USER_GID=$(do_facet client "getent passwd $mpi_user | cut -d: -f4;
+exit \\\${PIPESTATUS[0]}") || error_exit "failed to get the GID for $mpi_user"
+}
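+# e.g. get_mpiuser_id $MPI_USER   # sets MPI_USER_UID and MPI_USER_GID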
+
+# obtain and cache Kerberos ticket-granting ticket
+refresh_krb5_tgt() {
+ local myRUNAS_UID=$1
+ local myRUNAS_GID=$2
+ shift 2
+ local myRUNAS=$@
+ if [ -z "$myRUNAS" ]; then
+ error_exit "myRUNAS command must be specified for refresh_krb5_tgt"
+ fi
+
+ CLIENTS=${CLIENTS:-$HOSTNAME}
+ do_nodes $CLIENTS "set -x
+if ! $myRUNAS krb5_login.sh; then
+ echo "Failed to refresh Krb5 TGT for UID/GID $myRUNAS_UID/$myRUNAS_GID."
+ exit 1
+fi"
+}
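+# Illustrative: refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID "runas -u $MPI_USER_UID -g $MPI_USER_GID"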
+
# Run multiop in the background, but wait for it to print
# "PAUSING" to its stdout before returning from this function.
multiop_bg_pause() {
return 0
}
+do_and_time () {
+ local cmd=$1
+ local rc=0
+
+ SECONDS=0
+ eval '$cmd'
+
+ [ ${PIPESTATUS[0]} -eq 0 ] || rc=1
+
+ echo $SECONDS
+ return $rc
+}
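+# e.g. elapsed=$(do_and_time "sleep 2")   # echoes elapsed wall-clock seconds
+# NB: the command's own stdout is captured along with the timing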
+
inodes_available () {
local IFree=$($LFS df -i $MOUNT | grep ^$FSNAME | awk '{print $4}' | sort -un | head -1) || return 1
echo $IFree
# generate a stream of formatted strings (<node> <param name>=<param value>)
save_lustre_params() {
local s
- do_node $1 "lctl get_param $2" | while read s; do echo "$1 $s"; done
+ do_nodes --verbose $1 "lctl get_param $2 | while read s; do echo \\\$s; done"
}
# restore lustre parameters from input stream, produced by save_lustre_params
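+# NB: do_nodes --verbose prefixes each line with "<node>: ", which is why
+# restore_lustre_params strips the trailing ':' from the node field below.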
local name
local val
while IFS=" =" read node name val; do
- do_node $node "lctl set_param -n $name $val"
+ do_node ${node//:/} "lctl set_param -n $name $val"
done
}
stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file`
stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file`
- stripe_index=`awk '/obdidx/ {start = 1; getline; print $1; exit}' $tmp_file`
+ stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file`
rm -f $tmp_file
}
rm -f $file
sleep 1
local iused=$(lfs df -i $dir | grep MDT | awk '{print $3}')
- local oldused=($iused)
+ local -a oldused=($iused)
touch $file
sleep 1
iused=$(lfs df -i $dir | grep MDT | awk '{print $3}')
- local newused=($iused)
+ local -a newused=($iused)
local num=0
for ((i=0; i<${#newused[@]}; i++)); do
error "mdt-s : inodes count OLD ${oldused[@]} NEW ${newused[@]}"
}
-mpi_run () {
- local mpirun="$MPIRUN $MPIRUN_OPTIONS"
- local command="$mpirun $@"
- local mpilog=$TMP/mpi.log
- local rc
-
- if [ "$MPI_USER" != root -a $mpirun ]; then
- echo "+ chmod 0777 $MOUNT"
- chmod 0777 $MOUNT
- command="su $MPI_USER sh -c \"$command \""
- fi
-
- ls -ald $MOUNT
- echo "+ $command"
- eval $command 2>&1 > $mpilog || true
-
- rc=${PIPESTATUS[0]}
- if [ $rc -eq 0 ] && grep -q "p4_error: : [^0]" $mpilog ; then
- rc=1
- fi
- cat $mpilog
- return $rc
-}
-
mdsrate_cleanup () {
mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6
}
local i=0
CONN_PROC="osc.${FSNAME}-${ost}.ost_server_uuid"
- CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC | cut -f2)
+ CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
while [ "${CONN_STATE}" != "${expected}" ]; do
- # for disconn we can check after proc entry is removed
- [ "x${CONN_STATE}" == "x" -a "${expected}" == "DISCONN" ] && return 0
+ if [ "${expected}" == "DISCONN" ]; then
+ # for disconn we can check after proc entry is removed
+ [ "x${CONN_STATE}" == "x" ] && return 0
+ # with AT enabled the connect request timeout can be close to the
+ # reconnect timeout, so the test may never see the real disconnect
+ [ "${CONN_STATE}" == "CONNECTING" ] && return 0
+ fi
# the disconnect rpc should wait no more than obd_timeout
[ $i -ge $(($TIMEOUT * 3 / 2)) ] && \
error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1
sleep 1
- CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC | cut -f2)
+ CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
i=$(($i + 1))
done