# functions used by scripts run on remote nodes
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/functions.sh
+. $LUSTRE/tests/yaml.sh
+
+LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg}
+
+EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh}
+
+if [ -f "$EXCEPT_LIST_FILE" ]; then
+ echo "Reading test skip list from $EXCEPT_LIST_FILE"
+ cat $EXCEPT_LIST_FILE
+ . $EXCEPT_LIST_FILE
+fi
+
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf
+[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] && MODPROBECONF=/etc/modprobe.d/Lustre
assert_DIR () {
local failed=""
print_summary () {
trap 0
- [ "$TESTSUITE" == "lfscktest" ] && return 0
+ [ "$TESTSUITE" == "lfsck" ] && return 0
[ -n "$ONLY" ] && echo "WARNING: ONLY is set to ${ONLY}."
local form="%-13s %-17s %s\n"
printf "$form" "status" "script" "skipped tests E(xcluded) S(low)"
echo "------------------------------------------------------------------------------------"
- for O in $TESTSUITE_LIST; do
+ for O in $DEFAULT_SUITES; do
local skipped=""
local slow=""
- local o=$(echo $O | tr "[:upper:]" "[:lower:]")
+ O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
+ local o=$(echo $O | tr "[:upper:]" "[:lower:]")
o=${o//_/-}
- o=${o//tyn/tyN}
local log=${TMP}/${o}.log
[ -f $log ] && skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
[ -f $log ] && slow=$(grep SLOW $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
done
- for O in $TESTSUITE_LIST; do
+ for O in $DEFAULT_SUITES; do
+ O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
if [ "${!O}" = "no" ]; then
# FIXME.
# only for those test suites which are run directly from the acc-sm script:
fi
done
- for O in $TESTSUITE_LIST; do
+ for O in $DEFAULT_SUITES; do
+ O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
[ "${!O}" = "done" -o "${!O}" = "no" ] || \
printf "$form" "UNFINISHED" "$O" ""
done
#[ -d /r ] && export ROOT=${ROOT:-/r}
export TMP=${TMP:-$ROOT/tmp}
export TESTSUITELOG=${TMP}/${TESTSUITE}.log
+ if [[ -z $LOGDIRSET ]]; then
+ export LOGDIR=${LOGDIR:-${TMP}/test_logs/}/$(date +%s)
+ export LOGDIRSET=true
+ fi
export HOSTNAME=${HOSTNAME:-`hostname`}
if ! echo $PATH | grep -q $LUSTRE/utils; then
export PATH=$PATH:$LUSTRE/utils
export PATH=$PATH:$LUSTRE/utils/gss
fi
if ! echo $PATH | grep -q $LUSTRE/tests; then
- export PATH=$PATH:$LUSTRE/tests
+ export PATH=$PATH:$LUSTRE/tests
fi
+ export LST=${LST:-"$LUSTRE/../lnet/utils/lst"}
+ [ ! -f "$LST" ] && export LST=$(which lst)
export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"}
[ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
- export PATH=$PATH:$LUSTRE/tests/racer
+ export PATH=$LUSTRE/tests/racer:$PATH
fi
if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then
export PATH=$PATH:$LUSTRE/tests/mpi
fi
+ export RSYNC_RSH=${RSYNC_RSH:-rsh}
export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
[ ! -f "$LCTL" ] && export LCTL=$(which lctl)
export LFS=${LFS:-"$LUSTRE/utils/lfs"}
xkrb5*)
echo "Using GSS/krb5 ptlrpc security flavor"
which lgss_keyring > /dev/null 2>&1 || \
- error "built with gss disabled! SEC=$SEC"
+ error_exit "built with gss disabled! SEC=$SEC"
GSS=true
GSS_KRB5=true
;;
/sbin/lsmod | grep -q $1
}
+# Load a module on the system where this is running.
+#
+# Synopsis: load_module module_name [module arguments for insmod/modprobe]
+#
+# If module arguments are not given but MODOPTS_<MODULE> is set, then its value
+# will be used as the arguments. Otherwise arguments will be obtained from
+# /etc/modprobe.conf, from /etc/modprobe.d/Lustre, or else none will be used.
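+#
+# For example, with MODOPTS_LIBCFS="libcfs_debug=-1" set in the environment,
+# "load_module ../libcfs/libcfs/libcfs" would pass libcfs_debug=-1 to
+# insmod/modprobe (the option value here is only an illustration).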
+#
load_module() {
+ local optvar
EXT=".ko"
module=$1
shift
BASE=`basename $module $EXT`
+ # If no module arguments were passed, get them from $MODOPTS_<MODULE>, else from
+ # modprobe.conf
+ if [ $# -eq 0 ]; then
+ # $MODOPTS_<MODULE>; we could use associative arrays, but that's not in
+ # Bash until 4.x, so we resort to eval.
+ optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
+ eval set -- \$$optvar
+ if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
+ # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
+ set -- $(grep "^options\\s*\<${BASE}\>" $MODPROBECONF)
+ # Get rid of "options $module"
+ (($# > 0)) && shift 2
+
+ # Ensure we have accept=all for lnet
+ if [ "$BASE" = lnet ]; then
+ # OK, this is a bit wordy...
+ local arg accept_all_present=false
+ for arg in "$@"; do
+ [ "$arg" = accept=all ] && accept_all_present=true
+ done
+ $accept_all_present || set -- "$@" accept=all
+ fi
+ fi
+ fi
+
+ [ $# -gt 0 ] && echo "${module} options: '$*'"
+
module_loaded ${BASE} && return
- if [ -f ${LUSTRE}/${module}${EXT} ]; then
- insmod ${LUSTRE}/${module}${EXT} $@
+ # Note that insmod will ignore anything in modprobe.conf, which is why we're
+ # passing options on the command-line.
+ if [ "$BASE" == "lnet_selftest" ] && \
+ [ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]; then
+ insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
+ elif [ -f ${LUSTRE}/${module}${EXT} ]; then
+ insmod ${LUSTRE}/${module}${EXT} "$@"
else
# must be testing a "make install" or "rpm" installation
# note that failure to load ptlrpc_gss is not considered fatal
if [ "$BASE" == "ptlrpc_gss" ]; then
- modprobe $BASE $@ 2>/dev/null || echo "gss/krb5 is not supported"
+ modprobe $BASE "$@" 2>/dev/null || echo "gss/krb5 is not supported"
else
- modprobe $BASE $@
+ modprobe $BASE "$@"
fi
fi
}
load_modules_local() {
if [ -n "$MODPROBE" ]; then
# use modprobe
- return 0
+ echo "Using modprobe to load modules"
+ return 0
fi
if [ "$HAVE_MODULES" = true ]; then
- # we already loaded
+ # we already loaded
+ echo "Modules already loaded"
return 0
fi
HAVE_MODULES=true
load_module ../libcfs/libcfs/libcfs
[ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG"
[ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }"
- local MODPROBECONF=
- [ -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf
- [ ! "$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre
- [ -z "$LNETOPTS" -a "$MODPROBECONF" ] && \
- LNETOPTS=$(awk '/^options lnet/ { print $0}' $MODPROBECONF | sed 's/^options lnet //g')
- echo $LNETOPTS | grep -q "accept=all" || LNETOPTS="$LNETOPTS accept=all";
- echo "lnet options: '$LNETOPTS'"
- # note that insmod will ignore anything in modprobe.conf
- load_module ../lnet/lnet/lnet $LNETOPTS
+ load_module ../lnet/lnet/lnet
LNETLND=${LNETLND:-"socklnd/ksocklnd"}
load_module ../lnet/klnds/$LNETLND
load_module lvfs/lvfs
if $LOAD_MODULES_REMOTE ; then
local list=$(comma_list $(remote_nodes_list))
- echo unloading modules on $list
- do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE
- do_rpc_nodes $list check_mem_leak
+ if [ -n "$list" ]; then
+ echo unloading modules on $list
+ do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE
+ do_rpc_nodes $list check_mem_leak
+ fi
fi
HAVE_MODULES=false
return 0
}
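+# Check that exactly one instance of the gss daemon $2 is running on every
+# node in the comma-separated list $1.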
+check_gss_daemon_nodes() {
+ local list=$1
+ dname=$2
+
+ do_nodes --verbose $list "num=\\\$(ps -o cmd -C $dname | grep $dname | wc -l);
+if [ \\\"\\\$num\\\" -ne 1 ]; then
+ echo \\\$num instance of $dname;
+ exit 1;
+fi; "
+}
+
check_gss_daemon_facet() {
facet=$1
dname=$2
}
send_sigint() {
- local facet=$1
+ local list=$1
shift
- do_facet $facet "killall -2 $@ 2>/dev/null || true"
+ echo Stopping $@ on $list
+ do_nodes $list "killall -2 $@ 2>/dev/null || true"
}
+# Start gss daemons on all nodes, or start only the given "daemon" on the
+# nodes in "list" when both arguments are supplied.
start_gss_daemons() {
- # starting on MDT
- for num in `seq $MDSCOUNT`; do
- do_facet mds$num "$LSVCGSSD -v"
- if $GSS_PIPEFS; then
- do_facet mds$num "$LGSSD -v"
- fi
- done
- # starting on OSTs
- for num in `seq $OSTCOUNT`; do
- do_facet ost$num "$LSVCGSSD -v"
- done
- # starting on client
- # FIXME: is "client" the right facet name?
+ local list=$1
+ local daemon=$2
+
+ if [ "$list" ] && [ "$daemon" ] ; then
+ echo "Starting gss daemon on nodes: $list"
+ do_nodes $list "$daemon" || return 8
+ return 0
+ fi
+
+ local list=$(comma_list $(mdts_nodes))
+
+ echo "Starting gss daemon on mds: $list"
+ do_nodes $list "$LSVCGSSD -v" || return 1
if $GSS_PIPEFS; then
- do_facet client "$LGSSD -v"
+ do_nodes $list "$LGSSD -v" || return 2
+ fi
+
+ list=$(comma_list $(osts_nodes))
+ echo "Starting gss daemon on ost: $list"
+ do_nodes $list "$LSVCGSSD -v" || return 3
+ # starting on clients
+
+ local clients=${CLIENTS:-`hostname`}
+ if $GSS_PIPEFS; then
+ echo "Starting $LGSSD on clients $clients "
+ do_nodes $clients "$LGSSD -v" || return 4
fi
    # wait for daemons to enter "stable" status
#
# check daemons are running
#
- for num in `seq $MDSCOUNT`; do
- check_gss_daemon_facet mds$num lsvcgssd
- if $GSS_PIPEFS; then
- check_gss_daemon_facet mds$num lgssd
- fi
- done
- for num in `seq $OSTCOUNT`; do
- check_gss_daemon_facet ost$num lsvcgssd
- done
+ list=$(comma_list $(mdts_nodes) $(osts_nodes))
+ check_gss_daemon_nodes $list lsvcgssd || return 5
+ if $GSS_PIPEFS; then
+ list=$(comma_list $(mdts_nodes))
+ check_gss_daemon_nodes $list lgssd || return 6
+ fi
if $GSS_PIPEFS; then
- check_gss_daemon_facet client lgssd
+ check_gss_daemon_nodes $clients lgssd || return 7
fi
}
stop_gss_daemons() {
- for num in `seq $MDSCOUNT`; do
- send_sigint mds$num lsvcgssd lgssd
- done
- for num in `seq $OSTCOUNT`; do
- send_sigint ost$num lsvcgssd
- done
- send_sigint client lgssd
+ local list=$(comma_list $(mdts_nodes))
+
+ send_sigint $list lsvcgssd lgssd
+
+ list=$(comma_list $(osts_nodes))
+ send_sigint $list lsvcgssd
+
+ list=${CLIENTS:-`hostname`}
+ send_sigint $list lgssd
}
init_gss() {
if $GSS; then
- start_gss_daemons
+ if ! module_loaded ptlrpc_gss; then
+ load_module ptlrpc/gss/ptlrpc_gss
+ module_loaded ptlrpc_gss ||
+ error_exit "init_gss : GSS=$GSS, but gss/krb5 is not supported!"
+ fi
+ start_gss_daemons || error_exit "start gss daemon failed! rc=$?"
if [ -n "$LGSS_KEYRING_DEBUG" ]; then
echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level
if [ "$myPDSH" = "rsh" ]; then
# we need this because rsh does not return exit code of an executed command
- local command_status="$TMP/cs"
- rsh $HOST ":> $command_status"
- rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin;
- cd $RPWD; sh -c \"$@\") ||
- echo command failed >$command_status"
- [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
+ local command_status="$TMP/cs"
+ rsh $HOST ":> $command_status"
+ rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin;
+ cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\") ||
+ echo command failed >$command_status"
+ [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
return 0
fi
if $verbose ; then
# print HOSTNAME for myPDSH="no_dsh"
if [[ $myPDSH = no_dsh ]]; then
- $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /"
+ $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /"
else
- $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
+ $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")"
fi
else
- $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //"
+ $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed "s/^${HOST}: //"
fi
return ${PIPESTATUS[0]}
}
[ "$1" = "$HOSTNAME" ]
}
+# Outputs environment variable assignments that should be passed to remote nodes
+get_env_vars() {
+ local var
+ local value
+
+ for var in ${!MODOPTS_*}; do
+ value=${!var}
+ echo "${var}=\"$value\""
+ done
+}
+
do_nodes() {
local verbose=false
    # do not strip off the hostname if verbose, bug 19215
local rnodes=$1
shift
- if $(single_local_node $rnodes); then
+ if single_local_node $rnodes; then
if $verbose; then
- do_node --verbose $rnodes $@
+ do_node --verbose $rnodes "$@"
else
- do_node $rnodes $@
+ do_node $rnodes "$@"
fi
return $?
fi
fi
if $verbose ; then
- $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
+ $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")"
else
- $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g"
+ $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" | sed -re "s/\w+:\s//g"
fi
return ${PIPESTATUS[0]}
}
rm -f $TMP/ost${num}active
done
+ if ! combined_mgs_mds ; then
+ stop mgs
+ fi
+
return 0
}
test $nr = 1 && echo -n $MDS_MKFS_OPTS || echo -n $MDSn_MKFS_OPTS
}
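+# True if the MGS and MDS1 are combined, i.e. they share the same device and
+# the same host.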
+combined_mgs_mds () {
+ [[ $MDSDEV1 = $MGSDEV ]] && [[ $mds1_HOST = $mgs_HOST ]]
+}
+
formatall() {
if [ "$IAMDIR" == "yes" ]; then
MDS_MKFS_OPTS="$MDS_MKFS_OPTS --iam-dir"
[ "$FSTYPE" ] && FSTYPE_OPT="--backfstype $FSTYPE"
- if [ ! -z $SEC ]; then
- MDS_MKFS_OPTS="$MDS_MKFS_OPTS --param srpc.flavor.default=$SEC"
- MDSn_MKFS_OPTS="$MDSn_MKFS_OPTS --param srpc.flavor.default=$SEC"
- OST_MKFS_OPTS="$OST_MKFS_OPTS --param srpc.flavor.default=$SEC"
- fi
-
stopall
# We need ldiskfs here, may as well load them all
load_modules
[ "$CLIENTONLY" ] && return
echo Formatting mgs, mds, osts
- if [[ $MDSDEV1 != $MGSDEV ]] || [[ $mds1_HOST != $mgs_HOST ]]; then
+ if ! combined_mgs_mds ; then
add mgs $mgs_MKFS_OPTS $FSTYPE_OPT --reformat $MGSDEV || exit 10
fi
remount_client()
{
- zconf_umount `hostname` $1 || error "umount failed"
- zconf_mount `hostname` $1 || error "mount failed"
+ zconf_umount `hostname` $1 || error "umount failed"
+ zconf_mount `hostname` $1 || error "mount failed"
}
writeconf_facet () {
error "environments are insane!"
load_modules
- init_gss
+
if [ -z "$CLIENTONLY" ]; then
echo Setup mgs, mdt, osts
echo $WRITECONF | grep -q "writeconf" && \
writeconf_all
- if [[ $mds1_HOST != $mgs_HOST ]] || [[ $MDSDEV1 != $MGSDEV ]]; then
+ if ! combined_mgs_mds ; then
start mgs $MGSDEV $mgs_MOUNT_OPTS
fi
eval mds${num}failover_HOST=$(facet_host mds$num)
fi
- if [ $IDENTITY_UPCALL != "default" ]; then
+ if [ $IDENTITY_UPCALL != "default" ]; then
switch_identity $num $IDENTITY_UPCALL
- fi
+ fi
done
for num in `seq $OSTCOUNT`; do
DEVNAME=$(ostdevname $num)
done
fi
+
+ init_gss
+
    # wait a while to allow the sptlrpc configuration to be propagated to targets,
# only needed when mounting new target devices.
- $GSS && sleep 10
+ if $GSS; then
+ sleep 10
+ fi
[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
mount_client $MOUNT
[ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT
if [ "$MOUNT_2" ]; then
- mount_client $MOUNT2
+ mount_client $MOUNT2
[ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2
fi
# by a context negotiation rpc with $TIMEOUT.
# FIXME better by monitoring import status.
if $GSS; then
+ set_flavor_all $SEC
sleep $((TIMEOUT + 5))
else
sleep 5
}
mounted_lustre_filesystems() {
- awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts
+ awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts
}
init_facet_vars () {
    [ $period -lt $timeout ] || log "$count OSTs are inactive after $timeout seconds, giving up"
}
-som_check() {
- SOM_ENABLED=$(do_facet $SINGLEMDS "$LCTL get_param mdt.*.som" | awk -F= ' {print $2}' | head -n 1)
- echo $SOM_ENABLED
-}
-
init_param_vars () {
if ! remote_ost_nodsh && ! remote_mds_nodsh; then
export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2)
osc_ensure_active $SINGLEMDS M $TIMEOUT
osc_ensure_active client c $TIMEOUT
- if [ x"$(som_check)" = x"enabled" ]; then
- ENABLE_QUOTA=""
- echo "disable quota temporary when SOM enabled"
- fi
if [ $QUOTA_AUTO -ne 0 ]; then
if [ "$ENABLE_QUOTA" ]; then
echo "enable quota as required"
return 1
}
-check_config () {
- nfs_client_mode && return
-
+check_config_client () {
local mntpt=$1
local mounted=$(mount | grep " $mntpt ")
local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}')
mgshost=$(echo $mgshost | awk -F: '{print $1}')
- if [ "$mgshost" != "$myMGS_host" ]; then
- log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
- Please use correct config or set mds_HOST correctly!"
- fi
+# if [ "$mgshost" != "$myMGS_host" ]; then
+# log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
+# Please use correct config or set mds_HOST correctly!"
+# fi
+
+}
+
+check_config_clients () {
+ local clients=${CLIENTS:-$HOSTNAME}
+ local mntpt=$1
+
+ nfs_client_mode && return
+
+ do_rpc_nodes $clients check_config_client $mntpt
sanity_mount_check ||
error "environments are insane!"
fi
}
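+# Check whether mount point $1 appears in the list of mounted Lustre
+# filesystems.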
+is_mounted () {
+ local mntpt=$1
+ local mounted=$(mounted_lustre_filesystems)
+
+ echo $mounted' ' | grep -w -q $mntpt' '
+}
+
check_and_setup_lustre() {
nfs_client_mode && return
local MOUNTED=$(mounted_lustre_filesystems)
local do_check=true
- # MOUNT is not mounted
- if [ -z "$MOUNTED" ] || ! $(echo $MOUNTED | grep -w -q $MOUNT); then
+ # 1.
+ # neither MOUNT nor MOUNT2 is mounted
+ if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
[ "$REFORMAT" ] && formatall
+ # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
setupall
- MOUNTED=$(mounted_lustre_filesystems | head -1)
- [ -z "$MOUNTED" ] && error "NAME=$NAME not mounted"
+ is_mounted $MOUNT || error "NAME=$NAME not mounted"
export I_MOUNTED=yes
do_check=false
-
- # MOUNT and MOUNT2 are mounted
- elif $(echo $MOUNTED | grep -w -q $MOUNT2); then
-
- # MOUNT2 is mounted, MOUNT_2 is not set
- if ! [ "$MOUNT_2" ]; then
- zconf_umount `hostname` $MOUNT2
- export I_UMOUNTED2=yes
-
- # MOUNT2 is mounted, MOUNT_2 is set
- else
- check_config $MOUNT2
- fi
+ # 2.
+ # MOUNT2 is mounted
+ elif is_mounted $MOUNT2; then
+ # 3.
+ # MOUNT2 is mounted, while MOUNT_2 is not set
+ if ! [ "$MOUNT_2" ]; then
+ cleanup_mount $MOUNT2
+ export I_UMOUNTED2=yes
+
+ # 4.
+ # MOUNT2 is mounted, MOUNT_2 is set
+ else
+ # FIXME: what to do if check_config fails?
+ # i.e. if:
+ # 1) a remote client has mounted another Lustre fs?
+ # 2) it has an insane env?
+ # let's try umount MOUNT2 on all clients and mount it again:
+ if ! check_config_clients $MOUNT2; then
+ cleanup_mount $MOUNT2
+ restore_mount $MOUNT2
+ export I_MOUNTED2=yes
+ fi
+ fi
+
+ # 5.
+ # MOUNT is mounted, MOUNT2 is not
+ elif [ "$MOUNT_2" ]; then
+ restore_mount $MOUNT2
+ export I_MOUNTED2=yes
fi
if $do_check; then
- check_config $MOUNT
+ # FIXME: what to do if check_config fails?
+ # i.e. if:
+ # 1) a remote client has mounted another Lustre fs?
+ # 2) lustre is mounted on remote_clients at all?
+ check_config_clients $MOUNT
init_facets_vars
init_param_vars
lctl set_param debug_mb=${DEBUG_SIZE};
sync"
fi
+
+ init_gss
+ set_flavor_all $SEC
+
if [ "$ONLY" == "setup" ]; then
exit 0
fi
}
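+# Mount Lustre at mount point $1 on all clients (CLIENTS, or the local host).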
+restore_mount () {
+ local clients=${CLIENTS:-$HOSTNAME}
+ local mntpt=$1
+
+ zconf_mount_clients $clients $mntpt
+}
+
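+# Unmount Lustre mount point $1 on all clients (CLIENTS, or the local host).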
+cleanup_mount () {
+ local clients=${CLIENTS:-$HOSTNAME}
+ local mntpt=$1
+
+ zconf_umount_clients $clients $mntpt
+}
+
cleanup_and_setup_lustre() {
if [ "$ONLY" == "cleanup" -o "`mount | grep $MOUNT`" ]; then
lctl set_param debug=0 || true
cleanupall
if [ "$ONLY" == "cleanup" ]; then
- exit 0
+ exit 0
fi
fi
check_and_setup_lustre
}
check_and_cleanup_lustre() {
- if [ "`mount | grep $MOUNT`" ]; then
+ if is_mounted $MOUNT; then
[ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]*
[ "$ENABLE_QUOTA" ] && restore_quota_type || true
fi
+
if [ "$I_UMOUNTED2" = "yes" ]; then
- mount_client $MOUNT2 || error "restore $MOUNT2 failed"
+ restore_mount $MOUNT2 || error "restore $MOUNT2 failed"
+ fi
+
+ if [ "$I_MOUNTED2" = "yes" ]; then
+ cleanup_mount $MOUNT2
fi
if [ "$I_MOUNTED" = "yes" ]; then
cleanupall -f || error "cleanup failed"
+ unset I_MOUNTED
fi
- unset I_MOUNTED
}
#######
# assume that all OSTs have the same at_max setting
if [ $facet == "ost" ]; then
- do_facet ost1 "lctl get_param -n at_max"
+ do_facet ost1 "lctl get_param -n at_max"
else
- do_facet $facet "lctl get_param -n at_max"
+ do_facet $facet "lctl get_param -n at_max"
fi
}
for facet in $@; do
if [ $facet == "ost" ]; then
for i in `seq $OSTCOUNT`; do
- do_facet ost$i "lctl set_param at_max=$at_max"
+ do_facet ost$i "lctl set_param at_max=$at_max"
done
elif [ $facet == "mds" ]; then
for i in `seq $MDSCOUNT`; do
- do_facet mds$i "lctl set_param at_max=$at_max"
+ do_facet mds$i "lctl set_param at_max=$at_max"
done
else
- do_facet $facet "lctl set_param at_max=$at_max"
+ do_facet $facet "lctl set_param at_max=$at_max"
fi
done
}
return 1
fi
done
- if [[ $MDSDEV1 != $MGSDEV ]]; then
- stop mgs
- fi
-
return 0
}
error_noexit() {
local TYPE=${TYPE:-"FAIL"}
- local ERRLOG
local dump=true
# do not dump logs if $1=false
log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
+ # We need to dump the logs on all nodes
if $dump; then
- ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s)
- echo "Dumping lctl log to $ERRLOG"
- # We need to dump the logs on all nodes
- do_nodes $(comma_list $(nodes_list)) $NODE $LCTL dk $ERRLOG
+ gather_logs $(comma_list $(nodes_list))
fi
+
debugrestore
[ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG
- TEST_FAILED=true
+ echo "$@" > $LOGDIR/err
}
error() {
error_noexit "$@"
- if $FAIL_ON_ERROR; then
- reset_fail_loc
- exit 1
- fi
+ exit 1
}
error_exit() {
- error_noexit "$@"
- exit 1
+ error "$@"
}
# use only if we are ignoring failures for this test; a bug number is required.
done
for G in $GRANT_CHECK_LIST; do
eval GCHECK_ONLY_${G}=true
- done
+ done
}
basetest() {
# print a newline if the last test was skipped
export LAST_SKIPPED=
+#
+# Main entry into test-framework. This is called with the name and
+# description of a test. The name is used to find the function to run
+# the test using "test_$name".
+#
+# This supports a variety of methods of specifying which tests to run
+# or not to run; these need to be documented...
+#
run_test() {
assert_DIR
testname=ONLY_$1
if [ ${!testname}x != x ]; then
[ "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
- run_one $1 "$2"
+ run_one_logged $1 "$2"
return $?
fi
testname=ONLY_$base
if [ ${!testname}x != x ]; then
[ "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
- run_one $1 "$2"
+ run_one_logged $1 "$2"
return $?
fi
LAST_SKIPPED="y"
fi
LAST_SKIPPED=
- run_one $1 "$2"
+ run_one_logged $1 "$2"
return $?
}
}
trace() {
- log "STARTING: $*"
- strace -o $TMP/$1.strace -ttt $*
- RC=$?
- log "FINISHED: $*: rc $RC"
- return 1
+ log "STARTING: $*"
+ strace -o $TMP/$1.strace -ttt $*
+ RC=$?
+ log "FINISHED: $*: rc $RC"
+ return 1
}
pass() {
- $TEST_FAILED && echo -n "FAIL " || echo -n "PASS "
- echo $@
+ # Set TEST_STATUS here; will be used for logging the result
+ if [ -f $LOGDIR/err ]; then
+ TEST_STATUS="FAIL"
+ else
+ TEST_STATUS="PASS"
+ fi
+ echo $TEST_STATUS " " $@
}
check_mds() {
echo done.
}
+
+#
+# Log a message (on all nodes) padded with "=" before and after.
+# Also appends a timestamp and prepends the testsuite name.
+#
+banner() {
+ msg="== ${TESTSUITE} $*"
+ # pad the message out to 70 with "="
+ last=${msg: -1:1}
+ [[ $last != "=" && $last != " " ]] && msg+=" "
+ for i in $(seq $((68 - ${#msg})) ); do
+ msg+="="
+ done
+ # always include at least == after the message
+ msg+="=="
+
+ log "$msg $(date +"%H:%M:%S (%s)")"
+}
+
+#
+# Run a single test function and cleanup after it.
+#
+# This function should be run in a subshell so the test func can
+# exit() without stopping the whole script.
+#
run_one() {
- testnum=$1
- message=$2
+ local testnum=$1
+ local message=$2
tfile=f${testnum}
export tdir=d0.${TESTSUITE}/d${base}
-
+ export TESTNAME=test_$testnum
local SAVE_UMASK=`umask`
umask 0022
- local BEFORE=`date +%s`
- echo
- log "== test $testnum: $message == `date +%H:%M:%S` ($BEFORE)"
- #check_mds
- export TESTNAME=test_$testnum
- TEST_FAILED=false
+ banner "test $testnum: $message"
test_${testnum} || error "test_$testnum failed with $?"
- #check_mds
cd $SAVE_PWD
reset_fail_loc
check_grant ${testnum} || error "check_grant $testnum failed with $?"
check_catastrophe || error "LBUG/LASSERT detected"
ps auxww | grep -v grep | grep -q multiop && error "multiop still running"
- pass "($((`date +%s` - $BEFORE))s)"
- TEST_FAILED=false
unset TESTNAME
unset tdir
umask $SAVE_UMASK
+ return 0
+}
+
+#
+# Wrapper around run_one to ensure:
+# - test runs in subshell
+# - output of test is saved to separate log file for error reporting
+# - test result is saved to data file
+#
+run_one_logged() {
+ local BEFORE=`date +%s`
+ local TEST_ERROR
+ local name=${TESTSUITE}.test_${1}.test_log.$(hostname).log
+ local test_log=$LOGDIR/$name
+ rm -rf $LOGDIR/err
+
+ echo
+ run_one $1 "$2" 2>&1 | tee $test_log
+ local RC=${PIPESTATUS[0]}
+
+ [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \
+ echo "test_$1 returned $RC" | tee $LOGDIR/err
+
+ duration=$((`date +%s` - $BEFORE))
+ pass "(${duration}s)"
+ [ -f $LOGDIR/err ] && TEST_ERROR=$(cat $LOGDIR/err)
+ log_sub_test test_${1} $TEST_STATUS $duration "$RC" "$TEST_ERROR"
+
+ if [ -f $LOGDIR/err ]; then
+ $FAIL_ON_ERROR && exit $RC
+ fi
+
+ return 0
}
canonical_path() {
sync_clients() {
[ -d $DIR1 ] && cd $DIR1 && sync; sleep 1; sync
[ -d $DIR2 ] && cd $DIR2 && sync; sleep 1; sync
- cd $SAVE_PWD
+ cd $SAVE_PWD
}
check_grant() {
export base=`basetest $1`
[ "$CHECK_GRANT" == "no" ] && return 0
- testname=GCHECK_ONLY_${base}
+ testname=GCHECK_ONLY_${base}
[ ${!testname}x == x ] && return 0
echo -n "checking grant......"
- cd $SAVE_PWD
- # write some data to sync client lost_grant
- rm -f $DIR1/${tfile}_check_grant_* 2>&1
- for i in `seq $OSTCOUNT`; do
- $LFS setstripe $DIR1/${tfile}_check_grant_$i -i $(($i -1)) -c 1
- dd if=/dev/zero of=$DIR1/${tfile}_check_grant_$i bs=4k \
- count=1 > /dev/null 2>&1
- done
- # sync all the data and make sure no pending data on server
- sync_clients
-
- #get client grant and server grant
- client_grant=0
+ cd $SAVE_PWD
+ # write some data to sync client lost_grant
+ rm -f $DIR1/${tfile}_check_grant_* 2>&1
+ for i in `seq $OSTCOUNT`; do
+ $LFS setstripe $DIR1/${tfile}_check_grant_$i -i $(($i -1)) -c 1
+ dd if=/dev/zero of=$DIR1/${tfile}_check_grant_$i bs=4k \
+ count=1 > /dev/null 2>&1
+ done
+ # sync all the data and make sure no pending data on server
+ sync_clients
+
+ #get client grant and server grant
+ client_grant=0
for d in `lctl get_param -n osc.*.cur_grant_bytes`; do
- client_grant=$((client_grant + $d))
- done
- server_grant=0
- for d in `lctl get_param -n obdfilter.*.tot_granted`; do
- server_grant=$((server_grant + $d))
- done
-
- # cleanup the check_grant file
- for i in `seq $OSTCOUNT`; do
- rm $DIR1/${tfile}_check_grant_$i
- done
-
- #check whether client grant == server grant
- if [ $client_grant != $server_grant ]; then
- echo "failed: client:${client_grant} server: ${server_grant}"
- return 1
- else
- echo "pass"
- fi
+ client_grant=$((client_grant + $d))
+ done
+ server_grant=0
+ for d in `lctl get_param -n obdfilter.*.tot_granted`; do
+ server_grant=$((server_grant + $d))
+ done
+
+ # cleanup the check_grant file
+ for i in `seq $OSTCOUNT`; do
+ rm $DIR1/${tfile}_check_grant_$i
+ done
+
+ #check whether client grant == server grant
+ if [ $client_grant != $server_grant ]; then
+ echo "failed: client:${client_grant} server: ${server_grant}"
+ return 1
+ else
+ echo "pass"
+ fi
}
########################
remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
}
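+# Require a usable remote shell to the MDS: if the MDS is remote and no PDSH
+# is configured, print a SKIP message, set MSKIPPED and return 1.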
+require_dsh_mds()
+{
+ remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" && \
+ MSKIPPED=1 && return 1
+ return 0
+}
+
remote_ost ()
{
local node
remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
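+# Require a usable remote shell to the OSTs: if an OST is remote and no PDSH
+# is configured, print a SKIP message, set OSKIPPED and return 1.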
+require_dsh_ost()
+{
+ remote_ost_nodsh && echo "SKIP: $TESTSUITE: remote OST with nodsh" && \
+ OSKIPPED=1 && return 1
+ return 0
+}
+
remote_mgs_nodsh()
{
local MGS
remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
}
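+# Running in "local mode": either the MDS or an OST is remote without a
+# usable remote shell, or every configured node is the local host.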
+local_mode ()
+{
+ remote_mds_nodsh || remote_ost_nodsh || \
+ $(single_local_node $(comma_list $(nodes_list)))
+}
+
mdts_nodes () {
local MDSNODES
local NODES_sort
rm -f $file
}
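+# Run "lfs setstripe" against an NFS-mounted Lustre directory: find the NFS
+# server exporting directory $1 in /proc/mounts and run "lfs setstripe" with
+# the given arguments there; fail if $1 is not NFS mounted.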
+setstripe_nfsserver () {
+ local dir=$1
+
+ local nfsserver=$(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \
+ { print $1 }' /proc/mounts | cut -f 1 -d : | head -1)
+
+ [ -z $nfsserver ] && echo "$dir is not nfs mounted" && return 1
+
+ do_node --verbose $nfsserver lfs setstripe "$@"
+}
+
check_runas_id_ret() {
local myRC=0
local myRUNAS_UID=$1
echo $IFree
}
+mdsrate_inodes_available () {
+ echo $(($(inodes_available) - 1))
+}
+
# reset llite stat counters
clear_llite_stats(){
lctl set_param -n llite.*.stats 0
# $2 file
# $3 $RUNAS
get_stripe_info() {
- local tmp_file
+ local tmp_file
- stripe_size=0
- stripe_count=0
- stripe_index=0
- tmp_file=$(mktemp)
+ stripe_size=0
+ stripe_count=0
+ stripe_index=0
+ tmp_file=$(mktemp)
- do_facet $1 $3 lfs getstripe -v $2 > $tmp_file
+ do_facet $1 $3 lfs getstripe -v $2 > $tmp_file
- stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file`
- stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file`
- stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file`
- rm -f $tmp_file
+ stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file`
+ stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file`
+ stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file`
+ rm -f $tmp_file
}
# CMD: determine mds index where directory inode presents
log "${ost_facet} now in ${CONN_STATE} state"
return 0
}
-
get_clientmdc_proc_path() {
echo "${1}-mdc-*"
}
local list=$1
shift
- do_nodes --verbose $list "PATH=$LUSTRE/tests/:$PATH sh rpc.sh $@ "
+ # Add paths to lustre tests for 32 and 64 bit systems.
+ local RPATH="$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
+ do_nodes --verbose $list "PATH=$RPATH sh rpc.sh $@ "
}
wait_clients_import_state () {
*) error "unknown facet!" ;;
esac
-
if ! do_rpc_nodes $list wait_import_state $expected $proc_path; then
error "import is not in ${expected} state"
return 1
return $OSCFULL
}
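+# List the OST pools (or the members of one pool) for "$1" via the MGS.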
+pool_list () {
+ do_facet mgs lctl pool_list $1
+}
+
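+# create_pool <fsname>.<poolname>: create an OST pool on the MGS, wait until
+# it becomes visible on the local client, and record it in
+# <fsname>_CREATED_POOLS so cleanup_pools can remove it later.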
+create_pool() {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ do_facet mgs lctl pool_new $1
+ local RC=$?
+ # get param should return err unless pool is created
+ [[ $RC -ne 0 ]] && return $RC
+
+ wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+ 2>/dev/null || echo foo" "" || RC=1
+ if [[ $RC -eq 0 ]]; then
+ add_pool_to_list $1
+ else
+ error "pool_new failed $1"
+ fi
+ return $RC
+}
+
+add_pool_to_list () {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ local listvar=${fsname}_CREATED_POOLS
+ eval export ${listvar}=$(expand_list ${!listvar} $poolname)
+}
+
+remove_pool_from_list () {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ local listvar=${fsname}_CREATED_POOLS
+ eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
+}
+
destroy_pool_int() {
local ost
local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \
do_facet mgs lctl pool_destroy $1
}
+# <fsname>.<poolname> or <poolname>
destroy_pool() {
+ local fsname=${1%%.*}
+ local poolname=${1##$fsname.}
+
+ [[ x$fsname = x$poolname ]] && fsname=$FSNAME
+
local RC
- do_facet $SINGLEMDS lctl pool_list $FSNAME.$1
- RC=$?
- [[ $RC -ne 0 ]] && return $RC
+ pool_list $fsname.$poolname || return $?
- destroy_pool_int $FSNAME.$1
+ destroy_pool_int $fsname.$poolname
RC=$?
[[ $RC -ne 0 ]] && return $RC
- wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$1 \
- 2>/dev/null || echo foo" "foo" && return 0
+ wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
+ 2>/dev/null || echo foo" "foo" || RC=1
+
+ if [[ $RC -eq 0 ]]; then
+ remove_pool_from_list $fsname.$poolname
+ else
+ error "destroy pool failed $1"
+ fi
+ return $RC
+}
+
+destroy_pools () {
+ local fsname=${1:-$FSNAME}
+ local poolname
+ local listvar=${fsname}_CREATED_POOLS
+
+ pool_list $fsname
+
+ [ x${!listvar} = x ] && return 0
+
+ echo destroy the created pools: ${!listvar}
+ for poolname in ${!listvar//,/ }; do
+ destroy_pool $fsname.$poolname
+ done
+}
+
+cleanup_pools () {
+ local fsname=${1:-$FSNAME}
+ trap 0
+ destroy_pools $fsname
}
gather_logs () {
# of writing the file to an NFS directory so it doesn't need to be copied.
local tmp=$TMP
local docp=true
- [ -d "$SHARED_DIR_LOGS" ] && tmp=$SHARED_DIR_LOGS && docp=false
-
+ [ -f $LOGDIR/shared ] && docp=false
+
# dump lustre logs, dmesg
- do_nodes $list "log=$tmp/\\\$(hostname)-debug-$ts.log ;
-lctl dk \\\$log >/dev/null;
-log=$tmp/\\\$(hostname)-dmesg-$ts.log;
-dmesg > \\\$log; "
-
- # FIXME: does it make sense to collect the logs for $ts only, but all
- # TESTSUITE logs?
- # rsync $TMP/*${TESTSUITE}* to gather the logs dumped by error fn
- local logs=$TMP/'*'${TESTSUITE}'*'
- if $docp; then
- logs=$logs' '$tmp/'*'$ts'*'
- fi
- for node in ${list//,/ }; do
- rsync -az $node:"$logs" $TMP
- done
- local archive=$TMP/${TESTSUITE}-$ts.tar.bz2
- tar -jcf $archive $tmp/*$ts* $TMP/*${TESTSUITE}*
+ prefix="$LOGDIR/${TESTSUITE}.${TESTNAME}"
+ suffix="$ts.log"
+ echo "Dumping lctl log to ${prefix}.*.${suffix}"
+
+ if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then
+ echo "Dumping logs only on local client."
+ $LCTL dk > ${prefix}.debug_log.$(hostname).${suffix}
+ dmesg > ${prefix}.dmesg.$(hostname).${suffix}
+ return
+ fi
+
+ do_nodes --verbose $list \
+ "$LCTL dk > ${prefix}.debug_log.\\\$(hostname).${suffix};
+ dmesg > ${prefix}.dmesg.\\\$(hostname).${suffix}"
+ if [ ! -f $LOGDIR/shared ]; then
+ do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR
+ fi
+
+ local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2
+ tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}*
echo $archive
}
return $rc
}
+get_clients_mount_count () {
+ local clients=${CLIENTS:-`hostname`}
+
+ # we need to take into account the client mounts and
+ # exclude mds/ost mounts, if any;
+ do_nodes $clients cat /proc/mounts | grep lustre | grep $MOUNT | wc -l
+}
+
+# gss functions
+PROC_CLI="srpc_info"
+
+combination()
+{
+ local M=$1
+ local N=$2
+ local R=1
+
+ if [ $M -lt $N ]; then
+ R=0
+ else
+ N=$((N + 1))
+ while [ $N -le $M ]; do
+ R=$((R * N))
+ N=$((N + 1))
+ done
+ fi
+
+ echo $R
+ return 0
+}
+
+calc_connection_cnt() {
+ local dir=$1
+
+ # MDT->MDT = 2 * C(M, 2)
+ # MDT->OST = M * O
+ # CLI->OST = C * O
+ # CLI->MDT = C * M
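+    # e.g. with MDSCOUNT=1, OSTCOUNT=2 and a single client this gives
+    # mdt2mdt=0, mdt2ost=2, cli2ost=2, cli2mdt=1, all2all=5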
+ comb_m2=$(combination $MDSCOUNT 2)
+
+ local num_clients=$(get_clients_mount_count)
+
+ local cnt_mdt2mdt=$((comb_m2 * 2))
+ local cnt_mdt2ost=$((MDSCOUNT * OSTCOUNT))
+ local cnt_cli2ost=$((num_clients * OSTCOUNT))
+ local cnt_cli2mdt=$((num_clients * MDSCOUNT))
+ local cnt_all2ost=$((cnt_mdt2ost + cnt_cli2ost))
+ local cnt_all2mdt=$((cnt_mdt2mdt + cnt_cli2mdt))
+ local cnt_all2all=$((cnt_mdt2ost + cnt_mdt2mdt + cnt_cli2ost + cnt_cli2mdt))
+
+ local var=cnt_$dir
+ local res=${!var}
+
+ echo $res
+}
+
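+# set_rule <fsname> <net> <dir> <flavor>: set an sptlrpc flavor rule through
+# "lctl conf_param" on the MGS; "any" for <net> or <dir> applies the rule to
+# all networks or directions.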
+set_rule()
+{
+ local tgt=$1
+ local net=$2
+ local dir=$3
+ local flavor=$4
+ local cmd="$tgt.srpc.flavor"
+
+ if [ $net == "any" ]; then
+ net="default"
+ fi
+ cmd="$cmd.$net"
+
+ if [ $dir != "any" ]; then
+ cmd="$cmd.$dir"
+ fi
+
+ cmd="$cmd=$flavor"
+ log "Setting sptlrpc rule: $cmd"
+ do_facet mgs "$LCTL conf_param $cmd"
+}
+
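+# count_flvr <output> <flavor>: count the connections in the given srpc_info
+# output that use <flavor> (an rpc flavor, optionally followed by a "-bulk*"
+# specification).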
+count_flvr()
+{
+ local output=$1
+ local flavor=$2
+ local count=0
+
+ rpc_flvr=`echo $flavor | awk -F - '{ print $1 }'`
+ bulkspec=`echo $flavor | awk -F - '{ print $2 }'`
+
+ count=`echo "$output" | grep "rpc flavor" | grep $rpc_flvr | wc -l`
+
+ if [ "x$bulkspec" != "x" ]; then
+ algs=`echo $bulkspec | awk -F : '{ print $2 }'`
+
+ if [ "x$algs" != "x" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" | grep $algs | wc -l`
+ else
+ bulk=`echo $bulkspec | awk -F : '{ print $1 }'`
+ if [ $bulk == "bulkn" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep "null/null" | wc -l`
+ elif [ $bulk == "bulki" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep "/null" | grep -v "null/" | wc -l`
+ else
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep -v "/null" | grep -v "null/" | wc -l`
+ fi
+ fi
+
+ [ $bulk_count -lt $count ] && count=$bulk_count
+ fi
+
+ echo $count
+}
+
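+# flvr_cnt_<src>2<dst>: count the <src>-to-<dst> connections that currently
+# use the given flavor, based on each node's $PROC_CLI (srpc_info) output.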
+flvr_cnt_cli2mdt()
+{
+ local flavor=$1
+ local cnt
+
+ local clients=${CLIENTS:-`hostname`}
+
+ for c in ${clients//,/ }; do
+ output=`do_node $c lctl get_param -n mdc.*-MDT*-mdc-*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt
+}
+
+flvr_cnt_cli2ost()
+{
+ local flavor=$1
+ local cnt
+
+ local clients=${CLIENTS:-`hostname`}
+
+ for c in ${clients//,/ }; do
+ output=`do_node $c lctl get_param -n osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt
+}
+
+flvr_cnt_mdt2mdt()
+{
+ local flavor=$1
+ local cnt=0
+
+ if [ $MDSCOUNT -le 1 ]; then
+ echo 0
+ return
+ fi
+
+ for num in `seq $MDSCOUNT`; do
+ output=`do_facet mds$num lctl get_param -n mdc.*-MDT*-mdc[0-9]*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt;
+}
+
+flvr_cnt_mdt2ost()
+{
+ local flavor=$1
+ local cnt=0
+
+ for num in `seq $MDSCOUNT`; do
+ output=`do_facet mds$num lctl get_param -n osc.*OST*-osc-MDT*.$PROC_CLI 2>/dev/null`
+ tmpcnt=`count_flvr "$output" $flavor`
+ cnt=$((cnt + tmpcnt))
+ done
+ echo $cnt;
+}
+
+flvr_cnt_mgc2mgs()
+{
+ local flavor=$1
+
+ output=`do_facet client lctl get_param -n mgc.*.$PROC_CLI 2>/dev/null`
+ count_flvr "$output" $flavor
+}
+
+do_check_flavor()
+{
+ local dir=$1 # from to
+ local flavor=$2 # flavor expected
+ local res=0
+
+ if [ $dir == "cli2mdt" ]; then
+ res=`flvr_cnt_cli2mdt $flavor`
+ elif [ $dir == "cli2ost" ]; then
+ res=`flvr_cnt_cli2ost $flavor`
+ elif [ $dir == "mdt2mdt" ]; then
+ res=`flvr_cnt_mdt2mdt $flavor`
+ elif [ $dir == "mdt2ost" ]; then
+ res=`flvr_cnt_mdt2ost $flavor`
+ elif [ $dir == "all2ost" ]; then
+ res1=`flvr_cnt_mdt2ost $flavor`
+ res2=`flvr_cnt_cli2ost $flavor`
+ res=$((res1 + res2))
+ elif [ $dir == "all2mdt" ]; then
+ res1=`flvr_cnt_mdt2mdt $flavor`
+ res2=`flvr_cnt_cli2mdt $flavor`
+ res=$((res1 + res2))
+ elif [ $dir == "all2all" ]; then
+ res1=`flvr_cnt_mdt2ost $flavor`
+ res2=`flvr_cnt_cli2ost $flavor`
+ res3=`flvr_cnt_mdt2mdt $flavor`
+ res4=`flvr_cnt_cli2mdt $flavor`
+ res=$((res1 + res2 + res3 + res4))
+ fi
+
+ echo $res
+}
+
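+# wait_flavor <dir> <flavor> [expected]: poll up to 20 times (4s apart) until
+# the expected number of <dir> connections use <flavor>; return 1 on timeout.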
+wait_flavor()
+{
+ local dir=$1 # from to
+ local flavor=$2 # flavor expected
+ local expect=${3:-$(calc_connection_cnt $dir)} # number expected
+
+ local res=0
+
+ for ((i=0;i<20;i++)); do
+ echo -n "checking..."
+ res=$(do_check_flavor $dir $flavor)
+ if [ $res -eq $expect ]; then
+ echo "found $res $flavor connections of $dir, OK"
+ return 0
+ else
+ echo "found $res $flavor connections of $dir, not ready ($expect)"
+ sleep 4
+ fi
+ done
+
+ echo "Error checking $flavor of $dir: expect $expect, actual $res"
+ return 1
+}
+
+restore_to_default_flavor()
+{
+ local proc="mgs.MGS.live.$FSNAME"
+
+ echo "restoring to default flavor..."
+
+ nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l`
+
+ # remove all existing rules if any
+ if [ $nrule -ne 0 ]; then
+ echo "$nrule existing rules"
+ for rule in `do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor."`; do
+ echo "remove rule: $rule"
+ spec=`echo $rule | awk -F = '{print $1}'`
+ do_facet mgs "$LCTL conf_param $spec="
+ done
+ fi
+
+ # verify no rules left
+ nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l`
+ [ $nrule -ne 0 ] && error "still $nrule rules left"
+
+ # wait for default flavor to be applied
+ # currently the default flavor for all connections is 'null'
+ wait_flavor all2all null
+ echo "now at default flavor settings"
+}
+
+set_flavor_all()
+{
+ local flavor=${1:-null}
+
+ echo "setting all flavor to $flavor"
+
+ # FIXME need parameter to this fn
+ # and remove global vars
+ local cnt_all2all=$(calc_connection_cnt all2all)
+
+ local res=$(do_check_flavor all2all $flavor)
+ if [ $res -eq $cnt_all2all ]; then
+ echo "already have total $res $flavor connections"
+ return
+ fi
+
+ echo "found $res $flavor out of total $cnt_all2all connections"
+ restore_to_default_flavor
+
+ [[ $flavor = null ]] && return 0
+
+ set_rule $FSNAME any any $flavor
+ wait_flavor all2all $flavor
+}
+
+
+check_logdir() {
+ local dir=$1
+ # Checking for shared logdir
+ if [ ! -d $dir ]; then
+ # Not found. Create local logdir
+ mkdir -p $dir
+ else
+ touch $dir/node.$(hostname).yml
+ fi
+ return 0
+}
+
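+# Return 0 only if every node in the configuration has created its
+# node.<hostname>.yml marker in $1, i.e. the log directory is shared and
+# writable from all nodes.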
+check_write_access() {
+ local dir=$1
+ for node in $(nodes_list); do
+ if [ ! -f "$dir/node.${node}.yml" ]; then
+ # Logdir not accessible/writable from this node.
+ return 1
+ fi
+ done
+ return 0
+}
+
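+# Initialize results logging: create $LOGDIR on all nodes, mark it as shared
+# if every node can write to it, and start the YAML results file ($YAML_LOG).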
+init_logging() {
+ if [[ -n $YAML_LOG ]]; then
+ return
+ fi
+ export YAML_LOG=${LOGDIR}/results.yml
+ mkdir -p $LOGDIR
+ init_clients_lists
+
+ do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $LOGDIR
+ if check_write_access $LOGDIR; then
+ touch $LOGDIR/shared
+ echo "Logging to shared log directory: $LOGDIR"
+ else
+ echo "Logging to local directory: $LOGDIR"
+ fi
+
+ yml_nodes_file $LOGDIR
+ yml_results_file >> $YAML_LOG
+}
+
+log_test() {
+ yml_log_test $1 >> $YAML_LOG
+}
+
+log_sub_test() {
+ yml_log_sub_test $@ >> $YAML_LOG
+}
+