#!/bin/bash
-# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
-# vim:shiftwidth=4:softtabstop=4:tabstop=4:
# Simple function used by run_*.sh scripts
}
mpi_run () {
- local mpirun="$MPIRUN $MPIRUN_OPTIONS"
+ local mpirun="$MPIRUN $MPIRUN_OPTIONS --oversubscribe"
local command="$mpirun $@"
local mpilog=$TMP/mpi.log
local rc
}
nids_list () {
- local list
- for i in ${1//,/ }; do
- list="$list $i@$NETTYPE"
- done
- echo $list
+ local list
+ local escape="$2"
+ for i in ${1//,/ }; do
+ if [ "$list" = "" ]; then
+ list="$i@$NETTYPE"
+ else
+ list="$list$escape $i@$NETTYPE"
+ fi
+ done
+ echo $list
}
# FIXME: all setup/cleanup can be done without rpc.sh
export LST_SESSION=`$LST show_session 2>/dev/null | awk -F " " '{print $5}'`
[ "$LST_SESSION" == "" ] && return
+ $LST stop b
if $verbose; then
$LST show_error c s
fi
- $LST stop b
$LST end_session
}
}
run_compilebench() {
- # Space estimation:
- # compile dir kernel-0 ~1GB
- # required space ~1GB * cbench_IDIRS
+ local dir=${1:-$DIR}
+ local cbench_DIR=${cbench_DIR:-""}
+ local cbench_IDIRS=${cbench_IDIRS:-2}
+ local cbench_RUNS=${cbench_RUNS:-2}
- cbench_DIR=${cbench_DIR:-""}
- cbench_IDIRS=${cbench_IDIRS:-2}
- cbench_RUNS=${cbench_RUNS:-2}
+ print_opts cbench_DIR cbench_IDIRS cbench_RUNS
- print_opts cbench_DIR cbench_IDIRS cbench_RUNS
+ [ x$cbench_DIR = x ] &&
+ skip_env "compilebench not found"
- [ x$cbench_DIR = x ] &&
- { skip_env "compilebench not found" && return; }
+ [ -e $cbench_DIR/compilebench ] ||
+ skip_env "No compilebench build"
- [ -e $cbench_DIR/compilebench ] || \
- { skip_env "No compilebench build" && return; }
-
- local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
+ # Space estimation:
+ # compile dir kernel-0 ~1GB
+ # required space ~1GB * cbench_IDIRS
+ local space=$(df -P $dir | tail -n 1 | awk '{ print $4 }')
if [[ $space -le $((1024 * 1024 * cbench_IDIRS)) ]]; then
cbench_IDIRS=$((space / 1024 / 1024))
[[ $cbench_IDIRS -eq 0 ]] &&
- skip_env "Need free space at least 1GB, have $space" &&
- return
+ skip_env "Need free space at least 1GB, have $space"
- echo "free space=$space, reducing initial dirs to $cbench_IDIRS"
+ echo "reducing initial dirs to $cbench_IDIRS"
fi
+ echo "free space = $space KB"
- # FIXME:
- # t-f _base needs to be modifyed to set properly tdir
- # for new "test_foo" functions names
- # local testdir=$DIR/$tdir
- local testdir=$DIR/d0.compilebench
- mkdir -p $testdir
+ # FIXME:
+	# t-f _base needs to be modified to set tdir properly
+ # for new "test_foo" functions names
+ # local testdir=$DIR/$tdir
+ local testdir=$dir/d0.compilebench.$$
+ mkdir -p $testdir
local savePWD=$PWD
cd $cbench_DIR
}
run_metabench() {
-
- METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
- mbench_NFILES=${mbench_NFILES:-30400}
- # threads per client
- mbench_THREADS=${mbench_THREADS:-4}
+ local dir=${1:-$DIR}
+ local mntpt=${2:-$MOUNT}
+ METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
+ mbench_NFILES=${mbench_NFILES:-30400}
+ # threads per client
+ mbench_THREADS=${mbench_THREADS:-4}
mbench_OPTIONS=${mbench_OPTIONS:-}
+ mbench_CLEANUP=${mbench_CLEANUP:-true}
- [ x$METABENCH = x ] &&
- { skip_env "metabench not found" && return; }
+ [ x$METABENCH = x ] && skip_env "metabench not found"
- # FIXME
- # Need space estimation here.
+ print_opts METABENCH clients mbench_NFILES mbench_THREADS
- print_opts METABENCH clients mbench_NFILES mbench_THREADS
-
- local testdir=$DIR/d0.metabench
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
+ local testdir=$dir/d0.metabench
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
- # -C Run the file creation tests.
- # -S Run the file stat tests.
- # -c nfile Number of files to be used in each test.
- # -k Cleanup. Remove the test directories.
- local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S -k $mbench_OPTIONS"
- echo "+ $cmd"
+ # -C Run the file creation tests. Creates zero byte files.
+ # -S Run the file stat tests.
+ # -c nfile Number of files to be used in each test.
+ # -k Cleanup files when finished.
+ local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S $mbench_OPTIONS"
+ echo "+ $cmd"
# find out if we need to use srun by checking $SRUN_PARTITION
if [ "$SRUN_PARTITION" ]; then
-np $((num_clients * $mbench_THREADS)) $cmd
fi
- local rc=$?
- if [ $rc != 0 ] ; then
- error "metabench failed! $rc"
- fi
- rm -rf $testdir
+ local rc=$?
+ if [ $rc != 0 ] ; then
+ error "metabench failed! $rc"
+ fi
+
+ if $mbench_CLEANUP; then
+ rm -rf $testdir
+ else
+ mv $dir/d0.metabench $mntpt/_xxx.$(date +%s).d0.metabench
+ fi
}
run_simul() {
+ SIMUL=${SIMUL:=$(which simul 2> /dev/null || true)}
+ [ x$SIMUL = x ] && skip_env "simul not found"
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
- SIMUL=${SIMUL:=$(which simul 2> /dev/null || true)}
- # threads per client
- simul_THREADS=${simul_THREADS:-2}
- simul_REP=${simul_REP:-20}
+ # threads per client
+ simul_THREADS=${simul_THREADS:-2}
+ simul_REP=${simul_REP:-20}
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
+ # FIXME
+ # Need space estimation here.
- [ x$SIMUL = x ] &&
- { skip_env "simul not found" && return; }
+ print_opts SIMUL clients simul_REP simul_THREADS
- # FIXME
- # Need space estimation here.
+ local testdir=$DIR/d0.simul
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
- print_opts SIMUL clients simul_REP simul_THREADS
+ # -n # : repeat each test # times
+ # -N # : repeat the entire set of tests # times
- local testdir=$DIR/d0.simul
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
-
- # -n # : repeat each test # times
- # -N # : repeat the entire set of tests # times
-
- local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"
+ local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"
echo "+ $cmd"
# find out if we need to use srun by checking $SRUN_PARTITION
}
run_mdtest() {
+ MDTEST=${MDTEST:=$(which mdtest 2> /dev/null || true)}
+ [ x$MDTEST = x ] && skip_env "mdtest not found"
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
- MDTEST=${MDTEST:=$(which mdtest 2> /dev/null || true)}
- # threads per client
- mdtest_THREADS=${mdtest_THREADS:-2}
- mdtest_nFiles=${mdtest_nFiles:-"100000"}
- # We devide the files by number of core
- mdtest_nFiles=$((mdtest_nFiles/mdtest_THREADS/num_clients))
- mdtest_iteration=${mdtest_iteration:-1}
+ # threads per client
+ mdtest_THREADS=${mdtest_THREADS:-2}
+ mdtest_nFiles=${mdtest_nFiles:-"100000"}
+	# We divide the files by the number of cores
+ mdtest_nFiles=$((mdtest_nFiles/mdtest_THREADS/num_clients))
+ mdtest_iteration=${mdtest_iteration:-1}
+ local mdtest_custom_params=${mdtest_custom_params:-""}
+ local type=${1:-"ssf"}
- local type=${1:-"ssf"}
+ # FIXME
+ # Need space estimation here.
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
+ print_opts MDTEST mdtest_iteration mdtest_THREADS mdtest_nFiles
- [ x$MDTEST = x ] &&
- { skip_env "mdtest not found" && return; }
+ local testdir=$DIR/d0.mdtest
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
- # FIXME
- # Need space estimation here.
+ # -i # : repeat each test # times
+ # -d : test dir
+ # -n # : number of file/dir to create/stat/remove
+ # -u : each process create/stat/remove individually
- print_opts MDTEST mdtest_iteration mdtest_THREADS mdtest_nFiles
+ local cmd="$MDTEST -d $testdir -i $mdtest_iteration \
+ -n $mdtest_nFiles $mdtest_custom_params"
- local testdir=$DIR/d0.mdtest
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
-
- # -i # : repeat each test # times
- # -d : test dir
- # -n # : number of file/dir to create/stat/remove
- # -u : each process create/stat/remove individually
-
- local cmd="$MDTEST -d $testdir -i $mdtest_iteration -n $mdtest_nFiles"
- [ $type = "fpp" ] && cmd="$cmd -u"
+ [ $type = "fpp" ] && cmd="$cmd -u"
echo "+ $cmd"
# find out if we need to use srun by checking $SRUN_PARTITION
}
run_connectathon() {
+ local dir=${1:-$DIR}
+ cnt_DIR=${cnt_DIR:-""}
+ cnt_NRUN=${cnt_NRUN:-10}
- cnt_DIR=${cnt_DIR:-""}
- cnt_NRUN=${cnt_NRUN:-10}
+ print_opts cnt_DIR cnt_NRUN
- print_opts cnt_DIR cnt_NRUN
+ [ x$cnt_DIR = x ] && skip_env "connectathon dir not found"
+ [ -e $cnt_DIR/runtests ] || skip_env "No connectathon runtests found"
- [ x$cnt_DIR = x ] &&
- { skip_env "connectathon dir not found" && return; }
+ # Space estimation:
+ # "special" tests create a 30 MB file + misc. small files
+ # required space ~40 MB
+ local space=$(df -P $dir | tail -n 1 | awk '{ print $4 }')
+ if [[ $space -le $((1024 * 40)) ]]; then
+ skip_env "Need free space at least 40MB, have $space KB"
+ fi
+ echo "free space = $space KB"
+
+ local testdir=$dir/d0.connectathon
+ mkdir -p $testdir
+
+ local savePWD=$PWD
+ cd $cnt_DIR
+
+ #
+ # To run connectathon:
+ # runtests [-a|-b|-g|-s|-l] [-f|-n|-t] [-N numpasses] [test-directory]
+ #
+ # One of the following test types
+ # -b basic
+ # -g general
+ # -s special
+ # -l lock
+ # -a all of the above
+ #
+ # -f a quick functional test
+ # -n suppress directory operations (mkdir and rmdir)
+ # -t run with time statistics (default for basic tests)
+ #
+ # -N numpasses - specifies the number of times to run
+ # the tests. Optional.
+
+ tests="-b -g -s"
+ # Include lock tests unless we're running on nfsv4
+ local fstype=$(df -TP $testdir | awk 'NR==2 {print $2}')
+ echo "$testdir: $fstype"
+ if [[ $fstype != "nfs4" ]]; then
+ tests="$tests -l"
+ fi
+ echo "tests: $tests"
+ for test in $tests; do
+ local cmd="sh ./runtests -N $cnt_NRUN $test -f $testdir"
+ local rc=0
+
+ log "$cmd"
+ eval $cmd
+ rc=$?
+ [ $rc = 0 ] || error "connectathon failed: $rc"
+ done
- [ -e $cnt_DIR/runtests ] || \
- { skip_env "No connectathon runtests found" && return; }
+ cd $savePWD
+ rm -rf $testdir
+}
- local testdir=$DIR/d0.connectathon
- mkdir -p $testdir
+run_ior() {
+ local type=${1:="ssf"}
+ local dir=${2:-$DIR}
+ local testdir=$dir/d0.ior.$type
+ local nfs_srvmntpt=$3
+
+ if [ "$NFSCLIENT" ]; then
+ [[ -n $nfs_srvmntpt ]] ||
+ { error "NFSCLIENT mode, but nfs exported dir"\
+ "is not set!" && return 1; }
+ fi
- local savePWD=$PWD
- cd $cnt_DIR
-
- #
- # cthon options (must be in this order)
- #
- # -N numpasses - will be passed to the runtests script. This argument
- # is optional. It specifies the number of times to run
- # through the tests.
- #
- # One of these test types
- # -b basic
- # -g general
- # -s special
- # -l lock
- # -a all of the above
- #
- # -f a quick functionality test
- #
-
- tests="-b -g -s"
- # Include lock tests unless we're running on nfsv4
- local fstype=$(df -TP $testdir | awk 'NR==2 {print $2}')
- echo "$testdir: $fstype"
- if [[ $fstype != "nfs4" ]]; then
- tests="$tests -l"
- fi
- echo "tests: $tests"
- for test in $tests; do
- local cmd="./runtests -N $cnt_NRUN $test -f $testdir"
- local rc=0
-
- log "$cmd"
- eval $cmd
- rc=$?
- [ $rc = 0 ] || error "connectathon failed: $rc"
- done
+ IOR=${IOR:-$(which IOR 2> /dev/null || true)}
+ [ x$IOR = x ] && skip_env "IOR not found"
+
+ # threads per client
+ ior_THREADS=${ior_THREADS:-2}
+ ior_iteration=${ior_iteration:-1}
+ ior_blockSize=${ior_blockSize:-6}
+ ior_blockUnit=${ior_blockUnit:-M} # K, M, G
+ ior_xferSize=${ior_xferSize:-1M}
+ ior_type=${ior_type:-POSIX}
+ ior_DURATION=${ior_DURATION:-30} # minutes
+ local multiplier=1
+ case ${ior_blockUnit} in
+ [G])
+ multiplier=$((1024 * 1024 * 1024))
+ ;;
+ [M])
+ multiplier=$((1024 * 1024))
+ ;;
+ [K])
+ multiplier=1024
+ ;;
+ *) error "Incorrect block unit should be one of [KMG]"
+ ;;
+ esac
+
+ # calculate the space in bytes
+ local space=$(df -B 1 -P $dir | tail -n 1 | awk '{ print $4 }')
+ local total_threads=$((num_clients * ior_THREADS))
+ echo "+ $ior_blockSize * $multiplier * $total_threads "
+ if [ $((space / 2)) -le \
+ $((ior_blockSize * multiplier * total_threads)) ]; then
+ ior_blockSize=$((space / 2 / multiplier / total_threads))
+ [ $ior_blockSize -eq 0 ] &&
+ skip_env "Need free space more than $((2 * total_threads)) \
+ ${ior_blockUnit}: have $((space / multiplier))"
+
+ echo "(reduced blockSize to $ior_blockSize \
+ ${ior_blockUnit} bytes)"
+ fi
- cd $savePWD
- rm -rf $testdir
-}
+ print_opts IOR ior_THREADS ior_DURATION MACHINEFILE
-run_ior() {
- local type=${1:="ssf"}
-
- IOR=${IOR:-$(which IOR 2> /dev/null || true)}
- # threads per client
- ior_THREADS=${ior_THREADS:-2}
- ior_iteration=${ior_iteration:-1}
- ior_blockSize=${ior_blockSize:-6} # GB
- ior_xferSize=${ior_xferSize:-2m}
- ior_type=${ior_type:-POSIX}
- ior_DURATION=${ior_DURATION:-30} # minutes
-
- [ x$IOR = x ] &&
- { skip_env "IOR not found" && return; }
-
- local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
- local total_threads=$(( num_clients * ior_THREADS ))
- echo "+ $ior_blockSize * 1024 * 1024 * $total_threads "
- if [ $((space / 2)) -le \
- $(( ior_blockSize * 1024 * 1024 * total_threads)) ]; then
- echo "+ $space * 9/10 / 1024 / 1024 / $num_clients / $ior_THREADS"
- ior_blockSize=$(( space /2 /1024 /1024 / num_clients / ior_THREADS ))
- [ $ior_blockSize = 0 ] && \
- skip_env "Need free space more than $((2 * total_threads))GB: \
- $((total_threads *1024 *1024*2)), have $space" && return
-
- local reduced_size="$num_clients x $ior_THREADS x $ior_blockSize"
- echo "free space=$space, Need: $reduced_size GB"
- echo "(blockSize reduced to $ior_blockSize Gb)"
- fi
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
+ if [ -z "$NFSCLIENT" ]; then
+ ior_stripe_params=${ior_stripe_params:-"-c -1"}
+ $LFS setstripe $testdir $ior_stripe_params ||
+ { error "setstripe failed" && return 2; }
+ fi
- print_opts IOR ior_THREADS ior_DURATION MACHINEFILE
+ #
+ # -b N blockSize --
+ # contiguous bytes to write per task (e.g.: 8, 4K, 2M, 1G)"
+ # -o S testFileName
+ # -t N transferSize -- size of transfer in bytes (e.g.: 8, 4K, 2M, 1G)"
+ # -w writeFile -- write file"
+ # -r readFile -- read existing file"
+ # -W checkWrite -- check read after write"
+ # -C reorderTasks -- changes task ordering to n+1 ordering for readback
+ # -T maxTimeDuration -- max time in minutes to run tests"
+ # -k keepFile -- keep testFile(s) on program exit
+
+ local cmd
+ if [ -n "$ior_custom_params" ]; then
+ cmd="$IOR $ior_custom_params -o $testdir/iorData"
+ else
+ cmd="$IOR -a $ior_type -b ${ior_blockSize}${ior_blockUnit} \
+ -o $testdir/iorData -t $ior_xferSize -v -C -w -r -W \
+ -i $ior_iteration -T $ior_DURATION -k"
+ fi
- local testdir=$DIR/d0.ior.$type
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
- if [ "$NFSCLIENT" ]; then
- setstripe_nfsserver $testdir -c -1 ||
- { error "setstripe on nfsserver failed" && return 1; }
- else
- $LFS setstripe $testdir -c -1 ||
- { error "setstripe failed" && return 2; }
- fi
- #
- # -b N blockSize --
- # contiguous bytes to write per task (e.g.: 8, 4k, 2m, 1g)"
- # -o S testFileName
- # -t N transferSize -- size of transfer in bytes (e.g.: 8, 4k, 2m, 1g)"
- # -w writeFile -- write file"
- # -r readFile -- read existing file"
- # -W checkWrite -- check read after write"
- # -C reorderTasks -- changes task ordering to n+1 ordering for readback
- # -T maxTimeDuration -- max time in minutes to run tests"
- # -k keepFile -- keep testFile(s) on program exit
-
- local cmd="$IOR -a $ior_type -b ${ior_blockSize}g -o $testdir/iorData \
- -t $ior_xferSize -v -C -w -r -W -i $ior_iteration -T $ior_DURATION -k"
- [ $type = "fpp" ] && cmd="$cmd -F"
+ [ $type = "fpp" ] && cmd="$cmd -F"
echo "+ $cmd"
# find out if we need to use srun by checking $SRUN_PARTITION
-n $((num_clients * ior_THREADS)) -p $SRUN_PARTITION \
-- $cmd
else
+ mpi_ior_custom_threads=${mpi_ior_custom_threads:-"$((num_clients * ior_THREADS))"}
mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
- -np $((num_clients * $ior_THREADS)) $cmd
+ -np $mpi_ior_custom_threads $cmd
fi
local rc=$?
}
run_mib() {
-
- MIB=${MIB:=$(which mib 2> /dev/null || true)}
- # threads per client
- mib_THREADS=${mib_THREADS:-2}
- mib_xferSize=${mib_xferSize:-1m}
- mib_xferLimit=${mib_xferLimit:-5000}
- mib_timeLimit=${mib_timeLimit:-300}
-
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
-
- [ x$MIB = x ] &&
- { skip_env "MIB not found" && return; }
-
- print_opts MIB mib_THREADS mib_xferSize mib_xferLimit mib_timeLimit \
- MACHINEFILE
-
- local testdir=$DIR/d0.mib
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
- $LFS setstripe $testdir -c -1 ||
- { error "setstripe failed" && return 2; }
- #
- # -I Show intermediate values in output
- # -H Show headers in output
- # -L Do not issue new system calls after this many seconds
- # -s Use system calls of this size
- # -t test dir
- # -l Issue no more than this many system calls
- local cmd="$MIB -t $testdir -s $mib_xferSize -l $mib_xferLimit \
- -L $mib_timeLimit -HI -p mib.$(date +%Y%m%d%H%M%S)"
+ MIB=${MIB:=$(which mib 2> /dev/null || true)}
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
+ [ x$MIB = x ] && skip_env "MIB not found"
+
+ # threads per client
+ mib_THREADS=${mib_THREADS:-2}
+ mib_xferSize=${mib_xferSize:-1m}
+ mib_xferLimit=${mib_xferLimit:-5000}
+ mib_timeLimit=${mib_timeLimit:-300}
+
+ print_opts MIB mib_THREADS mib_xferSize mib_xferLimit mib_timeLimit \
+ MACHINEFILE
+
+ local testdir=$DIR/d0.mib
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
+ $LFS setstripe $testdir -c -1 ||
+ error "setstripe failed"
+ #
+ # -I Show intermediate values in output
+ # -H Show headers in output
+ # -L Do not issue new system calls after this many seconds
+ # -s Use system calls of this size
+ # -t test dir
+ # -l Issue no more than this many system calls
+ local cmd="$MIB -t $testdir -s $mib_xferSize -l $mib_xferLimit \
+ -L $mib_timeLimit -HI -p mib.$(date +%Y%m%d%H%M%S)"
echo "+ $cmd"
# find out if we need to use srun by checking $SRUN_PARTITION
}
run_cascading_rw() {
+ CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
+ [ x$CASC_RW = x ] && skip_env "cascading_rw not found"
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
- CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
- # threads per client
- casc_THREADS=${casc_THREADS:-2}
- casc_REP=${casc_REP:-300}
+ # threads per client
+ casc_THREADS=${casc_THREADS:-2}
+ casc_REP=${casc_REP:-300}
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
+ # FIXME
+ # Need space estimation here.
- [ x$CASC_RW = x ] &&
- { skip_env "cascading_rw not found" && return; }
+ print_opts CASC_RW clients casc_THREADS casc_REP MACHINEFILE
- # FIXME
- # Need space estimation here.
+ local testdir=$DIR/d0.cascading_rw
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
- print_opts CASC_RW clients casc_THREADS casc_REP MACHINEFILE
+ # -g: debug mode
+ # -n: repeat test # times
- local testdir=$DIR/d0.cascading_rw
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
-
- # -g: debug mode
- # -n: repeat test # times
-
- local cmd="$CASC_RW -g -d $testdir -n $casc_REP"
+ local cmd="$CASC_RW -g -d $testdir -n $casc_REP"
echo "+ $cmd"
mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
}
run_write_append_truncate() {
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
+ # location is lustre/tests dir
+ if ! which write_append_truncate > /dev/null 2>&1 ; then
+ skip_env "write_append_truncate not found"
+ fi
- # threads per client
- write_THREADS=${write_THREADS:-8}
- write_REP=${write_REP:-10000}
+ # threads per client
+ write_THREADS=${write_THREADS:-8}
+ write_REP=${write_REP:-10000}
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
+ # FIXME
+ # Need space estimation here.
- # location is lustre/tests dir
- if ! which write_append_truncate > /dev/null 2>&1 ; then
- skip_env "write_append_truncate not found"
- return
- fi
+ local testdir=$DIR/d0.write_append_truncate
+ local file=$testdir/f0.wat
- # FIXME
- # Need space estimation here.
+ print_opts clients write_REP write_THREADS MACHINEFILE
- local testdir=$DIR/d0.write_append_truncate
- local file=$testdir/f0.wat
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
- print_opts clients write_REP write_THREADS MACHINEFILE
-
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
-
- local cmd="write_append_truncate -n $write_REP $file"
+ local cmd="write_append_truncate -n $write_REP $file"
echo "+ $cmd"
mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
}
run_write_disjoint() {
+ WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint 2> /dev/null ||
+ true)}
+ [ x$WRITE_DISJOINT = x ] && skip_env "write_disjoint not found"
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
- WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint \
- 2> /dev/null || true)}
- # threads per client
- wdisjoint_THREADS=${wdisjoint_THREADS:-4}
- wdisjoint_REP=${wdisjoint_REP:-10000}
-
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
-
- [ x$WRITE_DISJOINT = x ] &&
- { skip_env "write_disjoint not found" && return; }
+ # threads per client
+ wdisjoint_THREADS=${wdisjoint_THREADS:-4}
+ wdisjoint_REP=${wdisjoint_REP:-10000}
+ chunk_size_limit=$1
# FIXME
# Need space estimation here.
# mpi_run uses mpiuser
chmod 0777 $testdir
- local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP"
+ local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP -m \
+ $chunk_size_limit"
echo "+ $cmd"
mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
}
run_parallel_grouplock() {
+ PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock \
+ 2> /dev/null || true)}
- PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock \
- 2> /dev/null || true)}
- parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
+ [ x$PARALLEL_GROUPLOCK = x ] && skip "PARALLEL_GROUPLOCK not found"
+ [ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
- if [ "$NFSCLIENT" ]; then
- skip "skipped for NFSCLIENT mode"
- return
- fi
+ parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
- [ x$PARALLEL_GROUPLOCK = x ] &&
- { skip "PARALLEL_GROUPLOCK not found" && return; }
+ print_opts clients parallel_grouplock_MINTASKS MACHINEFILE
- print_opts clients parallel_grouplock_MINTASKS MACHINEFILE
+ local testdir=$DIR/d0.parallel_grouplock
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
- local testdir=$DIR/d0.parallel_grouplock
- mkdir -p $testdir
- # mpi_run uses mpiuser
- chmod 0777 $testdir
-
- local cmd
- local status=0
- local subtest
+ local cmd
+ local status=0
+ local subtest
for i in $(seq 12); do
subtest="-t $i"
local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
}
run_statahead () {
+ if [[ -n $NFSCLIENT ]]; then
+ skip "Statahead testing is not supported on NFS clients."
+ fi
+ [ x$MDSRATE = x ] && skip_env "mdsrate not found"
- statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
- statahead_NUMFILES=${statahead_NUMFILES:-500000}
-
- if [[ -n $NFSCLIENT ]]; then
- skip "Statahead testing is not supported on NFS clients."
- return 0
- fi
-
- [ x$MDSRATE = x ] &&
- { skip_env "mdsrate not found" && return; }
+ statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
+ statahead_NUMFILES=${statahead_NUMFILES:-500000}
- print_opts MDSRATE clients statahead_NUMMNTPTS statahead_NUMFILES
+ print_opts MDSRATE clients statahead_NUMMNTPTS statahead_NUMFILES
- # create large dir
+ # create large dir
# do not use default "d[0-9]*" dir name
# to avoid of rm $statahead_NUMFILES (500k) files in t-f cleanup
rm -rf $testdir
cleanup_statahead $clients $mntpt_root $num_mntpts
}
+
+cleanup_rr_alloc () {
+ trap 0
+ local clients="$1"
+ local mntpt_root="$2"
+ local rr_alloc_MNTPTS="$3"
+ local mntpt_dir=$(dirname ${mntpt_root})
+
+ for i in $(seq 0 $((rr_alloc_MNTPTS - 1))); do
+ zconf_umount_clients $clients ${mntpt_root}$i ||
+ error_exit "Failed to umount lustre on ${mntpt_root}$i"
+ done
+ do_nodes $clients "rm -rf $mntpt_dir"
+}
+
+run_rr_alloc() {
+ remote_mds_nodsh && skip "remote MDS with nodsh"
+ echo "===Test gives more reproduction percentage if number of "\
+ "client and ost are more. Test with 44 or more clients "\
+ "and 73 or more OSTs gives 100% reproduction rate=="
+
+ RR_ALLOC=${RR_ALLOC:-$(which rr_alloc 2> /dev/null || true)}
+ [ x$RR_ALLOC = x ] && skip_env "rr_alloc not found"
+ declare -a diff_max_min_arr
+ # foeo = file on each ost. calc = calculated.
+ local ost_idx
+ local foeo_calc
+ local qos_prec_objs="${TMP}/qos_and_precreated_objects"
+ local rr_alloc_NFILES=${rr_alloc_NFILES:-555}
+ local rr_alloc_MNTPTS=${rr_alloc_MNTPTS:-11}
+ local total_MNTPTS=$((rr_alloc_MNTPTS * num_clients))
+ local mntpt_root="${TMP}/rr_alloc_mntpt/lustre"
+ if [ $MDSCOUNT -lt 2 ]; then
+ [ -e $DIR/$tdir ] || mkdir -p $DIR/$tdir
+ else
+ [ -e $DIR/$tdir ] || $LFS mkdir -i 0 $DIR/$tdir
+ fi
+ chmod 0777 $DIR/$tdir
+ $SETSTRIPE -c 1 /$DIR/$tdir
+
+ trap "cleanup_rr_alloc $clients $mntpt_root $rr_alloc_MNTPTS" EXIT ERR
+ for i in $(seq 0 $((rr_alloc_MNTPTS - 1))); do
+ zconf_mount_clients $clients ${mntpt_root}$i $MOUNT_OPTS ||
+ error_exit "Failed to mount lustre on ${mntpt_root}$i $clients"
+ done
+
+ local cmd="$RR_ALLOC $mntpt_root/$tdir/ash $rr_alloc_NFILES \
+ $num_clients"
+
+ # Save mdt values, set threshold to 100% i.e always Round Robin,
+ # restore the saved values again after creating files...
+ save_lustre_params mds1 \
+ "lov.$FSNAME-MDT0000*.qos_threshold_rr" > $qos_prec_objs
+ save_lustre_params mds1 \
+ "osp.$FSNAME-OST*-osc-MDT0000.create_count" >> $qos_prec_objs
+
+ local old_create_count=$(grep -e "create_count" $qos_prec_objs |
+ cut -d'=' -f 2 | sort -nr | head -n1)
+
+ # Make sure that every osp has enough precreated objects for the file
+ # creation app
+
+ # create_count is always set to the power of 2 only, so if the files
+ # per OST are not multiple of that then it will be set to nearest
+ # lower power of 2. So set 'create_count' to the upper power of 2.
+
+ foeo_calc=$((rr_alloc_NFILES * total_MNTPTS / OSTCOUNT))
+ local create_count=$((2 * foeo_calc))
+ do_facet mds1 "$LCTL set_param -n \
+ lov.$FSNAME-MDT0000*.qos_threshold_rr 100 \
+ osp.$FSNAME-OST*-osc-MDT0000.create_count $create_count" ||
+ error "failed while setting qos_threshold_rr & creat_count"
+
+ # Create few temporary files in order to increase the precreated objects
+ # to a desired value, before starting 'rr_alloc' app. Due to default
+ # value 32 of precreation count (OST_MIN_PRECREATE=32), precreated
+	# objects available are 32 initially; these get exhausted very soon,
+ # which causes skip of some osps when very large number of files
+ # is created per OSTs.
+ createmany -o $DIR/$tdir/foo- $(((old_create_count + 1) * OSTCOUNT)) \
+ > /dev/null
+ rm -f /$DIR/$tdir/foo*
+
+ # Check for enough precreated objects... We should not
+ # fail here because code(osp_precreate.c) also takes care of it.
+ # So we have good chances of passing test even if this check fails.
+ local mdt_idx=0
+ for ost_idx in $(seq 0 $((OSTCOUNT - 1))); do
+ [[ $(precreated_ost_obj_count $mdt_idx $ost_idx) -ge \
+ $foeo_calc ]] || echo "Warning: test may fail because" \
+ "of lack of precreated objects on OST${ost_idx}"
+ done
+
+ if [[ $total_MNTPTS -ne 0 ]]; then
+ # Now start the actual file creation app.
+ mpi_run "-np $total_MNTPTS" $cmd || return
+ else
+ error "No mount point"
+ fi
+
+ restore_lustre_params < $qos_prec_objs
+ rm -f $qos_prec_objs
+
+ diff_max_min_arr=($($GETSTRIPE -r $DIR/$tdir/ |
+ grep "lmm_stripe_offset:" | awk '{print $2}' | sort -n |
+ uniq -c | awk 'NR==1 {min=max=$1} \
+ { $1<min ? min=$1 : min; $1>max ? max=$1 : max} \
+ END {print max-min, max, min}'))
+
+ rm -rf $DIR/$tdir
+
+	# In case of a fairly large number of file creations using RR (round-robin)
+ # there can be two cases in which deviation will occur than the regular
+ # RR algo behaviour-
+ # 1- When rr_alloc does not start right with 'lqr_start_count' reseeded,
+ # 2- When rr_alloc does not finish with 'lqr_start_count == 0'.
+ # So the difference of files b/w any 2 OST should not be more than 2.
+ [[ ${diff_max_min_arr[0]} -le 2 ]] ||
+ error "Uneven distribution detected: difference between" \
+ "maximum files per OST (${diff_max_min_arr[1]}) and" \
+ "minimum files per OST (${diff_max_min_arr[2]}) must not be" \
+ "greater than 2"
+}
+
+run_fs_test() {
+ # fs_test.x is the default name for exe
+ FS_TEST=${FS_TEST:=$(which fs_test.x 2> /dev/null || true)}
+
+ local clients=${CLIENTS:-$(hostname)}
+ local testdir=$DIR/d0.fs_test
+ local file=${testdir}/fs_test
+ fs_test_threads=${fs_test_threads:-2}
+ fs_test_type=${fs_test_type:-1}
+ fs_test_nobj=${fs_test_nobj:-10}
+ fs_test_check=${fs_test_check:-3}
+ fs_test_strided=${fs_test_strided:-1}
+ fs_test_touch=${fs_test_touch:-3}
+ fs_test_supersize=${fs_test_supersize:-1}
+ fs_test_op=${fs_test_op:-write}
+ fs_test_barriers=${fs_test_barriers:-bopen,bwrite,bclose}
+ fs_test_io=${fs_test_io:-mpi}
+ fs_test_objsize=${fs_test_objsize:-100}
+ fs_test_objunit=${fs_test_objunit:-1048576} # 1 mb
+ fs_test_ndirs=${fs_test_ndirs:-80000}
+
+ [ x$FS_TEST = x ] && skip "FS_TEST not found"
+
+ # Space estimation in bytes
+ local space=$(df -B 1 -P $dir | tail -n 1 | awk '{ print $4 }')
+ local total_threads=$((num_clients * fs_test_threads))
+ echo "+ $fs_test_objsize * $fs_test_objunit * $total_threads "
+ if [ $((space / 2)) -le \
+ $((fs_test_objsize * fs_test_objunit * total_threads)) ]; then
+ fs_test_objsize=$((space / 2 / fs_test_objunit / \
+ total_threads))
+ [ $fs_test_objsize -eq 0 ] &&
+ skip_env "Need free space more than \
+ $((2 * total_threads * fs_test_objunit)) \
+ : have $((space / fs_test_objunit))"
+
+ echo "(reduced objsize to \
+ $((fs_test_objsize * fs_test_objunit)) bytes)"
+ fi
+
+ print_opts FS_TEST clients fs_test_threads fs_test_objsize MACHINEFILE
+
+ mkdir -p $testdir
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
+
+ # --nodb Turn off the database code at runtime
+ # -g --target The path to the data file
+ # -t --type Whether to do N-N (1) or N-1 (2)
+ # -n --nobj The number of objects written/read by each proc
+ # -z --size The size of each object
+ # -d ---num_nn_dirs Number of subdirectories for files
+ # -C --check Check every byte using argument 3.
+ # --collective Whether to use collective I/O (for N-1, mpi-io only)
+ # -s --strided Whether to use a strided pattern (for N-1 only)
+ # -T --touch Touch every byte using argument 3
+ # -o --op Whether to read only (read) or write only (write)
+ # -b --barriers When to barrier.
+ # -i --io Use POSIX, MPI, or PLFS IO routines (mpi|posix|plfs)
+ # -S --supersize Specify how many objects per superblock
+
+ local cmd="$FS_TEST -nodb -g $file -t $fs_test_type -n $fs_test_nobj \
+ -z $((fs_test_objsize * fs_test_objunit)) -d $fs_test_ndirs \
+ -C $fs_test_check -collective -s $fs_test_strided \
+ -T $fs_test_touch -o $fs_test_op -b $fs_test_barriers \
+ -i $fs_test_io -S $fs_test_supersize"
+
+ echo "+ $cmd"
+ mpi_run "-np $((num_clients * fs_test_threads))" $cmd
+
+ local rc=$?
+ if [ $rc != 0 ] ; then
+ error "fs_test failed! $rc"
+ fi
+
+ rm -rf $testdir
+}
+
+ior_mdtest_parallel() {
+ local rc1=0
+ local rc2=0
+ local type=$1
+
+ run_ior $type &
+ local pids=$!
+
+ run_mdtest $type || rc2=$?
+ [[ $rc2 -ne 0 ]] && echo "mdtest failed with error $rc2"
+
+ wait $pids || rc1=$?
+ [[ $rc1 -ne 0 ]] && echo "ior failed with error $rc1"
+
+ [[ $rc1 -ne 0 || $rc2 -ne 0 ]] && return 1
+ return 0
+}
+
+run_fio() {
+ FIO=${FIO:=$(which fio 2> /dev/null || true)}
+
+ local clients=${CLIENTS:-$(hostname)}
+ local fio_jobNum=${fio_jobNum:-4}
+ local fio_jobFile=${fio_jobFile:-$TMP/fiojobfile.$(date +%s)}
+ local fio_bs=${fio_bs:-1}
+ local testdir=$DIR/d0.fio
+ local file=${testdir}/fio
+ local runtime=60
+ local propagate=false
+
+ [ "$SLOW" = "no" ] || runtime=600
+
+ [ x$FIO = x ] && skip_env "FIO not found"
+
+ mkdir -p $testdir
+
+ # use fio job file if exists,
+ # create a simple one if missing
+ if ! [ -f $fio_jobFile ]; then
+ cat >> $fio_jobFile <<EOF
+[global]
+rw=randwrite
+size=128m
+time_based=1
+runtime=$runtime
+filename=${file}_\$(hostname)
+EOF
+ # bs size increased by $i for each job
+ for ((i=1; i<=fio_jobNum; i++)); do
+ cat >> $fio_jobFile <<EOF
+
+[job$i]
+bs=$(( fio_bs * i ))m
+EOF
+ done
+ # job file is created, should be propagated to all clients
+ propagate=true
+ fi
+
+
+ # propagate the job file if not all clients have it yet or
+ # if the job file was created during the test run
+ if ! do_nodesv $clients " [ -f $fio_jobFile ] " ||
+ $propagate; then
+ local cfg=$(cat $fio_jobFile)
+ do_nodes $clients "echo \\\"$cfg\\\" > ${fio_jobFile}" ||
+ error "job file $fio_jobFile is not propagated"
+ do_nodesv $clients "cat ${fio_jobFile}"
+ fi
+
+ cmd="$FIO $fio_jobFile"
+ echo "+ $cmd"
+
+ log "clients: $clients $cmd"
+
+ local rc=0
+ do_nodesv $clients "$cmd "
+ rc=$?
+
+ [ $rc = 0 ] || error "fio failed: $rc"
+ rm -rf $testdir
+}
+
+run_xdd() {
+ XDD=${XDD:=$(which xdd 2> /dev/null || true)}
+
+ local clients=${CLIENTS:-$(hostname)}
+ local testdir=$DIR/d0.xdd
+ xdd_queuedepth=${xdd_queuedepth:-4}
+ xdd_blocksize=${xdd_blocksize:-512}
+ xdd_reqsize=${xdd_reqsize:-128}
+ xdd_mbytes=${xdd_mbytes:-100}
+ xdd_passes=${xdd_passes:-40}
+ xdd_rwratio=${xdd_rwratio:-0}
+ xdd_ntargets=${xdd_ntargets:-6}
+ local xdd_custom_params=${xdd_custom_params:-"-dio -stoponerror \
+ -maxpri -minall -noproclock -nomemlock"}
+
+ [ x$XDD = x ] && skip "XDD not found"
+
+ print_opts XDD clients xdd_queuedepth xdd_blocksize xdd_reqsize \
+ xdd_mbytes xdd_passes xdd_rwratio
+
+ mkdir -p $testdir
+
+ local files=""
+ # Target files creates based on the given number of targets
+ for (( i=0; i < $xdd_ntargets; i++ ))
+ do
+ files+="${testdir}/xdd"$i" "
+ done
+
+ # -targets specifies the devices or files to perform operation
+ # -reqsize number of 'blocks' per operation
+ # -mbytes number of 1024*1024-byte blocks to transfer
+ # -blocksize size of a single 'block'
+ # -passes number of times to read mbytes
+ # -queuedepth number of commands to queue on the target
+ # -rwratio percentage of read to write operations
+ # -verbose will print out statistics on each pass
+
+ local cmd="$XDD -targets $xdd_ntargets $files -reqsize $xdd_reqsize \
+ -mbytes $xdd_mbytes -blocksize $xdd_blocksize \
+ -passes $xdd_passes -queuedepth $xdd_queuedepth \
+ -rwratio $xdd_rwratio -verbose $xdd_custom_params"
+ echo "+ $cmd"
+
+ local rc=0
+ do_nodesv $clients "$cmd "
+ rc=$?
+
+ [ $rc = 0 ] || error "xdd failed: $rc"
+
+ rm -rf $testdir
+}