+ do_rpc_nodes $list lst_setup
+}
+
+###
+# short_hostname
+#
+# Passed a single argument, strips everything off following
+# and includes the first period.
+# client-20.lab.whamcloud.com becomes client-20
+short_hostname() {
+ echo $(sed 's/\..*//' <<< $1)
+}
+
+###
+# short_nodename
+#
+# Find remote nodename, stripped of any domain, etc.
+# 'hostname -s' is easy, but not implemented on all systems
+short_nodename() {
+ local rname=$(do_node $1 "uname -n" || echo -1)
+ if [[ "$rname" = "-1" ]]; then
+ rname=$1
+ fi
+ echo $(short_hostname $rname)
+}
+
###
# print_opts
#
# Print "OPTIONS:" followed by NAME=VALUE for each variable name given
# as an argument (resolved via indirect expansion), then the contents
# of $MACHINEFILE when it exists.
#
# Globals:   MACHINEFILE (read) - optional machine file to dump
# Arguments: one or more variable NAMES (not values)
# Outputs:   the option report on stdout
print_opts () {
	local var

	echo OPTIONS:

	# Iterate over the names themselves; quote "$@" so each name
	# stays a single word.
	for var in "$@"; do
		echo "${var}=${!var}"
	done
	# Guard the emptiness first: with MACHINEFILE unset, the old
	# bare '[ -e ]' was a TRUE one-argument test and 'cat' would
	# hang reading stdin.
	[ -n "$MACHINEFILE" ] && [ -e "$MACHINEFILE" ] &&
		cat "$MACHINEFILE" || true
}
+
###
# is_lustre
#
# Succeed iff the filesystem holding $1 reports type "lustre"
# via 'stat -f -c %T' (filesystem type name).
#
# Arguments: $1 - path to test
# Returns:   0 if on Lustre, non-zero otherwise
is_lustre () {
	# Quote $1 so paths containing spaces are passed intact.
	[ "$(stat -f -c %T "$1")" = "lustre" ]
}
+
###
# setstripe_getstripe
#
# Apply optional Lustre stripe parameters to a file/directory and then
# display its resulting layout.  A silent no-op (returns 0) when the
# target is not on a Lustre filesystem.
#
# Globals:   LFS (read) - path to the lfs utility
# Arguments: $1   - file or directory to stripe
#            $2.. - parameters passed verbatim to 'lfs setstripe'
setstripe_getstripe () {
	local file=$1
	shift
	# NOTE: $params is intentionally expanded unquoted below so a
	# multi-word parameter string splits back into separate args.
	local params=$@

	is_lustre $file || return 0

	if [ -n "$params" ]; then
		$LFS setstripe $params $file ||
			error "setstripe $params failed"
	fi
	$LFS getstripe $file ||
		error "getstripe $file failed"
}
+
###
# run_compilebench
#
# Run the compilebench filesystem-aging benchmark in $dir.  Skips the
# test (via the skip_env framework helper) when compilebench is not
# installed or free space is insufficient, scaling the number of
# initial directories down to fit the available space.
#
# Globals:   cbench_DIR   - directory containing the compilebench binary
#            cbench_IDIRS - number of initial kernel trees (~1GB each)
#            cbench_RUNS  - number of benchmark runs
#            cbench_STRIPEPARAMS - optional stripe params for the test dir
# Arguments: $1 - target directory (default $DIR)
run_compilebench() {
	local dir=${1:-$DIR}
	local cbench_DIR=${cbench_DIR:-""}
	local cbench_IDIRS=${cbench_IDIRS:-2}
	local cbench_RUNS=${cbench_RUNS:-2}

	print_opts cbench_DIR cbench_IDIRS cbench_RUNS

	[ x$cbench_DIR = x ] &&
		skip_env "compilebench not found"

	[ -e $cbench_DIR/compilebench ] ||
		skip_env "No compilebench build"

	# Space estimation:
	# compile dir kernel-0 ~1GB
	# required space ~1GB * cbench_IDIRS
	# (df -P reports available space in KB in column 4)
	local space=$(df -P $dir | tail -n 1 | awk '{ print $4 }')
	if [[ $space -le $((1024 * 1024 * cbench_IDIRS)) ]]; then
		# Not enough room for the requested tree count; scale it
		# down to what fits, or skip when even one tree won't fit.
		cbench_IDIRS=$((space / 1024 / 1024))
		[[ $cbench_IDIRS -eq 0 ]] &&
			skip_env "Need free space at least 1GB, have $space"

		echo "reducing initial dirs to $cbench_IDIRS"
	fi
	echo "free space = $space KB"

	# FIXME:
	# t-f _base needs to be modified to set tdir properly
	# for new "test_foo" function names
	# local testdir=$DIR/$tdir
	local testdir=$dir/d0.compilebench.$$
	test_mkdir -p $testdir
	setstripe_getstripe $testdir $cbench_STRIPEPARAMS

	# compilebench must run from its own source directory
	local savePWD=$PWD
	cd $cbench_DIR
	local cmd="./compilebench -D $testdir -i $cbench_IDIRS \
		-r $cbench_RUNS --makej"

	log "$cmd"

	local rc=0
	eval $cmd
	rc=$?

	cd $savePWD
	[ $rc = 0 ] || error "compilebench failed: $rc"
	rm -rf $testdir
}
+
###
# run_metabench
#
# Run the metabench metadata benchmark under $dir, launched through
# mpi_run (or srun when SRUN_PARTITION is set).  Skips when metabench
# is not installed.
#
# Globals:   mbench_NFILES  - number of files per test
#            mbench_THREADS - threads per client
#            mbench_OPTIONS - extra metabench options
#            mbench_CLEANUP - remove test dir afterwards if "true"
# Arguments: $1 - working directory (default $DIR)
#            $2 - mount point used when preserving results (default $MOUNT)
run_metabench() {
	local dir=${1:-$DIR}
	local mntpt=${2:-$MOUNT}
	METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
	mbench_NFILES=${mbench_NFILES:-30400}
	# threads per client
	mbench_THREADS=${mbench_THREADS:-4}
	mbench_OPTIONS=${mbench_OPTIONS:-}
	mbench_CLEANUP=${mbench_CLEANUP:-true}

	[ x$METABENCH = x ] && skip_env "metabench not found"

	print_opts METABENCH clients mbench_NFILES mbench_THREADS

	local testdir=$dir/d0.metabench
	test_mkdir -p $testdir
	setstripe_getstripe $testdir $mbench_STRIPEPARAMS

	# mpi_run uses mpiuser
	chmod 0777 $testdir

	# -C Run the file creation tests. Creates zero byte files.
	# -S Run the file stat tests.
	# -c nfile Number of files to be used in each test.
	# -k Cleanup files when finished.
	local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S $mbench_OPTIONS"
	echo "+ $cmd"

	# find out if we need to use srun by checking $SRUN_PARTITION
	if [ "$SRUN_PARTITION" ]; then
		$SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
			-n $((num_clients * mbench_THREADS)) \
			-p $SRUN_PARTITION -- $cmd
	else
		mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
			-np $((num_clients * $mbench_THREADS)) $cmd
	fi

	local rc=$?
	if [ $rc != 0 ] ; then
		error "metabench failed! $rc"
	fi

	if $mbench_CLEANUP; then
		rm -rf $testdir
	else
		# keep the results under a unique timestamped name
		mv $dir/d0.metabench $mntpt/_xxx.$(date +%s).d0.metabench
	fi
}
+
###
# run_simul
#
# Run the 'simul' simultaneous-filesystem-operations MPI test.
# Skipped when simul is absent or in NFSCLIENT mode.
#
# Globals:   simul_THREADS - threads per client
#            simul_REP     - repetition count (passed as both -n and -N)
run_simul() {
	SIMUL=${SIMUL:=$(which simul 2> /dev/null || true)}
	[ x$SIMUL = x ] && skip_env "simul not found"
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"

	# threads per client
	simul_THREADS=${simul_THREADS:-2}
	simul_REP=${simul_REP:-20}

	# FIXME
	# Need space estimation here.

	print_opts SIMUL clients simul_REP simul_THREADS

	local testdir=$DIR/d0.simul
	test_mkdir $testdir
	setstripe_getstripe $testdir $simul_STRIPEPARAMS

	# mpi_run uses mpiuser
	chmod 0777 $testdir

	# -n # : repeat each test # times
	# -N # : repeat the entire set of tests # times

	local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"

	echo "+ $cmd"
	# find out if we need to use srun by checking $SRUN_PARTITION
	if [ "$SRUN_PARTITION" ]; then
		$SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
			-n $((num_clients * simul_THREADS)) -p $SRUN_PARTITION \
			-- $cmd
	else
		mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
			-np $((num_clients * simul_THREADS)) $cmd
	fi

	local rc=$?
	if [ $rc != 0 ] ; then
		error "simul failed! $rc"
	fi
	rm -rf $testdir
}
+
###
# run_mdtest
#
# Run the mdtest metadata benchmark via MPI, either in shared-file
# mode ("ssf") or file-per-process mode ("fpp"), optionally spread
# across multiple client mount points (mdtest_Nmntp).
#
# Globals:   mdtest_THREADS   - threads per client
#            mdtest_nFiles    - total files (divided per-thread below)
#            mdtest_iteration - -i repetitions
#            mdtest_Nmntp     - number of mount points to use
# Arguments: $1 - test type, "ssf" (default) or "fpp"
run_mdtest() {
	MDTEST=${MDTEST:=$(which mdtest 2> /dev/null || true)}
	[ x$MDTEST = x ] && skip_env "mdtest not found"
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"

	# threads per client
	mdtest_THREADS=${mdtest_THREADS:-2}
	mdtest_nFiles=${mdtest_nFiles:-"100000"}
	# We divide the files by number of cores
	mdtest_nFiles=$((mdtest_nFiles/mdtest_THREADS/num_clients))
	mdtest_iteration=${mdtest_iteration:-1}
	local mdtest_custom_params=${mdtest_custom_params:-""}
	local type=${1:-"ssf"}

	local mdtest_Nmntp=${mdtest_Nmntp:-1}

	# shared-file mode writes to one directory; multiple mount
	# points only make sense for file-per-process runs
	if [ $type = "ssf" ] && [ $mdtest_Nmntp -ne 1 ]; then
		skip "shared directory mode is not compatible" \
			"with multiple directory paths"
	fi

	# FIXME
	# Need space estimation here.

	print_opts MDTEST mdtest_iteration mdtest_THREADS mdtest_nFiles

	local testdir=$DIR/d0.mdtest
	test_mkdir $testdir
	setstripe_getstripe $testdir $mdtest_STRIPEPARAMS
	chmod 0777 $testdir

	# mount the extra client mount points and build an '@'-separated
	# list of directories for mdtest's -d option
	for ((i=1; i<mdtest_Nmntp; i++)); do
		zconf_mount_clients $clients $MOUNT$i "$mntopts" ||
			error_exit "Failed $clients on $MOUNT$i"
		local dir=$DIR$i/d0.mdtest$i
		test_mkdir $dir
		setstripe_getstripe $dir $mdtest_SETSTRIPEPARAMS
		chmod 0777 $dir
		testdir="$testdir@$dir"
	done
	# mpi_run uses mpiuser

	# -i # : repeat each test # times
	# -d : test dir
	# -n # : number of file/dir to create/stat/remove
	# -u : each process create/stat/remove individually

	local cmd="$MDTEST -d $testdir -i $mdtest_iteration \
		-n $mdtest_nFiles $mdtest_custom_params"

	[ $type = "fpp" ] && cmd="$cmd -u"

	echo "+ $cmd"
	# find out if we need to use srun by checking $SRUN_PARTITION
	if [ "$SRUN_PARTITION" ]; then
		$SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
			-n $((num_clients * mdtest_THREADS)) \
			-p $SRUN_PARTITION -- $cmd
	else
		mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
			-np $((num_clients * mdtest_THREADS)) $cmd
	fi

	local rc=$?
	if [ $rc != 0 ] ; then
		error "mdtest failed! $rc"
	fi
	rm -rf $testdir
	# tear down the extra mount points created above
	for ((i=1; i<mdtest_Nmntp; i++)); do
		local dir=$DIR$i/d0.mdtest$i
		rm -rf $dir
		zconf_umount_clients $clients $MOUNT$i ||
			error_exit "Failed umount $MOUNT$i on $clients"
	done
}
+
###
# run_connectathon
#
# Run the Connectathon NFS test suite (basic, general, special and,
# except on nfsv4, lock tests) against a directory under $dir.
#
# Globals:   cnt_DIR  - connectathon installation directory
#            cnt_NRUN - number of passes per test group
# Arguments: $1 - working directory (default $DIR)
run_connectathon() {
	local dir=${1:-$DIR}
	cnt_DIR=${cnt_DIR:-""}
	cnt_NRUN=${cnt_NRUN:-10}

	print_opts cnt_DIR cnt_NRUN

	[ x$cnt_DIR = x ] && skip_env "connectathon dir not found"
	[ -e $cnt_DIR/runtests ] || skip_env "No connectathon runtests found"

	# Space estimation:
	# "special" tests create a 30 MB file + misc. small files
	# required space ~40 MB
	local space=$(df -P $dir | tail -n 1 | awk '{ print $4 }')
	if [[ $space -le $((1024 * 40)) ]]; then
		skip_env "Need free space at least 40MB, have $space KB"
	fi
	echo "free space = $space KB"

	local testdir=$dir/d0.connectathon
	test_mkdir -p $testdir
	setstripe_getstripe $testdir $cnt_STRIPEPARAMS

	# runtests must be invoked from the connectathon directory
	local savePWD=$PWD
	cd $cnt_DIR

	#
	# To run connectathon:
	# runtests [-a|-b|-g|-s|-l] [-f|-n|-t] [-N numpasses] [test-directory]
	#
	# One of the following test types
	#	-b  basic
	#	-g  general
	#	-s  special
	#	-l  lock
	#	-a  all of the above
	#
	# -f	a quick functional test
	# -n	suppress directory operations (mkdir and rmdir)
	# -t	run with time statistics (default for basic tests)
	#
	# -N numpasses - specifies the number of times to run
	#                the tests. Optional.

	# NOTE(review): 'tests' is not declared local, so it leaks into
	# the caller's scope — confirm whether anything relies on that.
	tests="-b -g -s"
	# Include lock tests unless we're running on nfsv4
	local fstype=$(df -TP $testdir | awk 'NR==2 {print $2}')
	echo "$testdir: $fstype"
	if [[ $fstype != "nfs4" ]]; then
		tests="$tests -l"
	fi
	echo "tests: $tests"
	for test in $tests; do
		local cmd="sh ./runtests -N $cnt_NRUN $test -f $testdir"
		local rc=0

		log "$cmd"
		eval $cmd
		rc=$?
		[ $rc = 0 ] || error "connectathon failed: $rc"
	done

	cd $savePWD
	rm -rf $testdir
}
+
###
# run_ior
#
# Run the IOR parallel I/O benchmark via MPI (or srun when
# SRUN_PARTITION is set), in shared-file ("ssf") or file-per-process
# ("fpp") mode.  The block size is scaled down to half the available
# space when the requested size does not fit.
#
# Globals:   ior_THREADS, ior_iteration, ior_blockSize, ior_blockUnit,
#            ior_xferSize, ior_type, ior_DURATION, ior_CLEANUP and
#            friends — see defaults below
# Arguments: $1 - test type, "ssf" (default) or "fpp"
#            $2 - working directory (default $DIR)
#            $3 - NFS server-side mount point (required in NFSCLIENT mode)
run_ior() {
	# Use ':-' (substitute) rather than ':=': assigning to the
	# positional parameter $1 with ':=' is an error in bash
	# ("cannot assign in this way") when no argument is given.
	# Sibling run_mdtest already uses the ':-' form.
	local type=${1:-"ssf"}
	local dir=${2:-$DIR}
	local testdir=$dir/d0.ior.$type
	local nfs_srvmntpt=$3

	if [ "$NFSCLIENT" ]; then
		[[ -n $nfs_srvmntpt ]] ||
			{ error "NFSCLIENT mode, but nfs exported dir"\
				"is not set!" && return 1; }
	fi

	IOR=${IOR:-$(which IOR 2> /dev/null || true)}
	[ x$IOR = x ] && skip_env "IOR not found"

	# threads per client
	ior_THREADS=${ior_THREADS:-2}
	ior_iteration=${ior_iteration:-1}
	ior_blockSize=${ior_blockSize:-6}
	ior_blockUnit=${ior_blockUnit:-M} # K, M, G
	ior_xferSize=${ior_xferSize:-1M}
	ior_type=${ior_type:-POSIX}
	ior_DURATION=${ior_DURATION:-30} # minutes
	ior_CLEANUP=${ior_CLEANUP:-true}
	# bytes per ior_blockUnit, for the space check below
	local multiplier=1
	case ${ior_blockUnit} in
		[G])
			multiplier=$((1024 * 1024 * 1024))
			;;
		[M])
			multiplier=$((1024 * 1024))
			;;
		[K])
			multiplier=1024
			;;
		*) error "Incorrect block unit should be one of [KMG]"
			;;
	esac

	# calculate the space in bytes
	local space=$(df -B 1 -P $dir | tail -n 1 | awk '{ print $4 }')
	local total_threads=$((num_clients * ior_THREADS))
	echo "+ $ior_blockSize * $multiplier * $total_threads "
	if [ $((space / 2)) -le \
	     $((ior_blockSize * multiplier * total_threads)) ]; then
		# requested data does not fit in half the free space;
		# shrink the per-task block size accordingly
		ior_blockSize=$((space / 2 / multiplier / total_threads))
		[ $ior_blockSize -eq 0 ] &&
			skip_env "Need free space more than $((2 * total_threads)) \
				${ior_blockUnit}: have $((space / multiplier))"

		echo "(reduced blockSize to $ior_blockSize \
			${ior_blockUnit} bytes)"
	fi

	print_opts IOR ior_THREADS ior_DURATION MACHINEFILE

	test_mkdir -p $testdir

	# mpi_run uses mpiuser
	chmod 0777 $testdir
	# honour the deprecated variable name if the new one is unset
	[[ "$ior_stripe_params" && -z "$ior_STRIPEPARAMS" ]] &&
		ior_STRIPEPARAMS="$ior_stripe_params" &&
		echo "got deprecated ior_stripe_params,"\
			"use ior_STRIPEPARAMS instead"
	setstripe_getstripe $testdir $ior_STRIPEPARAMS

	#
	# -b N  blockSize --
	#		contiguous bytes to write per task (e.g.: 8, 4K, 2M, 1G)"
	# -o S  testFileName
	# -t N  transferSize -- size of transfer in bytes (e.g.: 8, 4K, 2M, 1G)"
	# -w    writeFile -- write file"
	# -r    readFile -- read existing file"
	# -W    checkWrite -- check read after write"
	# -C    reorderTasks -- changes task ordering to n+1 ordering for readback
	# -T    maxTimeDuration -- max time in minutes to run tests"
	# -k    keepFile -- keep testFile(s) on program exit

	local cmd
	if [ -n "$ior_custom_params" ]; then
		cmd="$IOR -o $testdir/iorData $ior_custom_params"
	else
		cmd="$IOR -a $ior_type -b ${ior_blockSize}${ior_blockUnit} \
			-o $testdir/iorData -t $ior_xferSize -v -C -w -r -W \
			-i $ior_iteration -T $ior_DURATION -k"
	fi

	[ $type = "fpp" ] && cmd="$cmd -F"

	echo "+ $cmd"
	# find out if we need to use srun by checking $SRUN_PARTITION
	if [ "$SRUN_PARTITION" ]; then
		$SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
			-n $((num_clients * ior_THREADS)) -p $SRUN_PARTITION \
			-- $cmd
	else
		mpi_ior_custom_threads=${mpi_ior_custom_threads:-"$((num_clients * ior_THREADS))"}
		mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
			-np $mpi_ior_custom_threads $cmd
	fi

	local rc=$?
	if [ $rc != 0 ] ; then
		error "ior failed! $rc"
	fi
	$ior_CLEANUP && rm -rf $testdir || true
}
+
###
# run_mib
#
# Run the MIB parallel I/O benchmark via MPI (or srun when
# SRUN_PARTITION is set).  Skipped in NFSCLIENT mode or when mib
# is not installed.
#
# Globals:   mib_THREADS   - threads per client
#            mib_xferSize  - system call size
#            mib_xferLimit - max number of system calls
#            mib_timeLimit - time limit in seconds
run_mib() {
	MIB=${MIB:=$(which mib 2> /dev/null || true)}
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
	[ x$MIB = x ] && skip_env "MIB not found"

	# threads per client
	mib_THREADS=${mib_THREADS:-2}
	mib_xferSize=${mib_xferSize:-1m}
	mib_xferLimit=${mib_xferLimit:-5000}
	mib_timeLimit=${mib_timeLimit:-300}
	# default: stripe across all OSTs
	mib_STRIPEPARAMS=${mib_STRIPEPARAMS:-"-c -1"}

	print_opts MIB mib_THREADS mib_xferSize mib_xferLimit mib_timeLimit \
		MACHINEFILE

	local testdir=$DIR/d0.mib
	test_mkdir $testdir
	setstripe_getstripe $testdir $mib_STRIPEPARAMS

	# mpi_run uses mpiuser
	chmod 0777 $testdir

	#
	# -I	Show intermediate values in output
	# -H	Show headers in output
	# -L	Do not issue new system calls after this many seconds
	# -s	Use system calls of this size
	# -t	test dir
	# -l	Issue no more than this many system calls
	local cmd="$MIB -t $testdir -s $mib_xferSize -l $mib_xferLimit \
		-L $mib_timeLimit -HI -p mib.$(date +%Y%m%d%H%M%S)"

	echo "+ $cmd"
	# find out if we need to use srun by checking $SRUN_PARTITION
	if [ "$SRUN_PARTITION" ]; then
		$SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
			-n $((num_clients * mib_THREADS)) -p $SRUN_PARTITION \
			-- $cmd
	else
		mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
			-np $((num_clients * mib_THREADS)) $cmd
	fi

	local rc=$?
	if [ $rc != 0 ] ; then
		error "mib failed! $rc"
	fi
	rm -rf $testdir
}
+
###
# run_cascading_rw
#
# Run the cascading_rw MPI read/write test.  Skipped in NFSCLIENT
# mode or when the binary is not installed.
#
# Globals:   casc_THREADS - threads per client
#            casc_REP     - repetition count
run_cascading_rw() {
	CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
	[ x$CASC_RW = x ] && skip_env "cascading_rw not found"
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"

	# threads per client
	casc_THREADS=${casc_THREADS:-2}
	casc_REP=${casc_REP:-300}

	# FIXME
	# Need space estimation here.

	print_opts CASC_RW clients casc_THREADS casc_REP MACHINEFILE

	local testdir=$DIR/d0.cascading_rw
	test_mkdir $testdir
	setstripe_getstripe $testdir $casc_STRIPEPARAMS

	# mpi_run uses mpiuser
	chmod 0777 $testdir

	# -g: debug mode
	# -n: repeat test # times

	local cmd="$CASC_RW -g -d $testdir -n $casc_REP"

	echo "+ $cmd"
	mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
		-np $((num_clients * $casc_THREADS)) $cmd

	local rc=$?
	if [ $rc != 0 ] ; then
		error "cascading_rw failed! $rc"
	fi
	rm -rf $testdir
}
+
###
# run_write_append_truncate
#
# Run the write_append_truncate MPI test (from lustre/tests) against a
# single shared file.  Skipped in NFSCLIENT mode or when the binary is
# not on PATH.
#
# Globals:   write_THREADS - threads per client
#            write_REP     - repetition count
run_write_append_truncate() {
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"
	# location is lustre/tests dir
	if ! which write_append_truncate > /dev/null 2>&1 ; then
		skip_env "write_append_truncate not found"
	fi

	# threads per client
	write_THREADS=${write_THREADS:-8}
	write_REP=${write_REP:-10000}

	# FIXME
	# Need space estimation here.

	local testdir=$DIR/d0.write_append_truncate
	local file=$testdir/f0.wat

	print_opts clients write_REP write_THREADS MACHINEFILE

	test_mkdir $testdir
	# mpi_run uses mpiuser
	setstripe_getstripe $testdir $write_STRIPEPARAMS

	chmod 0777 $testdir

	local cmd="write_append_truncate -n $write_REP $file"

	echo "+ $cmd"
	mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
		-np $((num_clients * $write_THREADS)) $cmd

	local rc=$?
	if [ $rc != 0 ] ; then
		error "write_append_truncate failed! $rc"
		return $rc
	fi
	rm -rf $testdir
}
+
###
# run_write_disjoint
#
# Run the write_disjoint MPI test (many tasks writing disjoint regions
# of one file).  Skipped in NFSCLIENT mode or when the binary is not
# installed.
#
# Globals:   wdisjoint_THREADS - threads per client
#            wdisjoint_REP     - repetition count
# Arguments: $1 - chunk size limit passed to -m
#                 NOTE(review): if $1 is empty the command ends with a
#                 bare '-m'; confirm write_disjoint tolerates that.
run_write_disjoint() {
	WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint 2> /dev/null ||
		true)}
	[ x$WRITE_DISJOINT = x ] && skip_env "write_disjoint not found"
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"

	# threads per client
	wdisjoint_THREADS=${wdisjoint_THREADS:-4}
	wdisjoint_REP=${wdisjoint_REP:-10000}
	# NOTE(review): not declared 'local', so this leaks globally
	chunk_size_limit=$1

	# FIXME
	# Need space estimation here.

	print_opts WRITE_DISJOINT clients wdisjoint_THREADS wdisjoint_REP \
		MACHINEFILE
	local testdir=$DIR/d0.write_disjoint
	test_mkdir $testdir
	setstripe_getstripe $testdir $wdisjoint_STRIPEPARAMS

	# mpi_run uses mpiuser
	chmod 0777 $testdir

	local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP -m \
		$chunk_size_limit"

	echo "+ $cmd"
	mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
		-np $((num_clients * $wdisjoint_THREADS)) $cmd

	local rc=$?
	if [ $rc != 0 ] ; then
		error "write_disjoint failed! $rc"
	fi
	rm -rf $testdir
}
+
###
# run_parallel_grouplock
#
# Run all 12 parallel_grouplock subtests via MPI, accumulating their
# exit statuses and failing if any subtest failed.
#
# Globals:   parallel_grouplock_MINTASKS - MPI task count
run_parallel_grouplock() {
	PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock \
		2> /dev/null || true)}

	[ x$PARALLEL_GROUPLOCK = x ] && skip "PARALLEL_GROUPLOCK not found"
	[ "$NFSCLIENT" ] && skip "skipped for NFSCLIENT mode"

	parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}

	print_opts clients parallel_grouplock_MINTASKS MACHINEFILE

	local testdir=$DIR/d0.parallel_grouplock
	test_mkdir $testdir
	setstripe_getstripe $testdir $parallel_grouplock_STRIPEPARAMS

	# mpi_run uses mpiuser
	chmod 0777 $testdir

	local cmd
	local status=0
	local subtest
	# run each of the 12 subtests individually, continuing past
	# failures so every subtest gets a result
	for i in $(seq 12); do
		subtest="-t $i"
		local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
		echo "+ $cmd"

		mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
			-np $parallel_grouplock_MINTASKS $cmd
		local rc=$?
		if [ $rc != 0 ] ; then
			error_noexit "parallel_grouplock subtests $subtest " \
				"failed! $rc"
		else
			echo "parallel_grouplock subtests $subtest PASS"
		fi
		let status=$((status + rc))
		# clear debug to collect one log per one test
		do_nodes $(comma_list $(nodes_list)) lctl clear
	done
	[ $status -eq 0 ] || error "parallel_grouplock status: $status"
	rm -rf $testdir
}
+
###
# cleanup_statahead
#
# Unmount the extra lustre mount points created by run_statahead
# (indices 0..num_mntpts inclusive) and clear the EXIT trap.
#
# Arguments: $1 - client list
#            $2 - mount point root (index is appended)
#            $3 - highest mount point index
cleanup_statahead () {
	trap 0

	local client_list=$1
	local root=$2
	local count=$3
	local idx

	for ((idx = 0; idx <= count; idx++)); do
		zconf_umount_clients $client_list ${root}$idx ||
			error_exit "Failed to umount lustre on ${root}$idx"
	done
}
+
+run_statahead () {
+ if [[ -n $NFSCLIENT ]]; then
+ skip "Statahead testing is not supported on NFS clients."
+ fi
+ [ x$MDSRATE = x ] && skip_env "mdsrate not found"
+
+ statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
+ statahead_NUMFILES=${statahead_NUMFILES:-500000}
+
+ print_opts MDSRATE clients statahead_NUMMNTPTS statahead_NUMFILES
+
+ # create large dir
+
+ # do not use default "d[0-9]*" dir name
+ # to avoid of rm $statahead_NUMFILES (500k) files in t-f cleanup
+ local dir=dstatahead
+ local testdir=$DIR/$dir
+
+ # cleanup only if dir exists
+ # cleanup only $statahead_NUMFILES number of files
+ # ignore the other files created by someone else
+ [ -d $testdir ] &&
+ mdsrate_cleanup $((num_clients * 32)) $MACHINEFILE \
+ $statahead_NUMFILES $testdir 'f%%d' --ignore
+
+ test_mkdir $testdir
+ setstripe_getstripe $testdir $statahead_STRIPEPARAMS
+
+ # mpi_run uses mpiuser
+ chmod 0777 $testdir
+
+ local num_files=$statahead_NUMFILES
+
+ local IFree=$(inodes_available)
+ if [ $IFree -lt $num_files ]; then
+ num_files=$IFree
+ fi
+
+ cancel_lru_locks mdc
+
+ local cmd1="${MDSRATE} ${MDSRATE_DEBUG} --mknod --dir $testdir"
+ local cmd2="--nfiles $num_files --filefmt 'f%%d'"
+ local cmd="$cmd1 $cmd2"
+ echo "+ $cmd"
+
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * 32)) $cmd
+
+ local rc=$?
+ if [ $rc != 0 ] ; then
+ error "mdsrate failed to create $rc"
+ return $rc
+ fi
+
+ local num_mntpts=$statahead_NUMMNTPTS
+ local mntpt_root=$TMP/mntpt/lustre
+ local mntopts=$MNTOPTSTATAHEAD
+
+ echo "Mounting $num_mntpts lustre clients starts on $clients"
+ trap "cleanup_statahead $clients $mntpt_root $num_mntpts" EXIT ERR
+ for i in $(seq 0 $num_mntpts); do
+ zconf_mount_clients $clients ${mntpt_root}$i "$mntopts" ||
+ error_exit "Failed to mount lustre on ${mntpt_root}$i on $clients"
+ done
+
+ do_rpc_nodes $clients cancel_lru_locks mdc
+
+ do_rpc_nodes $clients do_ls $mntpt_root $num_mntpts $dir
+
+ mdsrate_cleanup $((num_clients * 32)) $MACHINEFILE \
+ $num_files $testdir 'f%%d' --ignore
+
+ # use rm instead of rmdir because of
+ # testdir could contain the files created by someone else,
+ # or by previous run where is num_files prev > num_files current
+ rm -rf $testdir
+ cleanup_statahead $clients $mntpt_root $num_mntpts