-n $((num_clients * mbench_THREADS)) \
-p $SRUN_PARTITION -- $cmd
else
- mpi_run -np $((num_clients * $mbench_THREADS)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * $mbench_THREADS)) $cmd
fi
local rc=$?
-n $((num_clients * simul_THREADS)) -p $SRUN_PARTITION \
-- $cmd
else
- mpi_run -np $((num_clients * simul_THREADS)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * simul_THREADS)) $cmd
fi
local rc=$?
-n $((num_clients * mdtest_THREADS)) \
-p $SRUN_PARTITION -- $cmd
else
- mpi_run -np $((num_clients * mdtest_THREADS)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * mdtest_THREADS)) $cmd
fi
local rc=$?
-n $((num_clients * ior_THREADS)) -p $SRUN_PARTITION \
-- $cmd
else
- mpi_run -np $((num_clients * $ior_THREADS)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * $ior_THREADS)) $cmd
fi
local rc=$?
-n $((num_clients * mib_THREADS)) -p $SRUN_PARTITION \
-- $cmd
else
- mpi_run -np $((num_clients * mib_THREADS)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * mib_THREADS)) $cmd
fi
local rc=$?
local cmd="$CASC_RW -g -d $testdir -n $casc_REP"
echo "+ $cmd"
- mpi_run -np $((num_clients * $casc_THREADS)) ${MACHINEFILE_OPTION} \
- ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * $casc_THREADS)) $cmd
local rc=$?
if [ $rc != 0 ] ; then
local cmd="write_append_truncate -n $write_REP $file"
echo "+ $cmd"
- mpi_run -np $((num_clients * $write_THREADS)) ${MACHINEFILE_OPTION} \
- ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * $write_THREADS)) $cmd
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP"
echo "+ $cmd"
- mpi_run -np $((num_clients * $wdisjoint_THREADS)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * $wdisjoint_THREADS)) $cmd
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
echo "+ $cmd"
- mpi_run -np $parallel_grouplock_MINTASKS ${MACHINEFILE_OPTION} \
- ${MACHINEFILE} $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $parallel_grouplock_MINTASKS $cmd
local rc=$?
if [ $rc != 0 ] ; then
error_noexit "parallel_grouplock subtests $subtest " \
local cmd="$cmd1 $cmd2"
echo "+ $cmd"
- mpi_run -np $((num_clients * 32)) ${MACHINEFILE_OPTION} ${MACHINEFILE} \
- $cmd
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((num_clients * 32)) $cmd
local rc=$?
if [ $rc != 0 ] ; then
while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
{
ha_on ${ha_clients[0]} mkdir -p "$dir" && \
- mpirun -np ${#ha_clients[@]} ${MACHINEFILE_OPTION} \
- "$ha_machine_file" $cmd && \
+ mpirun ${MACHINEFILE_OPTION} "$ha_machine_file" \
+ -np ${#ha_clients[@]} $cmd && \
ha_on ${ha_clients[0]} rm -rf "$dir"
} >>"$log" 2>&1 || rc=$?
--nfiles $num_files --filefmt 'file%%d'"
echo "# $command"
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) -machinefile \
- $MACHINEFILE $command
+ mpi_run -machinefile $MACHINEFILE \
+ -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) $command
if [ ${PIPESTATUS[0]} != 0 ]; then
error "mdsrate create failed"
COMMAND="${MDSRATE} --create --nfiles $nfiles --dir
$dir --filefmt 'f%%d'"
- mpi_run -np $((num * nthreads)) ${MACHINEFILE_OPTION} \
- $machinefile ${COMMAND} | tee ${LOG} &
+ mpi_run ${MACHINEFILE_OPTION} $machinefile \
+ -np $((num * nthreads)) ${COMMAND} | tee ${LOG}&
pid=$!
echo "pid=$pid"
BASEDIR=$MOUNT/mdsrate
# Requirements
-# set NUM_FILES=0 to force TIME_PERIOD work
+# set NUM_FILES=0 to force TIME_PERIOD work
NUM_FILES=${NUM_FILES:-1000000}
TIME_PERIOD=${TIME_PERIOD:-600} # seconds
[ ! -x ${MDSRATE} ] && error "${MDSRATE} not built."
-log "===== $0 ====== "
+log "===== $0 ====== "
check_and_setup_lustre
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
--nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_CLIENTS} \
${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_CLIENTS} \
${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
#!/bin/bash
#
-# This test was used in a set of CMD3 tests (cmd3-3 test).
+# This test was used in a set of CMD3 tests (cmd3-3 test).
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
# Make sure we start with a clean slate
rm -f ${LOG}
-log "===== $0 ====== "
+log "===== $0 ====== "
check_and_setup_lustre
if [ $IFree -lt $NUM_FILES ]; then
NUM_FILES=$IFree
fi
-
+
generate_machine_file $NODES_TO_USE $MACHINEFILE || error "can not generate machinefile"
if [ -n "$NOSINGLE" ]; then
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
--nfiles $NUM_FILES --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
--nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate create on multiple nodes failed, aborting"
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate unlinks multiple nodes failed, aborting"
complete $SECONDS
rmdir $BASEDIR || true
-rm -f $MACHINEFILE
+rm -f $MACHINEFILE
check_and_cleanup_lustre
#rm -f $LOG
# Directory lookup retrieval rate 10 directories 1 million files each
# 6000 random lookups/sec per client node 62,000 random lookups/sec aggregate
-#
+#
# In 10 dirs containing 1 million files each the mdsrate Test Program will
# perform lookups for 10 minutes. This test is run from a single node for
# #1 and from all nodes for #2 aggregate test to measure lookup performance.
[ ! -x ${MDSRATE} ] && error "${MDSRATE} not built."
-log "===== $0 ====== "
+log "===== $0 ====== "
check_and_setup_lustre
# This is just a test preparation, does not matter how many threads we
# use for files creation; we just should be aware that NUM_DIRS is less
# than or equal to the number of threads np
- mpi_run -np ${NUM_DIRS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_DIRS} \
${COMMAND} 2>&1
# No lookup if error occurs on file creation, abort.
else
log "===== $0 ### 1 NODE LOOKUPS ###"
echo "+" ${COMMAND}
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
else
log "===== $0 ### ${NUM_CLIENTS} NODES LOOKUPS ###"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_CLIENTS} \
${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
# Directory lookup retrieval rate single directory 10 million files
# 5900 random lookups/sec per client node 62,000 random lookups/sec aggregate
-#
+#
# In a dir containing 10 million non-striped files the mdsrate Test Program will
# perform lookups for 10 minutes. This test can be run from a single node for
# #1 and from all nodes for #2 aggregate test to measure lookup performance.
[ ! -x ${MDSRATE} ] && error "${MDSRATE} not built."
-log "===== $0 ====== "
+log "===== $0 ====== "
check_and_setup_lustre
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --mknod --dir ${TESTDIR}
--nfiles ${NUM_FILES} --filefmt 'f%%d'"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_THREADS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_THREADS} \
${COMMAND} 2>&1
# No lookup if error occurs on file creation, abort.
else
log "===== $0 ### 1 NODE LOOKUPS ###"
echo "+" ${COMMAND}
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
else
log "===== $0 ### ${NUM_CLIENTS} NODES LOOKUPS ###"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_CLIENTS} \
${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
-#!/bin/sh
+#!/bin/bash
#
# This test was used in a set of CMD3 tests (cmd3-8 test).
# In a dir containing 10 million striped files, the mdsrate Test Program will
# perform directory ordered stat's (readdir) for 10 minutes. This test will be
# run from a single node for #1 and from all nodes for #2 aggregate test to
-# measure stat performance.
+# measure stat performance.
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
[ ! -x ${MDSRATE} ] && error "${MDSRATE} not built."
-log "===== $0 ====== "
+log "===== $0 ====== "
check_and_setup_lustre
NUM_THREADS=$NUM_CLIENTS
fi
- mpi_run -np ${NUM_THREADS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_THREADS} \
${COMMAND} 2>&1
[ ${PIPESTATUS[0]} != 0 ] &&
error "mdsrate file creation failed, aborting"
log "===== $0 ### 1 NODE STAT ###"
echo "+" ${COMMAND}
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
log "===== $0 ### ${NUM_CLIENTS} NODES STAT ###"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_CLIENTS} \
${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ ! -x ${MDSRATE} ] && error "${MDSRATE} not built."
-log "===== $0 ====== "
+log "===== $0 ====== "
check_and_setup_lustre
NUM_THREADS=$NUM_CLIENTS
fi
- mpi_run -np ${NUM_THREADS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_THREADS} \
${COMMAND} 2>&1
[ ${PIPESTATUS[0]} != 0 ] &&
error "mdsrate file creation failed, aborting"
log "===== $0 ### 1 NODE STAT ###"
echo "+" ${COMMAND}
- mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np 1 ${COMMAND} |
tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
log "===== $0 ### ${NUM_CLIENTS} NODES STAT ###"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} -np ${NUM_CLIENTS} \
${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
set $TRACE
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
generate_machine_file $NODES_TO_USE $MACHINEFILE
- mpi_run -np $(get_node_count ${NODES_TO_USE//,/ }) \
- ${MACHINEFILE_OPTION} $MACHINEFILE $WRITE_DISJOINT \
+ mpi_run ${MACHINEFILE_OPTION} $MACHINEFILE \
+ -np $(get_node_count ${NODES_TO_USE//,/ }) $WRITE_DISJOINT \
-f $WRITE_DISJOINT_FILE -n $NUMLOOPS || STATUS=1
else
skip_env "$0 : write_disjoint not found "
# common setup
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
clients=${CLIENTS:-$HOSTNAME}
-generate_machine_file $clients $MACHINEFILE || \
+generate_machine_file $clients $MACHINEFILE ||
error "Failed to generate machine file"
num_clients=$(get_node_count ${clients//,/ })
# need this only if TESTDIR is not default
chmod -R 777 $TESTDIR
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) \
- ${MACHINEFILE_OPTION} ${MACHINEFILE} $IOR -a POSIX -b 1g \
+ mpi_run ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) $IOR -a POSIX -b 1g \
-o $TESTDIR/IOR-file -s 1 -t 1m -v -w -r 1>$LOG &
load_pid=$!
wait $load_pid
mdsrate_cleanup () {
if [ -d $4 ]; then
- mpi_run -np $1 ${MACHINEFILE_OPTION} $2 ${MDSRATE} --unlink \
+ mpi_run ${MACHINEFILE_OPTION} $2 -np $1 ${MDSRATE} --unlink \
--nfiles $3 --dir $4 --filefmt $5 $6
rmdir $4
fi
########################
-convert_facet2label() {
+convert_facet2label() {
local facet=$1
if [ x$facet = xost ]; then
if [ -n ${!varsvc} ]; then
echo ${!varsvc}
- else
+ else
error "No lablel for $facet!"
fi
}