noinst_SCRIPTS += sgpdd-survey.sh maloo_upload.sh auster setup-nfs.sh
noinst_SCRIPTS += mds-survey.sh parallel-scale-nfs.sh large-lun.sh
noinst_SCRIPTS += parallel-scale-nfsv3.sh parallel-scale-nfsv4.sh
-noinst_SCRIPTS += posix.sh sanity-scrub.sh scrub-performance.sh
+noinst_SCRIPTS += posix.sh sanity-scrub.sh scrub-performance.sh ha.sh
noinst_SCRIPTS += sanity-quota-old.sh
noinst_SCRIPTS += resolveip
nobase_noinst_SCRIPTS = cfg/local.sh
MPIRUN=$(which mpirun 2>/dev/null) || true
MPI_USER=${MPI_USER:-mpiuser}
SHARED_DIR_LOGS=${SHARED_DIR_LOGS:-""}
+MACHINEFILE_OPTION=${MACHINEFILE_OPTION:-"-machinefile"}
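+# The default above keeps the old "-machinefile" behaviour; the flag is
+# launcher-specific (some MPI stacks, e.g. Open MPI, also accept
+# "-hostfile"), so sites can override it from the environment, for example
+# (hypothetical setup):
+#   MACHINEFILE_OPTION="-hostfile"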
# This is used by a small number of tests to share state between the clients
# running the tests, or in some cases between the servers (e.g. lfsck.sh).
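+# A minimal example, assuming an NFS mount visible to all nodes (path is
+# hypothetical): SHARED_DIR_LOGS=/nfs/shared/test-logs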
local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S -k"
echo "+ $cmd"
- # find out if we need to use srun by checking $SRUN_PARTITION
- if [ "$SRUN_PARTITION" ]; then
- $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
- -n $((num_clients * mbench_THREADS)) -p $SRUN_PARTITION -- $cmd
- else
- mpi_run -np $((num_clients * $mbench_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
- fi
+ # find out if we need to use srun by checking $SRUN_PARTITION
+ if [ "$SRUN_PARTITION" ]; then
+ $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
+ -n $((num_clients * mbench_THREADS)) \
+ -p $SRUN_PARTITION -- $cmd
+ else
+ mpi_run -np $((num_clients * mbench_THREADS)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ fi
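+ # The same launcher dispatch repeats for every MPI load below: srun when
+ # SRUN_PARTITION is set, otherwise mpi_run with the now-configurable
+ # machinefile flag (e.g., hypothetically, MACHINEFILE_OPTION="-hostfile").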
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"
- echo "+ $cmd"
- # find out if we need to use srun by checking $SRUN_PARTITION
- if [ "$SRUN_PARTITION" ]; then
- $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
- -n $((num_clients * simul_THREADS)) -p $SRUN_PARTITION -- $cmd
- else
- mpi_run -np $((num_clients * simul_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
- fi
+ echo "+ $cmd"
+ # find out if we need to use srun by checking $SRUN_PARTITION
+ if [ "$SRUN_PARTITION" ]; then
+ $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
+ -n $((num_clients * simul_THREADS)) -p $SRUN_PARTITION \
+ -- $cmd
+ else
+ mpi_run -np $((num_clients * simul_THREADS)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ fi
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$MDTEST -d $testdir -i $mdtest_iteration -n $mdtest_nFiles"
[ $type = "fpp" ] && cmd="$cmd -u"
- echo "+ $cmd"
- # find out if we need to use srun by checking $SRUN_PARTITION
- if [ "$SRUN_PARTITION" ]; then
- $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
- -n $((num_clients * mdtest_THREADS)) -p $SRUN_PARTITION -- $cmd
- else
- mpi_run -np $((num_clients * mdtest_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
- fi
+ echo "+ $cmd"
+ # find out if we need to use srun by checking $SRUN_PARTITION
+ if [ "$SRUN_PARTITION" ]; then
+ $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
+ -n $((num_clients * mdtest_THREADS)) \
+ -p $SRUN_PARTITION -- $cmd
+ else
+ mpi_run -np $((num_clients * mdtest_THREADS)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ fi
local rc=$?
if [ $rc != 0 ] ; then
-t $ior_xferSize -v -w -r -i $ior_iteration -T $ior_DURATION -k"
[ $type = "fpp" ] && cmd="$cmd -F"
- echo "+ $cmd"
- # find out if we need to use srun by checking $SRUN_PARTITION
- if [ "$SRUN_PARTITION" ]; then
- $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
- -n $((num_clients * ior_THREADS)) -p $SRUN_PARTITION -- $cmd
- else
- mpi_run -np $((num_clients * $ior_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
- fi
+ echo "+ $cmd"
+ # find out if we need to use srun by checking $SRUN_PARTITION
+ if [ "$SRUN_PARTITION" ]; then
+ $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
+ -n $((num_clients * ior_THREADS)) -p $SRUN_PARTITION \
+ -- $cmd
+ else
+ mpi_run -np $((num_clients * ior_THREADS)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ fi
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$MIB -t $testdir -s $mib_xferSize -l $mib_xferLimit \
-L $mib_timeLimit -HI -p mib.$(date +%Y%m%d%H%M%S)"
- echo "+ $cmd"
- # find out if we need to use srun by checking $SRUN_PARTITION
- if [ "$SRUN_PARTITION" ]; then
- $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
- -n $((num_clients * mib_THREADS)) -p $SRUN_PARTITION -- $cmd
- else
- mpi_run -np $((num_clients * mib_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
- fi
+ echo "+ $cmd"
+ # find out if we need to use srun by checking $SRUN_PARTITION
+ if [ "$SRUN_PARTITION" ]; then
+ $SRUN $SRUN_OPTIONS -D $testdir -w $clients -N $num_clients \
+ -n $((num_clients * mib_THREADS)) -p $SRUN_PARTITION \
+ -- $cmd
+ else
+ mpi_run -np $((num_clients * mib_THREADS)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
+ fi
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$CASC_RW -g -d $testdir -n $casc_REP"
- echo "+ $cmd"
- mpi_run -np $((num_clients * $casc_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
+ echo "+ $cmd"
+ mpi_run -np $((num_clients * casc_THREADS)) ${MACHINEFILE_OPTION} \
+ ${MACHINEFILE} $cmd
local rc=$?
if [ $rc != 0 ] ; then
local cmd="write_append_truncate -n $write_REP $file"
- echo "+ $cmd"
- mpi_run -np $((num_clients * $write_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
+ echo "+ $cmd"
+ mpi_run -np $((num_clients * write_THREADS)) ${MACHINEFILE_OPTION} \
+ ${MACHINEFILE} $cmd
local rc=$?
if [ $rc != 0 ] ; then
local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP"
- echo "+ $cmd"
- mpi_run -np $((num_clients * $wdisjoint_THREADS)) \
- -machinefile ${MACHINEFILE} $cmd
+ echo "+ $cmd"
+ mpi_run -np $((num_clients * wdisjoint_THREADS)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $cmd
local rc=$?
if [ $rc != 0 ] ; then
local cmd
local status=0
local subtest
- for i in $(seq 12); do
- subtest="-t $i"
- local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
- echo "+ $cmd"
-
- mpi_run -np $parallel_grouplock_MINTASKS \
- -machinefile ${MACHINEFILE} $cmd
- local rc=$?
- if [ $rc != 0 ] ; then
- error_noexit "parallel_grouplock subtests $subtest failed! $rc"
- else
- echo "parallel_grouplock subtests $subtest PASS"
- fi
- let status=$((status + rc))
- # clear debug to collect one log per one test
- do_nodes $(comma_list $(nodes_list)) lctl clear
- done
- [ $status -eq 0 ] || error "parallel_grouplock status: $status"
- rm -rf $testdir
+ for i in $(seq 12); do
+ subtest="-t $i"
+ local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
+ echo "+ $cmd"
+
+ mpi_run -np $parallel_grouplock_MINTASKS ${MACHINEFILE_OPTION} \
+ ${MACHINEFILE} $cmd
+ local rc=$?
+ if [ $rc != 0 ] ; then
+ error_noexit "parallel_grouplock subtests $subtest " \
+ "failed! $rc"
+ else
+ echo "parallel_grouplock subtests $subtest PASS"
+ fi
+ status=$((status + rc))
+ # clear debug to collect one log per test
+ do_nodes $(comma_list $(nodes_list)) lctl clear
+ done
+ [ $status -eq 0 ] || error "parallel_grouplock status: $status"
+ rm -rf $testdir
}
cleanup_statahead () {
local cmd="$cmd1 $cmd2"
echo "+ $cmd"
- mpi_run -np $((num_clients * 32)) -machinefile ${MACHINEFILE} $cmd
+ mpi_run -np $((num_clients * 32)) ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ $cmd
local rc=$?
if [ $rc != 0 ] ; then
ha_info "Starting $tag"
- while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
- {
- ha_on ${ha_clients[0]} mkdir -p "$dir" && \
- mpirun -np ${#ha_clients[@]} -machinefile "$ha_machine_file" \
- $cmd && \
- ha_on ${ha_clients[0]} rm -rf "$dir"
- } >>"$log" 2>&1 || rc=$?
-
- if ((rc != 0)); then
- ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
- touch "$ha_fail_file"
- touch "$ha_stop_file"
- fi
- echo $rc >"$status"
-
- nr_loops=$((nr_loops + 1))
- done
-
- avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
-
- ha_info "$tag stopped: rc $rc avg loop time $avg_loop_time"
+ while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
+ {
+ ha_on ${ha_clients[0]} mkdir -p "$dir" && \
+ mpirun -np ${#ha_clients[@]} ${MACHINEFILE_OPTION} \
+ "$ha_machine_file" $cmd && \
+ ha_on ${ha_clients[0]} rm -rf "$dir"
+ } >>"$log" 2>&1 || rc=$?
+
+ if ((rc != 0)); then
+ ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
+ touch "$ha_fail_file"
+ touch "$ha_stop_file"
+ fi
+ echo $rc >"$status"
+
+ nr_loops=$((nr_loops + 1))
+ done
+
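+ # Defensive sketch, not in the original code: if the stop file already
+ # exists the loop body never runs, nr_loops stays 0, and the average
+ # below would divide by zero.
+ ((nr_loops > 0)) || nr_loops=1
+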
+ avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
+
+ ha_info "$tag stopped: rc $rc avg loop time $avg_loop_time"
}
ha_start_mpi_loads()
local num=$increment
- while [ $num -le $CLIENTCOUNT ]; do
- list=$(comma_list ${nodes[@]:0:$num})
-
- generate_machine_file $list $machinefile ||
- { error "can not generate machinefile"; exit 1; }
-
- for i in $(seq $iters); do
- mdsrate_cleanup $num $machinefile $nfiles $dir 'f%%d' --ignore
-
- COMMAND="${MDSRATE} --create --nfiles $nfiles --dir $dir --filefmt 'f%%d'"
- mpi_run -np $((num * nthreads)) -machinefile $machinefile ${COMMAND} | tee ${LOG} &
-
- pid=$!
- echo "pid=$pid"
-
- # 2 threads 100000 creates 117 secs
- sleep 20
-
- log "$i : Starting failover on $SINGLEMDS"
- facet_failover $SINGLEMDS
- if ! wait_recovery_complete $SINGLEMDS $((TIMEOUT * 10)); then
- echo "$SINGLEMDS recovery is not completed!"
- kill -9 $pid
- exit 7
- fi
-
- duration=$(do_facet $SINGLEMDS lctl get_param -n $procfile | grep recovery_duration)
-
- res=( "${res[@]}" "$num" )
- res=( "${res[@]}" "$duration" )
- echo "RECOVERY TIME: NFILES=$nfiles number of clients: $num $duration"
- wait $pid
-
- done
- num=$((num + increment))
- done
+ while [ $num -le $CLIENTCOUNT ]; do
+ list=$(comma_list ${nodes[@]:0:$num})
+
+ generate_machine_file $list $machinefile ||
+ { error "can not generate machinefile"; exit 1; }
+
+ for i in $(seq $iters); do
+ mdsrate_cleanup $num $machinefile $nfiles $dir 'f%%d' \
+ --ignore
+
+ COMMAND="${MDSRATE} --create --nfiles $nfiles --dir
+ $dir --filefmt 'f%%d'"
+ mpi_run -np $((num * nthreads)) ${MACHINEFILE_OPTION} \
+ $machinefile ${COMMAND} | tee ${LOG} &
+
+ pid=$!
+ echo "pid=$pid"
+
+ # timing reference: 2 threads creating 100000 files takes ~117 sec
+ sleep 20
+
+ log "$i : Starting failover on $SINGLEMDS"
+ facet_failover $SINGLEMDS
+ if ! wait_recovery_complete $SINGLEMDS \
+ $((TIMEOUT * 10)); then
+ echo "$SINGLEMDS recovery is not completed!"
+ kill -9 $pid
+ exit 7
+ fi
+
+ duration=$(do_facet $SINGLEMDS lctl get_param -n \
+ $procfile | grep recovery_duration)
+
+ res=( "${res[@]}" "$num" )
+ res=( "${res[@]}" "$duration" )
+ echo "RECOVERY TIME: NFILES=$nfiles number of clients: $num $duration"
+ wait $pid
+ done
+ num=$((num + increment))
+ done
mdsrate_cleanup $num $machinefile $nfiles $dir 'f%%d' --ignore
log "===== $0 ### 1 NODE CREATE ###"
- COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
- --nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
- echo "+ ${COMMAND}"
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
+ --nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
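+ # (The line break inside COMMAND is harmless here: ${COMMAND} is expanded
+ # unquoted when executed, so word splitting collapses the embedded newline
+ # and indentation.)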
+ echo "+ ${COMMAND}"
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate creates for a single client failed, aborting"
fi
-
+
log "===== $0 ### 1 NODE UNLINK ###"
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
-
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
+
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate unlink on a single client failed, aborting"
log "===== $0 ### $NUM_CLIENTS NODES CREATE ###"
- COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
- --nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
- echo "+ ${COMMAND}"
- mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
+ --nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
+ echo "+ ${COMMAND}"
+ mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
log "===== $0 ### $NUM_CLIENTS NODES UNLINK ###"
- COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
- --nfiles ${NUM_FILES} --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
- echo "+ ${COMMAND}"
- mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
+ --nfiles ${NUM_FILES} --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
+ echo "+ ${COMMAND}"
+ mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
log "===== $0 ### 1 NODE CREATE ###"
- COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
- --nfiles $NUM_FILES --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
- echo "+ ${COMMAND}"
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
+ --nfiles $NUM_FILES --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
+ echo "+ ${COMMAND}"
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_SINGLE} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
log "===== $0 ### $NUM_CLIENTS NODES CREATE with $THREADS_PER_CLIENT threads per client ###"
- COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
- --nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
- echo "+ ${COMMAND}"
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) -machinefile ${MACHINEFILE} \
- ${COMMAND} | tee ${LOG}
+ COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --create --time ${TIME_PERIOD}
+ --nfiles $NUM_FILES --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
+ echo "+ ${COMMAND}"
+ mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate create on multiple nodes failed, aborting"
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --unlink
--nfiles ${NUM_FILES} --dir ${TESTDIR_MULTI} --filefmt 'f%%d'"
echo "+ ${COMMAND}"
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) -machinefile ${MACHINEFILE} \
- ${COMMAND} | tee ${LOG}
+ mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate unlinks multiple nodes failed, aborting"
--ndirs ${NUM_DIRS} --dirfmt '${DIRfmt}'
--nfiles ${NUM_FILES} --filefmt 'f%%d'"
- echo "+" ${COMMAND}
- # For files creation we can use -np equal to NUM_DIRS
- # This is just a test preparation, does not matter how many threads we use for files creation;
- # we just should be aware that NUM_DIRS is less than or equal to the number of threads np
- mpi_run -np ${NUM_DIRS} -machinefile ${MACHINEFILE} ${COMMAND} 2>&1
+ echo "+" ${COMMAND}
+ # For file creation we can use -np equal to NUM_DIRS.
+ # This is just test preparation; it does not matter how many threads
+ # are used to create the files, as long as NUM_DIRS is less than or
+ # equal to the number of threads -np.
+ mpi_run -np ${NUM_DIRS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} 2>&1
# No lookup if error occurs on file creation, abort.
[ ${PIPESTATUS[0]} != 0 ] && error "mdsrate file creation failed, aborting"
if [ -n "$NOSINGLE" ]; then
echo "NO Test for lookups on a single client."
else
- log "===== $0 ### 1 NODE LOOKUPS ###"
- echo "+" ${COMMAND}
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ log "===== $0 ### 1 NODE LOOKUPS ###"
+ echo "+" ${COMMAND}
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
if [ -n "$NOMULTI" ]; then
echo "NO test for lookups on multiple nodes."
else
- log "===== $0 ### ${NUM_CLIENTS} NODES LOOKUPS ###"
- echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ log "===== $0 ### ${NUM_CLIENTS} NODES LOOKUPS ###"
+ echo "+" ${COMMAND}
+ mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
fi
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --mknod --dir ${TESTDIR}
--nfiles ${NUM_FILES} --filefmt 'f%%d'"
- echo "+" ${COMMAND}
- mpi_run -np ${NUM_THREADS} -machinefile ${MACHINEFILE} ${COMMAND} 2>&1
+ echo "+" ${COMMAND}
+ mpi_run -np ${NUM_THREADS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} 2>&1
# No lookup if error occurs on file creation, abort.
[ ${PIPESTATUS[0]} != 0 ] && error "mdsrate file creation failed, aborting"
if [ -n "$NOSINGLE" ]; then
echo "NO Test for lookups on a single client."
else
- log "===== $0 ### 1 NODE LOOKUPS ###"
- echo "+" ${COMMAND}
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ log "===== $0 ### 1 NODE LOOKUPS ###"
+ echo "+" ${COMMAND}
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
if [ -n "$NOMULTI" ]; then
echo "NO test for lookups on multiple nodes."
else
- log "===== $0 ### ${NUM_CLIENTS} NODES LOOKUPS ###"
- echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ log "===== $0 ### ${NUM_CLIENTS} NODES LOOKUPS ###"
+ echo "+" ${COMMAND}
+ mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
NUM_THREADS=$NUM_CLIENTS
fi
- mpi_run -np ${NUM_THREADS} -machinefile ${MACHINEFILE} ${COMMAND} 2>&1
- [ ${PIPESTATUS[0]} != 0 ] && error "mdsrate file creation failed, aborting"
-
+ mpi_run -np ${NUM_THREADS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} 2>&1
+ [ ${PIPESTATUS[0]} != 0 ] &&
+ error "mdsrate file creation failed, aborting"
fi
COMMAND="${MDSRATE} ${MDSRATE_DEBUG} --stat --time ${TIME_PERIOD}
log "===== $0 ### 1 NODE STAT ###"
echo "+" ${COMMAND}
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
log "===== $0 ### ${NUM_CLIENTS} NODES STAT ###"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
NUM_THREADS=$NUM_CLIENTS
fi
- mpi_run -np ${NUM_THREADS} -machinefile ${MACHINEFILE} ${COMMAND} 2>&1
- [ ${PIPESTATUS[0]} != 0 ] && error "mdsrate file creation failed, aborting"
+ mpi_run -np ${NUM_THREADS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} 2>&1
+ [ ${PIPESTATUS[0]} != 0 ] &&
+ error "mdsrate file creation failed, aborting"
fi
log "===== $0 ### 1 NODE STAT ###"
echo "+" ${COMMAND}
- mpi_run -np 1 -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
-
+ mpi_run -np 1 ${MACHINEFILE_OPTION} ${MACHINEFILE} ${COMMAND} |
+ tee ${LOG}
+
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
error "mdsrate on a single client failed, aborting"
log "===== $0 ### ${NUM_CLIENTS} NODES STAT ###"
echo "+" ${COMMAND}
- mpi_run -np ${NUM_CLIENTS} -machinefile ${MACHINEFILE} ${COMMAND} | tee ${LOG}
+ mpi_run -np ${NUM_CLIENTS} ${MACHINEFILE_OPTION} ${MACHINEFILE} \
+ ${COMMAND} | tee ${LOG}
if [ ${PIPESTATUS[0]} != 0 ]; then
[ -f $LOG ] && sed -e "s/^/log: /" $LOG
# "write_disjoint" test
echo "Part 5. write_disjoint test: see lustre/tests/mpi/write_disjoint.c for details"
if [ -f "$WRITE_DISJOINT" ]; then
- set $TRACE
- MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
- generate_machine_file $NODES_TO_USE $MACHINEFILE
- mpi_run -np $(get_node_count ${NODES_TO_USE//,/ }) -machinefile $MACHINEFILE \
- $WRITE_DISJOINT -f $WRITE_DISJOINT_FILE -n $NUMLOOPS || STATUS=1
+ set $TRACE
+ MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
+ generate_machine_file $NODES_TO_USE $MACHINEFILE
+ mpi_run -np $(get_node_count ${NODES_TO_USE//,/ }) \
+ ${MACHINEFILE_OPTION} $MACHINEFILE $WRITE_DISJOINT \
+ -f $WRITE_DISJOINT_FILE -n $NUMLOOPS || STATUS=1
else
skip_env "$0 : write_disjoint not found "
fi
# need this only if TESTDIR is not default
chmod -R 777 $TESTDIR
- mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) -machinefile ${MACHINEFILE} \
- $IOR -a POSIX -b 1g -o $TESTDIR/IOR-file -s 1 -t 1m -v -w -r 1>$LOG &
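+ # Background IOR load; the usual IOR option meanings apply: -a API,
+ # -b per-task block size, -s segment count, -t transfer size, -w write
+ # phase, -r read-back phase.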
+ mpi_run -np $((NUM_CLIENTS * THREADS_PER_CLIENT)) \
+ ${MACHINEFILE_OPTION} ${MACHINEFILE} $IOR -a POSIX -b 1g \
+ -o $TESTDIR/IOR-file -s 1 -t 1m -v -w -r 1>$LOG &
load_pid=$!
wait $load_pid
if [ ${PIPESTATUS[0]} -eq 0 ]; then
}
mdsrate_cleanup () {
- if [ -d $4 ]; then
- mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6
- rmdir $4
- fi
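+ # Usage (parameter names inferred from the call sites above, e.g. in the
+ # failover loop): mdsrate_cleanup <ntasks> <machinefile> <nfiles> <dir>
+ # <filefmt> [extra mdsrate flags such as --ignore]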
+ if [ -d $4 ]; then
+ mpi_run -np $1 ${MACHINEFILE_OPTION} $2 ${MDSRATE} --unlink \
+ --nfiles $3 --dir $4 --filefmt $5 $6
+ rmdir $4
+ fi
}
delayed_recovery_enabled () {