# Test-suite environment setup: locate the Lustre tree relative to this
# script, then source the shared test framework and the per-setup config.
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}

# compilebench: checkout dir (empty = skip the test), initial dirs, runs.
cbench_DIR=${cbench_DIR:-""}
cbench_IDIRS=${cbench_IDIRS:-4}
cbench_RUNS=${cbench_RUNS:-4} # FIXME: wiki page requirement is 30, do we really need 30 ?

# Quick-run adjustments.  NOTE(review): the body and closing "fi" of this
# "if" are not present in this revision — TODO confirm against the full file.
if [ "$SLOW" = "no" ]; then

# metabench: binary path (empty when not installed), file count, threads.
METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
mbench_NFILES=${mbench_NFILES:-30400}
[ "$SLOW" = "no" ] && mbench_NFILES=10000
# Machine file handed to mpi_run; shared by all MPI-based tests below.
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
mbench_THREADS=${mbench_THREADS:-4}

# simul: binary path, threads per client, repetitions.
SIMUL=${SIMUL:=$(which simul 2> /dev/null || true)}
simul_THREADS=${simul_THREADS:-2}
simul_REP=${simul_REP:-20}
[ "$SLOW" = "no" ] && simul_REP=2

# connectathon: checkout dir (empty = skip the test) and number of passes.
cnt_DIR=${cnt_DIR:-""}
cnt_NRUN=${cnt_NRUN:-10}
[ "$SLOW" = "no" ] && cnt_NRUN=2

# cascading_rw: binary path, threads per client, repetitions.
CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
casc_THREADS=${casc_THREADS:-2}
casc_REP=${casc_REP:-300}
[ "$SLOW" = "no" ] && casc_REP=10

# IOR: binary path, threads per client, per-task block size and duration.
IOR=${IOR:-$(which IOR 2> /dev/null || true)}
ior_THREADS=${ior_THREADS:-2}
ior_blockSize=${ior_blockSize:-6} # Gb
ior_DURATION=${ior_DURATION:-30} # minutes
[ "$SLOW" = "no" ] && ior_DURATION=5

# write_append_truncate: threads per client and repetitions.
write_THREADS=${write_THREADS:-8}
write_REP=${write_REP:-10000}
[ "$SLOW" = "no" ] && write_REP=100

# write_disjoint: binary path, threads per client, repetitions.
WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint 2> /dev/null || true)}
wdisjoint_THREADS=${wdisjoint_THREADS:-4}
wdisjoint_REP=${wdisjoint_REP:-10000}
[ "$SLOW" = "no" ] && wdisjoint_REP=100
85 check_and_setup_lustre
87 get_mpiuser_id $MPI_USER
88 MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
89 $GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
100 [ -e $MACHINEFILE ] && cat $MACHINEFILE
# compilebench: simulated compile workload on the mounted filesystem.
# Runtime: ~5 min * cbench_RUNS.
# Space: the kernel-1 compile dir is ~680MB, so the test needs about
# 680MB * cbench_IDIRS = ~7 Gb with the defaults.
test_compilebench() {
    print_opts cbench_DIR cbench_IDIRS cbench_RUNS

    # Skip unless a compilebench checkout was provided and built.
    [ x$cbench_DIR = x ] &&
        { skip "compilebench not found" && return; }

    [ -e $cbench_DIR/compilebench ] || \
        { skip "No compilebench build" && return; }

    # df -P reports 1K blocks; shrink the initial dir count so the data
    # fits in the available space (680MB == 680 * 1024 KB per dir).
    local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
    if [ $space -le $((680 * 1024 * cbench_IDIRS)) ]; then
        cbench_IDIRS=$(( space / 680 / 1024))
        [ $cbench_IDIRS = 0 ] && \
            skip "Need free space atleast 680 Mb, have $space" && return

        log free space=$space, reducing initial dirs to $cbench_IDIRS

    # t-f _base needs to be modified to set tdir properly
    # for the new "test_foo" function names
    # local testdir=$DIR/$tdir
    local testdir=$DIR/d0.compilebench

    local cmd="./compilebench -D $testdir -i $cbench_IDIRS -r $cbench_RUNS --makej"

    # NOTE(review): $rc is expected to hold compilebench's exit status;
    # the assignment is not present in this revision — TODO confirm.
    [ $rc = 0 ] || error "compilebench failed: $rc"

run_test compilebench "compilebench"
    # Interior of test_metabench (the function header is not present in
    # this revision).  Skip when no metabench binary was found.
    [ x$METABENCH = x ] &&
        { skip "metabench not found" && return; }

    # Run on all configured clients, defaulting to the local host.
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    # NOTE(review): not declared "local" (unlike the other tests), so
    # this assignment leaks into the global scope.
    num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || \
        error "can not generate machinefile $MACHINEFILE"

    print_opts METABENCH clients mbench_NFILES mbench_THREADS

    local testdir=$DIR/d0.metabench

    # mpi_run uses mpiuser
    # metabench flags:
    # -C        Run the file creation tests.
    # -S        Run the file stat tests.
    # -c nfile  Number of files to be used in each test.
    # -k        Cleanup.  Remove the test directories.
    local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S -k"

    # One MPI rank per configured thread on every client node.
    mpi_run -np $((num_clients * $mbench_THREADS)) -machinefile ${MACHINEFILE} $cmd

    if [ $rc != 0 ] ; then
        error "metabench failed! $rc"

run_test metabench "metabench"
    # Interior of test_simul.  NOTE(review): the condition line of this
    # skip guard is not present in this revision.
        { skip "simul not found" && return; }

    # Run on all configured clients, defaulting to the local host.
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || \
        error "can not generate machinefile $MACHINEFILE"

    print_opts SIMUL clients simul_REP simul_THREADS

    local testdir=$DIR/d0.simul

    # mpi_run uses mpiuser
    # simul flags:
    # -n # : repeat each test # times
    # -N # : repeat the entire set of tests # times
    local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"

    mpi_run -np $((num_clients * $simul_THREADS)) -machinefile ${MACHINEFILE} $cmd

    if [ $rc != 0 ] ; then
        error "simul failed! $rc"

run_test simul "simul"
# connectathon: run the connectathon "runtests" suite against the mounted
# filesystem.
test_connectathon() {
    print_opts cnt_DIR cnt_NRUN

    # Skip unless a connectathon checkout with a runtests script exists.
    # NOTE(review): the condition line of the first guard is not present
    # in this revision.
        { skip "connectathon dir not found" && return; }

    [ -e $cnt_DIR/runtests ] || \
        { skip "No connectathon runtests found" && return; }

    local testdir=$DIR/d0.connectathon

    # runtests flags:
    # -f            a quick functionality test
    # -a            run basic, general, special, and lock tests
    # -N numpasses  will be passed to the runtests script.  This argument
    #               is optional.  It specifies the number of times to run
    local cmd="./runtests -N $cnt_NRUN -a -f $testdir"

    # NOTE(review): the $rc assignment is not present in this revision.
    [ $rc = 0 ] || error "connectathon failed: $rc"

run_test connectathon "connectathon"
    # Interior of test_ior.  NOTE(review): the condition line of this
    # skip guard is not present in this revision.
        { skip "IOR not found" && return; }

    # Run on all configured clients, defaulting to the local host.
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Shrink the per-task block size so that the aggregate data set
    # (blockSize Gb * clients * threads) fits in half the free space.
    # df -P reports 1K blocks, hence the 1024 * 1024 factors.
    local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
    echo "+ $ior_blockSize * 1024 * 1024 * $num_clients * $ior_THREADS "
    if [ $((space / 2)) -le $(( ior_blockSize * 1024 * 1024 * num_clients * ior_THREADS)) ]; then
        echo "+ $space * 9/10 / 1024 / 1024 / $num_clients / $ior_THREADS"
        ior_blockSize=$(( space /2 /1024 /1024 / num_clients / ior_THREADS ))
        [ $ior_blockSize = 0 ] && \
            skip "Need free space more than ($num_clients * $ior_THREADS )Gb: $((num_clients*ior_THREADS *1024 *1024*2)), have $space" && return

        echo "free space=$space, Need: $num_clients x $ior_THREADS x $ior_blockSize Gb (blockSize reduced to $ior_blockSize Gb)"

    generate_machine_file $clients $MACHINEFILE || \
        error "can not generate machinefile $MACHINEFILE"

    print_opts IOR ior_THREADS ior_DURATION MACHINEFILE

    local testdir=$DIR/d0.ior

    # mpi_run uses mpiuser
    # IOR flags:
    # -b N  blockSize -- contiguous bytes to write per task (e.g.: 8, 4k, 2m, 1g)"
    # -t N  transferSize -- size of transfer in bytes (e.g.: 8, 4k, 2m, 1g)"
    # -w    writeFile -- write file"
    # -r    readFile -- read existing file"
    # -T    maxTimeDuration -- max time in minutes to run tests"
    # -k    keepFile -- keep testFile(s) on program exit
    local cmd="$IOR -a POSIX -b ${ior_blockSize}g -o $testdir/iorData -t 2m -v -w -r -T $ior_DURATION -k"

    mpi_run -np $((num_clients * $ior_THREADS)) -machinefile ${MACHINEFILE} $cmd

    if [ $rc != 0 ] ; then
        error "ior failed! $rc"
# cascading_rw: MPI-launched read/write test over a shared test directory.
test_cascading_rw() {
    # Skip when the cascading_rw binary was not found.  NOTE(review):
    # the guard's condition line is not present in this revision.
        { skip "cascading_rw not found" && return; }

    # Run on all configured clients, defaulting to the local host.
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    # NOTE(review): not declared "local" (unlike most other tests), so
    # this assignment leaks into the global scope.
    num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || \
        error "can not generate machinefile $MACHINEFILE"

    print_opts CASC_RW clients casc_THREADS casc_REP MACHINEFILE

    local testdir=$DIR/d0.cascading_rw

    # mpi_run uses mpiuser
    # cascading_rw flags:
    # -n: repeat test # times
    local cmd="$CASC_RW -g -d $testdir -n $casc_REP"

    mpi_run -np $((num_clients * $casc_THREADS)) -machinefile ${MACHINEFILE} $cmd

    if [ $rc != 0 ] ; then
        error "cascading_rw failed! $rc"

run_test cascading_rw "cascading_rw"
# write_append_truncate: MPI test where every rank operates on one shared
# file ($testdir/f0.wat).
test_write_append_truncate() {
    # location is lustre/tests dir
    if ! which write_append_truncate > /dev/null 2>&1 ; then
        skip "write_append_truncate not found"

    # Run on all configured clients, defaulting to the local host.
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || \
        error "can not generate machinefile $MACHINEFILE"

    # All MPI ranks share the single test file below.
    local testdir=$DIR/d0.write_append_truncate
    local file=$testdir/f0.wat

    print_opts clients write_REP write_THREADS MACHINEFILE

    # mpi_run uses mpiuser
    local cmd="write_append_truncate -n $write_REP $file"

    mpi_run -np $((num_clients * $write_THREADS)) -machinefile ${MACHINEFILE} $cmd

    if [ $rc != 0 ] ; then
        error "write_append_truncate failed! $rc"

run_test write_append_truncate "write_append_truncate"
# write_disjoint: MPI test where all ranks write to the same file
# ($testdir/file).
test_write_disjoint() {
    # Skip unless the write_disjoint binary was found at startup.
    [ x$WRITE_DISJOINT = x ] &&
        { skip "write_disjoint not found" && return; }

    # Run on all configured clients, defaulting to the local host.
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || \
        error "can not generate machinefile $MACHINEFILE"

    print_opts WRITE_DISJOINT clients wdisjoint_THREADS wdisjoint_REP MACHINEFILE
    local testdir=$DIR/d0.write_disjoint

    # mpi_run uses mpiuser
    local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP"

    mpi_run -np $((num_clients * $wdisjoint_THREADS)) -machinefile ${MACHINEFILE} $cmd

    if [ $rc != 0 ] ; then
        error "write_disjoint failed! $rc"

run_test write_disjoint "write_disjoint"
# Completion banner and teardown of the Lustre test setup.
equals_msg $(basename $0): test complete, cleaning up
check_and_cleanup_lustre
# When a suite log exists, dump it and fail the script if it recorded a
# FAIL; the trailing "|| true" keeps the normal path exiting 0.
{ [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1; } || true