# Elided numbered listing of a Lustre parallel-test suite: each line keeps its
# original source line number and many intermediate lines are missing, so this
# chunk is not runnable as-is.
# Locate the Lustre tree (default: parent of this script's directory) and
# source the shared test framework plus the per-site config file ($NAME.sh).
5 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
6 . $LUSTRE/tests/test-framework.sh
8 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
# compilebench tunables: checkout dir (empty => test is skipped), number of
# initial kernel dirs, and run count.
13 cbench_DIR=${cbench_DIR:-""}
14 cbench_IDIRS=${cbench_IDIRS:-10}
15 cbench_RUNS=${cbench_RUNS:-10} # FIXME: wiki page requirements is 30, do we really need 30 ?
# Quick-mode ($SLOW=no) reduction of the compilebench settings; the body of
# this 'if' (and its 'fi') is elided from this listing.
17 if [ "$SLOW" = "no" ]; then
# metabench tunables: binary (searched on PATH, may stay empty), file count
# (cut to 10000 in quick mode), and threads per client.
25 METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
26 mbench_NFILES=${mbench_NFILES:-30400}
27 [ "$SLOW" = "no" ] && mbench_NFILES=10000
# Shared MPI machinefile path, reused by every mpi_run-based test below.
28 MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
30 mbench_THREADS=${mbench_THREADS:-4}
# simul tunables. NOTE(review): ':=' (assign-default) is used here, unlike
# the ':-' used for the other tool variables — confirm intentional.
35 SIMUL=${SIMUL:=$(which simul 2> /dev/null || true)}
37 simul_THREADS=${simul_THREADS:-2}
38 simul_REP=${simul_REP:-20}
39 [ "$SLOW" = "no" ] && simul_REP=2
# connectathon tunables: suite dir (empty => test is skipped) and pass count.
44 cnt_DIR=${cnt_DIR:-""}
45 cnt_NRUN=${cnt_NRUN:-10}
46 [ "$SLOW" = "no" ] && cnt_NRUN=2
# cascading_rw tunables: binary, threads per client, repetitions.
51 CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
53 casc_THREADS=${casc_THREADS:-2}
54 casc_REP=${casc_REP:-300}
55 [ "$SLOW" = "no" ] && casc_REP=10
# IOR tunables: per-task block size in GB, run duration in minutes
# (shortened to 5 in quick mode).
60 IOR=${IOR:-$(which IOR 2> /dev/null || true)}
62 ior_THREADS=${ior_THREADS:-2}
63 ior_blockSize=${ior_blockSize:-6} # Gb
64 ior_DURATION=${ior_DURATION:-30} # minutes
65 [ "$SLOW" = "no" ] && ior_DURATION=5
68 # write_append_truncate
# write_append_truncate tunables: threads per client and repetitions
# (reduced in quick mode).
71 write_THREADS=${write_THREADS:-8}
72 write_REP=${write_REP:-10000}
73 [ "$SLOW" = "no" ] && write_REP=100
# write_disjoint tunables.
78 WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint 2> /dev/null || true)}
80 wdisjoint_THREADS=${wdisjoint_THREADS:-4}
81 wdisjoint_REP=${wdisjoint_REP:-10000}
82 [ "$SLOW" = "no" ] && wdisjoint_REP=100
# parallel_grouplock tunables: number of MPI tasks used per subtest run.
88 PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock 2> /dev/null || true)}
89 parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
# Mount and verify the Lustre filesystem before any test runs.
92 check_and_setup_lustre
# Fragment of an option-dump loop (print_opts helper): ${!var} is bash
# indirect expansion of the variable whose name is held in $var.
101 echo "${var}=${!var}"
103 [ -e $MACHINEFILE ] && cat $MACHINEFILE
# Rough time/space budget notes for compilebench (see test_compilebench):
107 # 5 min * cbench_RUNS
111 # compile dir kernel-1 680MB
112 # required space 680MB * cbench_IDIRS = ~7 Gb
# test_compilebench: run compilebench in $DIR/d0.compilebench.
# Skips (skip_env) when the compilebench checkout is absent; shrinks
# cbench_IDIRS when free space is below ~680 MB per initial dir; reports
# failure via error() on a non-zero exit code.
114 test_compilebench() {
115 print_opts cbench_DIR cbench_IDIRS cbench_RUNS
117 [ x$cbench_DIR = x ] &&
118 { skip_env "compilebench not found" && return; }
120 [ -e $cbench_DIR/compilebench ] || \
121 { skip_env "No compilebench build" && return; }
# Free space in KB (POSIX -P df output, 4th column) on the fs under test.
123 local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
124 if [ $space -le $((680 * 1024 * cbench_IDIRS)) ]; then
125 cbench_IDIRS=$(( space / 680 / 1024))
126 [ $cbench_IDIRS = 0 ] && \
127 skip_env "Need free space atleast 680 Mb, have $space" && return
129 log free space=$space, reducing initial dirs to $cbench_IDIRS
132 # t-f _base needs to be modified to set tdir properly
133 # for new "test_foo" function names
134 # local testdir=$DIR/$tdir
135 local testdir=$DIR/d0.compilebench
140 local cmd="./compilebench -D $testdir -i $cbench_IDIRS -r $cbench_RUNS --makej"
# The command execution and the rc=$? capture are elided from this listing.
149 [ $rc = 0 ] || error "compilebench failed: $rc"
152 run_test compilebench "compilebench"
# test_metabench body (the "test_metabench() {" opener is elided above this
# line): MPI-driven metadata benchmark creating/stat-ing mbench_NFILES files
# in $DIR/d0.metabench across all configured clients.
155 [ x$METABENCH = x ] &&
156 { skip_env "metabench not found" && return; }
158 local clients=$CLIENTS
159 [ -z $clients ] && clients=$(hostname)
# NOTE(review): num_clients is not declared 'local' here, unlike in the other
# tests in this file — it leaks into the global scope.
161 num_clients=$(get_node_count ${clients//,/ })
164 # Need space estimation here.
166 generate_machine_file $clients $MACHINEFILE || \
167 error "can not generate machinefile $MACHINEFILE"
169 print_opts METABENCH clients mbench_NFILES mbench_THREADS
171 local testdir=$DIR/d0.metabench
173 # mpi_run uses mpiuser
176 # -C Run the file creation tests.
177 # -S Run the file stat tests.
178 # -c nfile Number of files to be used in each test.
179 # -k Cleanup. Remove the test directories.
180 local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S -k"
# One MPI rank per client thread; the rc=$? capture is elided here.
182 mpi_run -np $((num_clients * $mbench_THREADS)) -machinefile ${MACHINEFILE} $cmd
184 if [ $rc != 0 ] ; then
185 error "metabench failed! $rc"
189 run_test metabench "metabench"
# test_simul body (the "test_simul() {" opener and the SIMUL emptiness check
# are elided above this line): MPI test of simultaneous filesystem operations
# in $DIR/d0.simul.
193 { skip_env "simul not found" && return; }
195 local clients=$CLIENTS
196 [ -z $clients ] && clients=$(hostname)
198 local num_clients=$(get_node_count ${clients//,/ })
201 # Need space estimation here.
203 generate_machine_file $clients $MACHINEFILE || \
204 error "can not generate machinefile $MACHINEFILE"
206 print_opts SIMUL clients simul_REP simul_THREADS
208 local testdir=$DIR/d0.simul
210 # mpi_run uses mpiuser
213 # -n # : repeat each test # times
214 # -N # : repeat the entire set of tests # times
# NOTE(review): simul_REP is passed for both -n and -N — confirm intentional.
216 local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"
# The rc=$? capture after the run is elided from this listing.
219 mpi_run -np $((num_clients * $simul_THREADS)) -machinefile ${MACHINEFILE} $cmd
222 if [ $rc != 0 ] ; then
223 error "simul failed! $rc"
227 run_test simul "simul"
# test_connectathon: run the connectathon "runtests" suite from cnt_DIR
# against $DIR/d0.connectathon; skips when the suite dir is not configured.
229 test_connectathon() {
230 print_opts cnt_DIR cnt_NRUN
# The cnt_DIR emptiness check preceding this brace group is elided.
233 { skip_env "connectathon dir not found" && return; }
235 [ -e $cnt_DIR/runtests ] || \
236 { skip_env "No connectathon runtests found" && return; }
238 local testdir=$DIR/d0.connectathon
244 # -f a quick functionality test
245 # -a run basic, general, special, and lock tests
246 # -N numpasses - will be passed to the runtests script. This argument
247 # is optional. It specifies the number of times to run
250 local cmd="./runtests -N $cnt_NRUN -a -f $testdir"
# Execution (from within cnt_DIR, presumably) and rc=$? capture are elided.
259 [ $rc = 0 ] || error "connectathon failed: $rc"
262 run_test connectathon "connectathon"
# test_ior body (the "test_ior() {" opener and the IOR emptiness check are
# elided above this line): run the IOR parallel I/O benchmark, auto-shrinking
# the per-task block size so the aggregate data set fits in half of the free
# space.
266 { skip_env "IOR not found" && return; }
268 local clients=$CLIENTS
269 [ -z $clients ] && clients=$(hostname)
271 local num_clients=$(get_node_count ${clients//,/ })
# Free space in KB; required space is ior_blockSize GB per task, i.e.
# ior_blockSize * 1024 * 1024 KB * num_clients * ior_THREADS.
273 local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
274 echo "+ $ior_blockSize * 1024 * 1024 * $num_clients * $ior_THREADS "
275 if [ $((space / 2)) -le $(( ior_blockSize * 1024 * 1024 * num_clients * ior_THREADS)) ]; then
276 echo "+ $space * 9/10 / 1024 / 1024 / $num_clients / $ior_THREADS"
277 ior_blockSize=$(( space /2 /1024 /1024 / num_clients / ior_THREADS ))
278 [ $ior_blockSize = 0 ] && \
279 skip_env "Need free space more than ($num_clients * $ior_THREADS )Gb: $((num_clients*ior_THREADS *1024 *1024*2)), have $space" && return
281 echo "free space=$space, Need: $num_clients x $ior_THREADS x $ior_blockSize Gb (blockSize reduced to $ior_blockSize Gb)"
284 generate_machine_file $clients $MACHINEFILE || \
285 error "can not generate machinefile $MACHINEFILE"
287 print_opts IOR ior_THREADS ior_DURATION MACHINEFILE
289 local testdir=$DIR/d0.ior
291 # mpi_run uses mpiuser
# Stripe the test dir across all OSTs (-c -1) for maximum aggregate bandwidth.
293 $LFS setstripe $testdir -c -1
296 # -b N blockSize -- contiguous bytes to write per task (e.g.: 8, 4k, 2m, 1g)"
298 # -t N transferSize -- size of transfer in bytes (e.g.: 8, 4k, 2m, 1g)"
299 # -w writeFile -- write file"
300 # -r readFile -- read existing file"
301 # -T maxTimeDuration -- max time in minutes to run tests"
302 # -k keepFile -- keep testFile(s) on program exit
303 local cmd="$IOR -a POSIX -b ${ior_blockSize}g -o $testdir/iorData -t 2m -v -w -r -T $ior_DURATION -k"
# rc=$? capture and the closing of the function are elided from this listing.
306 mpi_run -np $((num_clients * $ior_THREADS)) -machinefile ${MACHINEFILE} $cmd
309 if [ $rc != 0 ] ; then
310 error "ior failed! $rc"
# test_cascading_rw: MPI cascading read/write test in $DIR/d0.cascading_rw;
# explicitly skipped when running in NFS re-export (NFSCLIENT) mode.
316 test_cascading_rw() {
317 if [ "$NFSCLIENT" ]; then
318 skip "skipped for NFSCLIENT mode"
# The CASC_RW emptiness check preceding this brace group is elided.
322 { skip_env "cascading_rw not found" && return; }
324 local clients=$CLIENTS
325 [ -z $clients ] && clients=$(hostname)
# NOTE(review): num_clients is not declared 'local' here (leaks to globals).
327 num_clients=$(get_node_count ${clients//,/ })
330 # Need space estimation here.
332 generate_machine_file $clients $MACHINEFILE || \
333 error "can not generate machinefile $MACHINEFILE"
335 print_opts CASC_RW clients casc_THREADS casc_REP MACHINEFILE
337 local testdir=$DIR/d0.cascading_rw
339 # mpi_run uses mpiuser
343 # -n: repeat test # times
345 local cmd="$CASC_RW -g -d $testdir -n $casc_REP"
# The rc=$? capture after the run is elided from this listing.
348 mpi_run -np $((num_clients * $casc_THREADS)) -machinefile ${MACHINEFILE} $cmd
351 if [ $rc != 0 ] ; then
352 error "cascading_rw failed! $rc"
356 run_test cascading_rw "cascading_rw"
# test_write_append_truncate: concurrent write/append/truncate consistency
# test in which all MPI ranks operate on one shared file.
358 test_write_append_truncate() {
359 # location is lustre/tests dir
360 if ! which write_append_truncate > /dev/null 2>&1 ; then
361 skip_env "write_append_truncate not found"
365 local clients=$CLIENTS
366 [ -z $clients ] && clients=$(hostname)
368 local num_clients=$(get_node_count ${clients//,/ })
371 # Need space estimation here.
373 generate_machine_file $clients $MACHINEFILE || \
374 error "can not generate machinefile $MACHINEFILE"
376 local testdir=$DIR/d0.write_append_truncate
# Single shared target file for all ranks.
377 local file=$testdir/f0.wat
379 print_opts clients write_REP write_THREADS MACHINEFILE
382 # mpi_run uses mpiuser
385 local cmd="write_append_truncate -n $write_REP $file"
# The rc=$? capture after the run is elided from this listing.
388 mpi_run -np $((num_clients * $write_THREADS)) -machinefile ${MACHINEFILE} $cmd
391 if [ $rc != 0 ] ; then
392 error "write_append_truncate failed! $rc"
397 run_test write_append_truncate "write_append_truncate"
# test_write_disjoint: MPI test where ranks write disjoint regions of a
# shared file in $DIR/d0.write_disjoint; skips if the binary is absent.
399 test_write_disjoint() {
400 [ x$WRITE_DISJOINT = x ] &&
401 { skip_env "write_disjoint not found" && return; }
403 local clients=$CLIENTS
404 [ -z $clients ] && clients=$(hostname)
406 local num_clients=$(get_node_count ${clients//,/ })
409 # Need space estimation here.
411 generate_machine_file $clients $MACHINEFILE || \
412 error "can not generate machinefile $MACHINEFILE"
414 print_opts WRITE_DISJOINT clients wdisjoint_THREADS wdisjoint_REP MACHINEFILE
415 local testdir=$DIR/d0.write_disjoint
417 # mpi_run uses mpiuser
420 local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP"
# The rc=$? capture after the run is elided from this listing.
423 mpi_run -np $((num_clients * $wdisjoint_THREADS)) -machinefile ${MACHINEFILE} $cmd
426 if [ $rc != 0 ] ; then
427 error "write_disjoint failed! $rc"
431 run_test write_disjoint "write_disjoint"
# test_parallel_grouplock: run the 12 grouplock subtests, one mpi_run each,
# accumulating per-subtest failures into 'status' (error_noexit keeps the
# loop going); fail the test only at the end if status is non-zero.
433 test_parallel_grouplock() {
434 [ x$PARALLEL_GROUPLOCK = x ] &&
435 { skip "PARALLEL_GROUPLOCK not found" && return; }
437 local clients=$CLIENTS
438 [ -z $clients ] && clients=$(hostname)
440 local num_clients=$(get_node_count ${clients//,/ })
442 generate_machine_file $clients $MACHINEFILE || \
443 error "can not generate machinefile $MACHINEFILE"
445 print_opts clients parallel_grouplock_MINTASKS MACHINEFILE
447 local testdir=$DIR/d0.parallel_grouplock
449 # mpi_run uses mpiuser
# Disable client read/write chunking on all clients — presumably required
# so group locks are exercised directly; confirm against llite tunables docs.
452 do_nodes $clients "lctl set_param llite.*.max_rw_chunk=0" ||
453 error "set_param max_rw_chunk=0 failed "
# Loop over subtests 1..12; the '$subtest' assignment and the rc=$? capture
# are elided from this listing.
458 for i in $(seq 12); do
460 local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
463 mpi_run -np $parallel_grouplock_MINTASKS -machinefile ${MACHINEFILE} $cmd
465 if [ $rc != 0 ] ; then
466 error_noexit "parallel_grouplock subtests $subtest failed! $rc"
468 echo "parallel_grouplock subtests $subtest PASS"
470 let status=$((status + rc))
471 # clear debug to collect one log per one test
472 do_nodes $(comma_list $(nodes_list)) lctl clear
474 [ $status -eq 0 ] || error "parallel_grouplock status: $status"
477 run_test parallel_grouplock "parallel_grouplock"
# statahead tunables: number of extra client mountpoints and files to create.
479 statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
480 statahead_NUMFILES=${statahead_NUMFILES:-500000}
# cleanup_statahead: unmount the extra per-client mountpoints created by
# test_statahead. Presumably takes (clients, mntpt_root, num_mntpts) as
# positional args — the argument-parsing lines are elided from this listing.
482 cleanup_statahead () {
489 for i in $(seq 0 $num_mntpts);do
490 zconf_umount_clients $clients ${mntpt_root}$i ||
491 error_exit "Failed to umount lustre on ${mntpt_root}$i"
# test_statahead body (the "test_statahead() {" opener is elided above this
# line): create many files, then mount several extra client instances and
# list the directory through each of them to exercise MDC statahead.
499 local dir=d0.statahead
500 # FIXME has to use DIR
501 local testdir=$DIR/$dir
505 local num_files=$statahead_NUMFILES
# Scale the file count down when free inodes are insufficient (branch elided).
507 local IFree=$(inodes_available)
508 if [ $IFree -lt $num_files ]; then
# NOTE(review): the log line says "f-%d" but the command uses "$f-%d"; if $f
# is unset the files are actually named "-0", "-1", ... — confirm/fix.
514 log "createmany -o $testdir/f-%d $num_files"
515 createmany -o $testdir/$f-%d $num_files
518 if [ $rc != 0 ] ; then
519 error "createmany failed to create $rc"
523 local num_mntpts=$statahead_NUMMNTPTS
524 local mntpt_root=$TMP/mntpt/lustre
# NOTE(review): mntopts is not declared 'local' (leaks to global scope).
525 mntopts=${MNTOPTSTATAHEAD:-$MOUNTOPT}
527 local clients=$CLIENTS
528 [ -z $clients ] && clients=$(hostname)
530 echo "Mounting $num_mntpts lustre clients starts on $clients"
# Ensure the extra mounts are torn down on any exit or error path.
531 trap "cleanup_statahead $clients $mntpt_root $num_mntpts" EXIT ERR
532 for i in $(seq 0 $num_mntpts);do
533 zconf_mount_clients $clients ${mntpt_root}$i $mntopts ||
534 error_exit "Failed to mount lustre on ${mntpt_root}$i on $clients"
# Drop cached MDC locks so the listing below actually issues statahead RPCs.
537 do_rpc_nodes $clients cancel_lru_locks mdc
539 do_rpc_nodes $clients do_ls $mntpt_root $num_mntpts $dir
541 cleanup_statahead $clients $mntpt_root $num_mntpts
544 run_test statahead "statahead test, multiple clients"
# Suite footer: print the completion banner, clean up/unmount Lustre, then
# dump the suite log and exit non-zero if it recorded any FAIL lines.
546 equals_msg `basename $0`: test complete, cleaning up
547 check_and_cleanup_lustre
548 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true