LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
ALWAYS_EXCEPT="parallel_grouplock $PARALLEL_SCALE_EXCEPT"
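
# Tests named in ALWAYS_EXCEPT are skipped by the run_test harness in
# test-framework.sh on every run; parallel_grouplock is excluded here, and
# callers can exclude more tests via $PARALLEL_SCALE_EXCEPT.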
# compilebench
cbench_DIR=${cbench_DIR:-""}
cbench_IDIRS=${cbench_IDIRS:-4}
# FIXME: the wiki page requirement is 30 runs; do we really need 30?
cbench_RUNS=${cbench_RUNS:-4}
if [ "$SLOW" = "no" ]; then
    cbench_IDIRS=2
    cbench_RUNS=2
fi
# metabench
METABENCH=${METABENCH:-$(which metabench 2> /dev/null || true)}
mbench_NFILES=${mbench_NFILES:-30400}
[ "$SLOW" = "no" ] && mbench_NFILES=10000
MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
mbench_THREADS=${mbench_THREADS:-4}
# simul
SIMUL=${SIMUL:-$(which simul 2> /dev/null || true)}
simul_THREADS=${simul_THREADS:-2}
simul_REP=${simul_REP:-20}
[ "$SLOW" = "no" ] && simul_REP=2
# connectathon
cnt_DIR=${cnt_DIR:-""}
cnt_NRUN=${cnt_NRUN:-10}
[ "$SLOW" = "no" ] && cnt_NRUN=2
# cascading_rw
CASC_RW=${CASC_RW:-$(which cascading_rw 2> /dev/null || true)}
casc_THREADS=${casc_THREADS:-2}
casc_REP=${casc_REP:-300}
[ "$SLOW" = "no" ] && casc_REP=10
# IOR
IOR=${IOR:-$(which IOR 2> /dev/null || true)}
ior_THREADS=${ior_THREADS:-2}
ior_blockSize=${ior_blockSize:-6}   # GiB per task
ior_DURATION=${ior_DURATION:-30}    # minutes
[ "$SLOW" = "no" ] && ior_DURATION=5
# write_append_truncate
write_THREADS=${write_THREADS:-8}
write_REP=${write_REP:-10000}
[ "$SLOW" = "no" ] && write_REP=100
# write_disjoint
WRITE_DISJOINT=${WRITE_DISJOINT:-$(which write_disjoint 2> /dev/null || true)}
wdisjoint_THREADS=${wdisjoint_THREADS:-4}
wdisjoint_REP=${wdisjoint_REP:-10000}
[ "$SLOW" = "no" ] && wdisjoint_REP=100
# parallel_grouplock
PARALLEL_GROUPLOCK=${PARALLEL_GROUPLOCK:-$(which parallel_grouplock 2> /dev/null || true)}
parallel_grouplock_MINTASKS=${parallel_grouplock_MINTASKS:-5}
check_and_setup_lustre

get_mpiuser_id $MPI_USER
MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
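
# The MPI jobs below run as the unprivileged $MPI_USER: mpi_run wraps each
# command in $MPI_RUNAS (the runas helper switches to MPI_USER_UID/GID),
# since MPI launchers generally refuse to run as root. Under Kerberos
# ($GSS_KRB5) the user's TGT is refreshed so RPCs stay authenticated.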
print_opts () {
    local var

    echo OPTIONS:

    for i in "$@"; do
        var=$i
        echo "${var}=${!var}"
    done
    [ -e $MACHINEFILE ] && cat $MACHINEFILE
}
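
# Usage sketch for print_opts: it takes variable *names* and prints name=value
# pairs via bash indirect expansion (${!var}), e.g.
#   print_opts cbench_DIR cbench_RUNS
# prints the current values of cbench_DIR and cbench_RUNS, then dumps the
# machine file if one exists.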
# Runtime: about 5 min * cbench_RUNS
# Space: one kernel-1 compile dir is ~680 MB, so the required space is
# 680 MB * cbench_IDIRS (~2.7 GB with the default cbench_IDIRS=4)
test_compilebench() {
    print_opts cbench_DIR cbench_IDIRS cbench_RUNS

    [ x$cbench_DIR = x ] &&
        { skip_env "compilebench not found" && return; }

    [ -e $cbench_DIR/compilebench ] ||
        { skip_env "No compilebench build" && return; }
    # df -P reports free space in 1K blocks
    local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
    if [ $space -le $((680 * 1024 * cbench_IDIRS)) ]; then
        cbench_IDIRS=$((space / 680 / 1024))
        [ $cbench_IDIRS = 0 ] &&
            skip_env "Need at least 680 MB free, have ${space}KB" && return
        log "free space=${space}KB, reducing initial dirs to $cbench_IDIRS"
    fi
    # t-f _base needs to be modified to set tdir properly
    # for the new "test_foo" function names
    # local testdir=$DIR/$tdir
    local testdir=$DIR/d0.compilebench
    mkdir -p $testdir

    local savePWD=$PWD
    cd $cbench_DIR
    local cmd="./compilebench -D $testdir -i $cbench_IDIRS -r $cbench_RUNS --makej"

    log "$cmd"

    local rc=0
    eval $cmd
    rc=$?

    cd $savePWD
    [ $rc = 0 ] || error "compilebench failed: $rc"
    rm -rf $testdir
}
run_test compilebench "compilebench"
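
# Invocation sketch (the config name and cbench_DIR path are examples, not
# fixed values): to run just this test against an already-built compilebench
# tree, something like
#   cbench_DIR=/path/to/compilebench ONLY=compilebench NAME=local sh parallel-scale.sh
# works, since run_test honors the ONLY variable from test-framework.sh.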
test_metabench() {
    [ x$METABENCH = x ] &&
        { skip_env "metabench not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })
    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts METABENCH clients mbench_NFILES mbench_THREADS

    local testdir=$DIR/d0.metabench
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir

    # -C          Run the file creation tests.
    # -S          Run the file stat tests.
    # -c nfile    Number of files to be used in each test.
    # -k          Cleanup.  Remove the test directories.
    local cmd="$METABENCH -w $testdir -c $mbench_NFILES -C -S -k"
    echo "+ $cmd"
    mpi_run -np $((num_clients * mbench_THREADS)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "metabench failed! $rc"
    fi
    rm -rf $testdir
}
run_test metabench "metabench"
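
# Note on job sizing, shared by all the MPI tests below: the rank count is one
# rank per thread per client, i.e. -np $((num_clients * <tool>_THREADS)), and
# the machine file built by generate_machine_file spreads those ranks over the
# nodes in $CLIENTS.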
199 if [ "$NFSCLIENT" ]; then
200 skip "skipped for NFSCLIENT mode"
205 { skip_env "simul not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts SIMUL clients simul_REP simul_THREADS

    local testdir=$DIR/d0.simul
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir

    # -n # : repeat each test # times
    # -N # : repeat the entire set of tests # times
    local cmd="$SIMUL -d $testdir -n $simul_REP -N $simul_REP"
    echo "+ $cmd"
    mpi_run -np $((num_clients * simul_THREADS)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "simul failed! $rc"
    fi
    rm -rf $testdir
}
run_test simul "simul"
test_connectathon() {
    print_opts cnt_DIR cnt_NRUN

    [ x$cnt_DIR = x ] &&
        { skip_env "connectathon dir not found" && return; }

    [ -e $cnt_DIR/runtests ] ||
        { skip_env "No connectathon runtests found" && return; }
    local testdir=$DIR/d0.connectathon
    mkdir -p $testdir

    local savePWD=$PWD
    cd $cnt_DIR

    # -f    a quick functionality test
    # -a    run basic, general, special, and lock tests
    # -N numpasses - will be passed to the runtests script.  This argument
    #       is optional.  It specifies the number of times to run through
    #       the tests.
    local cmd="./runtests -N $cnt_NRUN -a -f $testdir"
    log "$cmd"

    local rc=0
    eval $cmd
    rc=$?

    cd $savePWD
    [ $rc = 0 ] || error "connectathon failed: $rc"
    rm -rf $testdir
}
run_test connectathon "connectathon"
test_ior() {
    [ x$IOR = x ] &&
        { skip_env "IOR not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })
    # df -P reports free space in 1K blocks
    local space=$(df -P $DIR | tail -n 1 | awk '{ print $4 }')
    echo "required space KB: $ior_blockSize * 1024 * 1024 * $num_clients * $ior_THREADS"
    if [ $((space / 2)) -le $((ior_blockSize * 1024 * 1024 * num_clients * ior_THREADS)) ]; then
        echo "reduced blockSize GB: $space / 2 / 1024 / 1024 / $num_clients / $ior_THREADS"
        ior_blockSize=$((space / 2 / 1024 / 1024 / num_clients / ior_THREADS))
        [ $ior_blockSize = 0 ] &&
            skip_env "Need more than $((num_clients * ior_THREADS * 1024 * 1024 * 2))KB free ($num_clients clients x $ior_THREADS tasks x 1 GB x 2), have ${space}KB" && return
        echo "free space=${space}KB, need $num_clients x $ior_THREADS x $ior_blockSize GB (blockSize reduced to $ior_blockSize GB)"
    fi
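    # Worked example of the check above, with the defaults (ior_THREADS=2,
    # ior_blockSize=6) and a hypothetical 2 clients: the data set is
    # 2 * 2 * 6 GiB = 24 GiB = 25165824 KB, so more than twice that must be
    # free (space/2 > data set) to keep the full block size; otherwise
    # ior_blockSize is recomputed from half the free space.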
    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts IOR ior_THREADS ior_DURATION MACHINEFILE

    local testdir=$DIR/d0.ior
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir
    if [ "$NFSCLIENT" ]; then
        setstripe_nfsserver $testdir -c -1 ||
            { error "setstripe on nfsserver failed" && return 1; }
    else
        $LFS setstripe $testdir -c -1 ||
            { error "setstripe failed" && return 2; }
    fi
    # -b N  blockSize -- contiguous bytes to write per task (e.g.: 8, 4k, 2m, 1g)
    # -o S  testFileName
    # -t N  transferSize -- size of transfer in bytes (e.g.: 8, 4k, 2m, 1g)
    # -w    writeFile -- write file
    # -r    readFile -- read existing file
    # -T    maxTimeDuration -- max time in minutes to run tests
    # -k    keepFile -- keep testFile(s) on program exit
    local cmd="$IOR -a POSIX -b ${ior_blockSize}g -o $testdir/iorData -t 2m -v -w -r -T $ior_DURATION -k"
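
    # With the defaults this expands to roughly
    #   IOR -a POSIX -b 6g -o <testdir>/iorData -t 2m -v -w -r -T 30 -k
    # i.e. POSIX I/O, 6 GiB per task written and read back in 2 MiB transfers,
    # capped at 30 minutes; -k keeps the data file on exit instead of
    # unlinking it.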
    echo "+ $cmd"
    mpi_run -np $((num_clients * ior_THREADS)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "ior failed! $rc"
    fi
    rm -rf $testdir
}
run_test ior "ior"
test_cascading_rw() {
    if [ "$NFSCLIENT" ]; then
        skip "skipped for NFSCLIENT mode"
        return
    fi

    [ x$CASC_RW = x ] &&
        { skip_env "cascading_rw not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })
    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts CASC_RW clients casc_THREADS casc_REP MACHINEFILE

    local testdir=$DIR/d0.cascading_rw
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir

    # -g: debug mode
    # -n: repeat test # times
    local cmd="$CASC_RW -g -d $testdir -n $casc_REP"
    echo "+ $cmd"
    mpi_run -np $((num_clients * casc_THREADS)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "cascading_rw failed! $rc"
    fi
    rm -rf $testdir
}
run_test cascading_rw "cascading_rw"
test_write_append_truncate() {
    if [ "$NFSCLIENT" ]; then
        skip "skipped for NFSCLIENT mode"
        return
    fi

    # location is the lustre/tests dir
    if ! which write_append_truncate > /dev/null 2>&1 ; then
        skip_env "write_append_truncate not found"
        return
    fi
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || return $?
    local testdir=$DIR/d0.write_append_truncate
    local file=$testdir/f0.wat

    print_opts clients write_REP write_THREADS MACHINEFILE
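
    # write_append_truncate is a shared-file coherency test: all ranks operate
    # on the single file $file (hence one f0.wat rather than a per-rank name),
    # repeatedly writing, appending to, and truncating it and verifying the
    # contents each round.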
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir

    local cmd="write_append_truncate -n $write_REP $file"
    echo "+ $cmd"
    mpi_run -np $((num_clients * write_THREADS)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "write_append_truncate failed! $rc"
    fi
    rm -rf $testdir
}
run_test write_append_truncate "write_append_truncate"
test_write_disjoint() {
    if [ "$NFSCLIENT" ]; then
        skip "skipped for NFSCLIENT mode"
        return
    fi

    [ x$WRITE_DISJOINT = x ] &&
        { skip_env "write_disjoint not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    # Need space estimation here.

    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts WRITE_DISJOINT clients wdisjoint_THREADS wdisjoint_REP MACHINEFILE
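
    # write_disjoint, by contrast, has each rank write its own disjoint region
    # of one shared file and then read the file back to verify that no rank's
    # region was corrupted by its neighbours.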
    local testdir=$DIR/d0.write_disjoint
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir

    local cmd="$WRITE_DISJOINT -f $testdir/file -n $wdisjoint_REP"
    echo "+ $cmd"
    mpi_run -np $((num_clients * wdisjoint_THREADS)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "write_disjoint failed! $rc"
    fi
    rm -rf $testdir
}
run_test write_disjoint "write_disjoint"
test_parallel_grouplock() {
    if [ "$NFSCLIENT" ]; then
        skip "skipped for NFSCLIENT mode"
        return
    fi

    [ x$PARALLEL_GROUPLOCK = x ] &&
        { skip_env "parallel_grouplock not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts clients parallel_grouplock_MINTASKS MACHINEFILE
    local testdir=$DIR/d0.parallel_grouplock
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir
    do_nodes $clients "lctl set_param llite.*.max_rw_chunk=0" ||
        error "set_param max_rw_chunk=0 failed"
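
    # The group lock matrix is driven as subtests -t 1 .. -t 12, each run as a
    # separate MPI job so that a failing combination is reported individually
    # (error_noexit) and gets its own debug log (lctl clear between runs).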
    local status=0
    local subtest
    for i in $(seq 12); do
        subtest="-t $i"
        local cmd="$PARALLEL_GROUPLOCK -g -v -d $testdir $subtest"
        echo "+ $cmd"

        mpi_run -np $parallel_grouplock_MINTASKS -machinefile ${MACHINEFILE} $cmd
        local rc=$?
        if [ $rc != 0 ] ; then
            error_noexit "parallel_grouplock subtest $subtest failed! $rc"
        else
            echo "parallel_grouplock subtest $subtest PASS"
        fi
        status=$((status + rc))
        # clear debug to collect one log per subtest
        do_nodes $(comma_list $(nodes_list)) lctl clear
    done
    [ $status -eq 0 ] || error "parallel_grouplock status: $status"
    rm -rf $testdir
}
run_test parallel_grouplock "parallel_grouplock"
statahead_NUMMNTPTS=${statahead_NUMMNTPTS:-5}
statahead_NUMFILES=${statahead_NUMFILES:-500000}
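
# statahead test flow: create statahead_NUMFILES files in one large directory
# with mdsrate, then mount statahead_NUMMNTPTS extra client mounts and list
# the directory through each of them, exercising the client-side statahead
# (lookup-ahead during readdir) path.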
cleanup_statahead () {
    trap 0

    local clients=$1
    local mntpt_root=$2
    local num_mntpts=$3

    for i in $(seq 0 $num_mntpts); do
        zconf_umount_clients $clients ${mntpt_root}$i ||
            error_exit "Failed to umount lustre on ${mntpt_root}$i"
    done
}
test_statahead () {
    if [[ -n $NFSCLIENT ]]; then
        skip "Statahead testing is not supported on NFS clients."
        return 0
    fi

    [ x$MDSRATE = x ] &&
        { skip_env "mdsrate not found" && return; }
    local clients=$CLIENTS
    [ -z $clients ] && clients=$(hostname)

    local num_clients=$(get_node_count ${clients//,/ })

    generate_machine_file $clients $MACHINEFILE || return $?

    print_opts MDSRATE clients statahead_NUMMNTPTS statahead_NUMFILES
    # do not use the default "d[0-9]*" dir name, so the t-f cleanup does not
    # have to rm all $statahead_NUMFILES (500k) files
    local dir=dstatahead
    local testdir=$DIR/$dir
    # cleanup only if the dir exists, and only the $statahead_NUMFILES files
    # this test creates -- ignore any other files created by someone else
    [ -d $testdir ] &&
        mdsrate_cleanup $((num_clients * 32)) $MACHINEFILE \
            $statahead_NUMFILES $testdir 'f%%d' --ignore
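
    # The doubled percent in 'f%%d' appears to survive an intermediate
    # formatting pass in the harness, so mdsrate ultimately names files with
    # the printf-style pattern f%d: f0, f1, ..., f<num_files-1>.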
    mkdir -p $testdir
    # mpi_run uses mpiuser
    chmod 0777 $testdir
    local num_files=$statahead_NUMFILES

    local IFree=$(inodes_available)
    if [ $IFree -lt $num_files ]; then
        num_files=$IFree
    fi
    local cmd="${MDSRATE} ${MDSRATE_DEBUG} --mknod --dir $testdir --nfiles $num_files --filefmt 'f%%d'"
    echo "+ $cmd"

    mpi_run -np $((num_clients * 32)) -machinefile ${MACHINEFILE} $cmd
    local rc=$?
    if [ $rc != 0 ] ; then
        error "mdsrate failed to create files: $rc"
    fi
    local num_mntpts=$statahead_NUMMNTPTS
    local mntpt_root=$TMP/mntpt/lustre
    local mntopts=${MNTOPTSTATAHEAD:-$MOUNTOPT}
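
    # The EXIT/ERR trap below guarantees the extra mounts are torn down even
    # if a later step fails; cleanup_statahead clears the trap (trap 0) before
    # umounting, so the explicit call at the end of the test does not trigger
    # the handler a second time.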
581 echo "Mounting $num_mntpts lustre clients starts on $clients"
582 trap "cleanup_statahead $clients $mntpt_root $num_mntpts" EXIT ERR
583 for i in $(seq 0 $num_mntpts); do
584 zconf_mount_clients $clients ${mntpt_root}$i $mntopts ||
585 error_exit "Failed to mount lustre on ${mntpt_root}$i on $clients"
    do_rpc_nodes $clients cancel_lru_locks mdc

    do_rpc_nodes $clients do_ls $mntpt_root $num_mntpts $dir
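
    # do_ls is a test-framework.sh helper invoked over rpc on every client;
    # it lists $dir through each of the $num_mntpts mount points in parallel,
    # which is the actual statahead workload being exercised.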
    mdsrate_cleanup $((num_clients * 32)) $MACHINEFILE \
        $num_files $testdir 'f%%d' --ignore
    # use rm instead of rmdir: testdir may contain files created by someone
    # else, or by a previous run whose num_files was larger than this run's
    rm -rf $testdir
    cleanup_statahead $clients $mntpt_root $num_mntpts
}
run_test statahead "statahead test, multiple clients"
equals_msg `basename $0`: test complete, cleaning up
check_and_cleanup_lustre
[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true