X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Fparallel-scale.sh;h=840a8b8cdc20b217c0f6aad7f4d06689cd536b2b;hb=7955e2c62e7c97c2e56e1bfc8d7598f2e80a4e52;hp=ab526c87557ec2fee40b4ce31d2d17d15a9209f0;hpb=474bbd262b095cee441e24ee0818745678236d9c;p=fs%2Flustre-release.git

diff --git a/lustre/tests/parallel-scale.sh b/lustre/tests/parallel-scale.sh
index ab526c8..840a8b8 100644
--- a/lustre/tests/parallel-scale.sh
+++ b/lustre/tests/parallel-scale.sh
@@ -8,24 +8,24 @@ init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 init_logging
 
-# bug 20670
-ALWAYS_EXCEPT="parallel_grouplock $PARALLEL_SCALE_EXCEPT"
+# bug number for skipped test: LU-9429
+ALWAYS_EXCEPT="parallel_grouplock $PARALLEL_SCALE_EXCEPT"
 
 if [ $(facet_fstype $SINGLEMDS) = zfs -o $(facet_fstype "ost1") = zfs ]; then
 	ZFSSLOW=$SLOW
 	SLOW=no
 
-	cbench_IDIRS=1
-	cbench_RUNS=1
+	cbench_IDIRS=${cbench_IDIRS:-1}
+	cbench_RUNS=${cbench_RUNS:-1}
 
-	mdtest_nFiles=10000
-	statahead_NUMFILES=100000
+	mdtest_nFiles=${mdtest_nFiles:-"10000"}
+	statahead_NUMFILES=${statahead_NUMFILES:-100000}
 fi
 
 # common setup
 MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
 clients=${CLIENTS:-$HOSTNAME}
-generate_machine_file $clients $MACHINEFILE || \
+generate_machine_file $clients $MACHINEFILE ||
 	error "Failed to generate machine file"
 num_clients=$(get_node_count ${clients//,/ })
 
@@ -36,25 +36,34 @@ if [ "$SLOW" = "no" ]; then
 fi
 
 # metabench
-[ "$SLOW" = "no" ] && mbench_NFILES=10000
+[ "$SLOW" = "no" ] && mbench_NFILES=${mbench_NFILES:-10000}
 
 # simul
-[ "$SLOW" = "no" ] && simul_REP=2
+[ "$SLOW" = "no" ] && simul_REP=${simul_REP:-2}
 
 # connectathon
-[ "$SLOW" = "no" ] && cnt_NRUN=2
+[ "$SLOW" = "no" ] && cnt_NRUN=${cnt_NRUN:-2}
 
 # cascading rw
-[ "$SLOW" = "no" ] && casc_REP=10
+[ "$SLOW" = "no" ] && casc_REP=${casc_REP:-10}
 
 # IOR
-[ "$SLOW" = "no" ] && ior_DURATION=5
+[ "$SLOW" = "no" ] && ior_DURATION=${ior_DURATION:-5}
 
 # write_append_truncate
-[ "$SLOW" = "no" ] && write_REP=100
+[ "$SLOW" = "no" ] && write_REP=${write_REP:-100}
 
 # write_disjoint
-[ "$SLOW" = "no" ] && wdisjoint_REP=100
+[ "$SLOW" = "no" ] && wdisjoint_REP=${wdisjoint_REP:-100}
+
+# fs_test
+if [ "$SLOW" = "no" ]; then
+	fs_test_ndirs=${fs_test_ndirs:-10000}
+	fs_test_nobj=${fs_test_nobj:-2}
+fi
+
+# xdd
+[ "$SLOW" = "no" ] && xdd_passes=${xdd_passes:-15}
 
 . $LUSTRE/tests/functions.sh
 
@@ -105,6 +114,16 @@ test_iorfpp() {
 }
 run_test iorfpp "iorfpp"
 
+test_ior_mdtest_parallel_ssf() {
+	ior_mdtest_parallel "ssf"
+}
+run_test ior_mdtest_parallel_ssf "iormdtestssf"
+
+test_ior_mdtest_parallel_fpp() {
+	ior_mdtest_parallel "fpp"
+}
+run_test ior_mdtest_parallel_fpp "iormdtestfpp"
+
 test_mib() {
 	run_mib
 }
@@ -120,11 +139,18 @@ test_write_append_truncate() {
 }
 run_test write_append_truncate "write_append_truncate"
 
+# Argument is chunk size limit, the upper bound on write size
 test_write_disjoint() {
-	run_write_disjoint
+	run_write_disjoint 123456
 }
 run_test write_disjoint "write_disjoint"
 
+# Make sure to exercise the tiny write code
+test_write_disjoint_tiny() {
+	run_write_disjoint 16384
+}
+run_test write_disjoint_tiny "write_disjoint_tiny"
+
 test_parallel_grouplock() {
 	run_parallel_grouplock
 }
@@ -135,7 +161,27 @@ test_statahead () {
 }
 run_test statahead "statahead test, multiple clients"
 
-[ $(facet_fstype $SINGLEMDS) = zfs -o $(facet_fstype "ost1") = zfs] &&
+test_rr_alloc () {
+	run_rr_alloc
+}
+run_test rr_alloc "Checking even file distribution over OSTs in RR policy"
+
+test_fs_test () {
+	run_fs_test
+}
+run_test fs_test "fs_test"
+
+test_fio () {
+	run_fio
+}
+run_test fio "fio"
+
+test_xdd () {
+	run_xdd
+}
+run_test xdd "xdd"
+
+[ $(facet_fstype $SINGLEMDS) = zfs -o $(facet_fstype "ost1") = zfs ] &&
	SLOW=$ZFSSLOW
 
 complete $SECONDS
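
Most of the changes in the first two hunks replace hard-coded SLOW=no tuning values with overridable defaults. The idiom is plain POSIX parameter expansion: assign the reduced quick-run value only when the caller has not already supplied one. The snippet below is a standalone illustration of that behaviour, not part of the patch; the echo line and the sample override value are only for demonstration.

#!/bin/bash
# Illustration of the ${VAR:-default} fallback used throughout the patch.
# If cbench_IDIRS is unset or empty, fall back to the quick default of 1;
# otherwise keep whatever value the environment already provides.
cbench_IDIRS=${cbench_IDIRS:-1}
echo "cbench_IDIRS=$cbench_IDIRS"

Run as-is this prints 1; run with cbench_IDIRS=8 exported it prints 8. That is what lets a tester scale the reduced workloads back up (or down further) without editing the script, which the old hard-coded assignments prevented.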
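The newly added test_* functions (ior_mdtest_parallel_*, write_disjoint_tiny, rr_alloc, fs_test, fio, xdd) are registered through the same run_test wrapper as the existing tests, so the usual test-framework selection variables should apply to them unchanged. A hypothetical invocation, assuming the standard ONLY= filter honoured by run_test and the CLIENTS variable the script already reads near the top (hostnames here are placeholders):

# Run only the new tiny-write variant of write_disjoint.
ONLY=write_disjoint_tiny bash lustre/tests/parallel-scale.sh

# Run only the new xdd test across two (placeholder) client nodes.
CLIENTS=client1,client2 ONLY=xdd bash lustre/tests/parallel-scale.sh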