# NOTE(review): each line below begins with a retained original-file line
# number and the numbering has gaps — lines are missing from this excerpt.
# Code is annotated as-is; do not assume blocks shown here are complete.

# Root of the Lustre tree (default: parent of this script's directory),
# then pull in the shared test harness (provides run_test, error,
# check_and_setup_lustre, generate_machine_file, ...).
3 LUSTRE=${LUSTRE:-$(dirname $0)/..}
4 . $LUSTRE/tests/test-framework.sh
# Initialize remote/striped-directory test behavior from these two
# environment toggles (helper defined in test-framework.sh).
8 init_stripe_dir_params RECOVERY_SCALE_ENABLE_REMOTE_DIRS \
9 RECOVERY_SCALE_ENABLE_STRIPED_DIRS
# Tests to skip: seed from any caller-supplied exception list, then
# append known-broken tests.
11 ALWAYS_EXCEPT="$PARALLEL_SCALE_EXCEPT "
12 # bug number for skipped test: LU-9429
13 ALWAYS_EXCEPT+=" parallel_grouplock "
# Smaller compilebench defaults when either the MDT or OST backend is
# ZFS (slower metadata).  NOTE(review): the matching 'fi' for this 'if'
# is not visible in this excerpt.
15 if [ "$mds1_FSTYPE" = zfs -o "$ost1_FSTYPE" = zfs ]; then
19 cbench_IDIRS=${cbench_IDIRS:-1}
20 cbench_RUNS=${cbench_RUNS:-1}
# Workload sizes for mdtest and the statahead test; all overridable via
# the environment.
22 mdtest_nFiles=${mdtest_nFiles:-"10000"}
23 statahead_NUMFILES=${statahead_NUMFILES:-100000}
# Client list (default: just this host); build the MPI machine file from
# it and count the participating nodes.
29 clients=${CLIENTS:-$HOSTNAME}
30 generate_machine_file $clients $MACHINEFILE ||
31 error "Failed to generate machine file"
32 num_clients=$(get_node_count ${clients//,/ })
# When SLOW=no, shrink each benchmark's workload so the suite runs in a
# reasonable time.  All values use ${var:-default} so any value already
# set in the environment (or by the ZFS branch above) wins.
# NOTE(review): the 'fi' closing this 'if' is not visible in this excerpt.
35 if [ "$SLOW" = "no" ]; then
36 cbench_IDIRS=${cbench_IDIRS:-2}
37 cbench_RUNS=${cbench_RUNS:-2}
# metabench: number of files
41 [ "$SLOW" = "no" ] && mbench_NFILES=${mbench_NFILES:-10000}
# simul: repetition count
44 [ "$SLOW" = "no" ] && simul_REP=${simul_REP:-2}
# connectathon: number of runs
47 [ "$SLOW" = "no" ] && cnt_NRUN=${cnt_NRUN:-2}
# cascading_rw: repetition count
50 [ "$SLOW" = "no" ] && casc_REP=${casc_REP:-10}
# IOR: run duration (units defined by run_ior in functions.sh -- TODO
# confirm whether minutes or seconds)
53 [ "$SLOW" = "no" ] && ior_DURATION=${ior_DURATION:-5}
55 # write_append_truncate
56 [ "$SLOW" = "no" ] && write_REP=${write_REP:-100}
# write_disjoint: repetition count
59 [ "$SLOW" = "no" ] && wdisjoint_REP=${wdisjoint_REP:-100}
# fs_test: directory/object counts.  NOTE(review): closing 'fi' not
# visible in this excerpt.
62 if [ "$SLOW" = "no" ]; then
63 fs_test_ndirs=${fs_test_ndirs:-10000}
64 fs_test_nobj=${fs_test_nobj:-2}
# xdd: number of passes
68 [ "$SLOW" = "no" ] && xdd_passes=${xdd_passes:-15}
# Shared run_* helpers for the parallel/scale suites (run_compilebench,
# run_metabench, run_ior, ...).
70 . $LUSTRE/tests/functions.sh
# Mount and sanity-check the Lustre filesystem before running any test.
72 check_and_setup_lustre
# Widen the temporary OST sequence width for the data run (helper from
# test-framework.sh; presumably to spread objects -- verify there).
74 ost_set_temp_seq_width_all $DATA_SEQ_MAX_WIDTH
# Command prefix that runs MPI jobs as the unprivileged MPI user; when
# GSS/KRB5 security is enabled, refresh that user's Kerberos ticket.
76 MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
77 $GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS
# Test registrations.  NOTE(review): the test_* function bodies for
# compilebench/metabench/simul/mdtest*/ior* are largely not visible in
# this excerpt (line numbering has gaps); the stray get_mpiuser_id calls
# below appear to be fragments of those bodies.
82 run_test compilebench "compilebench"
87 run_test metabench "metabench"
# Resolve the MPI user's uid/gid before running the MPI-based tests.
90 get_mpiuser_id $MPI_USER
93 run_test simul "simul"
96 get_mpiuser_id $MPI_USER
# mdtest, single-shared-file mode
99 run_test mdtestssf "mdtestssf"
102 get_mpiuser_id $MPI_USER
# mdtest, file-per-process mode
105 run_test mdtestfpp "mdtestfpp"
# NOTE(review): body and closing '}' of test_connectathon are not
# visible in this excerpt.
107 test_connectathon() {
110 run_test connectathon "connectathon"
113 get_mpiuser_id $MPI_USER
# IOR, single-shared-file mode
116 run_test iorssf "iorssf"
119 get_mpiuser_id $MPI_USER
# IOR, file-per-process mode
122 run_test iorfpp "iorfpp"
# Run IOR and mdtest concurrently in single-shared-file mode.
# NOTE(review): the closing '}' of each function is not visible in this
# excerpt (line numbering has gaps).
124 test_ior_mdtest_parallel_ssf() {
125 get_mpiuser_id $MPI_USER
126 ior_mdtest_parallel "ssf"
128 run_test ior_mdtest_parallel_ssf "iormdtestssf"
# Same as above but in file-per-process mode.
130 test_ior_mdtest_parallel_fpp() {
131 get_mpiuser_id $MPI_USER
132 ior_mdtest_parallel "fpp"
134 run_test ior_mdtest_parallel_fpp "iormdtestfpp"
# Fragment of a test body not fully visible in this excerpt.
137 get_mpiuser_id $MPI_USER
# cascading_rw: pipelined read/write test (run_cascading_rw presumably
# called in the missing body -- verify in full source).
# NOTE(review): closing '}' not visible.
142 test_cascading_rw() {
143 get_mpiuser_id $MPI_USER
146 run_test cascading_rw "cascading_rw"
# write_append_truncate: concurrent write/append/truncate consistency
# test.  NOTE(review): closing '}' not visible.
148 test_write_append_truncate() {
149 get_mpiuser_id $MPI_USER
150 run_write_append_truncate
152 run_test write_append_truncate "write_append_truncate"
154 # Argument is chunk size limit, the upper bound on write size
# NOTE(review): closing '}' of each function below is not visible in
# this excerpt (line numbering has gaps).
155 test_write_disjoint() {
156 get_mpiuser_id $MPI_USER
157 run_write_disjoint 123456
159 run_test write_disjoint "write_disjoint"
161 # Make sure to exercise the tiny write code
# Same test with a small chunk limit so writes stay <= 16 KiB.
162 test_write_disjoint_tiny() {
163 get_mpiuser_id $MPI_USER
164 run_write_disjoint 16384
166 run_test write_disjoint_tiny "write_disjoint_tiny"
# Group-lock concurrency test; registered but excluded via
# ALWAYS_EXCEPT above (LU-9429).
168 test_parallel_grouplock() {
169 get_mpiuser_id $MPI_USER
170 run_parallel_grouplock
172 run_test parallel_grouplock "parallel_grouplock"
# statahead: multi-client stat-ahead performance test (body not visible
# in this excerpt).
177 run_test statahead "statahead test, multiple clients"
# rr_alloc: verify round-robin object allocation spreads files evenly
# across OSTs.
182 run_test rr_alloc "Checking even file distribution over OSTs in RR policy"
185 get_mpiuser_id $MPI_USER
188 run_test fs_test "fs_test"
# Fragment of a later test body not visible in this excerpt.
196 get_mpiuser_id $MPI_USER
201 # If necessary, return SLOW to its original value
# (the ZFS setup presumably saved SLOW into ZFSSLOW before overriding
# it -- the saving line is not visible here; verify in full source)
202 [[ "$mds1_FSTYPE" == zfs || "$ost1_FSTYPE" == zfs ]] && SLOW=$ZFSSLOW
# Record suite duration and unmount/clean up the test filesystem.
204 complete_test $SECONDS
205 check_and_cleanup_lustre