#!/bin/bash

LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
init_test_env "$@"
init_logging

init_stripe_dir_params RECOVERY_SCALE_ENABLE_REMOTE_DIRS \
	RECOVERY_SCALE_ENABLE_STRIPED_DIRS

ALWAYS_EXCEPT="$PARALLEL_SCALE_EXCEPT "
# bug number for skipped test: LU-9429
ALWAYS_EXCEPT+="               parallel_grouplock "

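# On ZFS backends, force quick mode (SLOW=no) and scale down the
# metadata-heavy workloads; SLOW is restored at the end of the script.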
if [[ "$mds1_FSTYPE" == zfs || "$ost1_FSTYPE" == zfs ]]; then
	ZFSSLOW=$SLOW
	SLOW=no

	cbench_IDIRS=${cbench_IDIRS:-1}
	cbench_RUNS=${cbench_RUNS:-1}

	mdtest_nFiles=${mdtest_nFiles:-"10000"}
	statahead_NUMFILES=${statahead_NUMFILES:-100000}
fi

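# Build the list of tests to run, honoring ONLY/EXCEPT/ALWAYS_EXCEPT.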
build_test_filter

# common setup
clients=${CLIENTS:-$HOSTNAME}
generate_machine_file $clients $MACHINEFILE ||
	error "Failed to generate machine file"
num_clients=$(get_node_count ${clients//,/ })

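# With SLOW=no, use reduced per-benchmark defaults to keep runtimes short.
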
# compilebench
if [ "$SLOW" = "no" ]; then
	cbench_IDIRS=${cbench_IDIRS:-2}
	cbench_RUNS=${cbench_RUNS:-2}
fi

# metabench
[ "$SLOW" = "no" ] && mbench_NFILES=${mbench_NFILES:-10000}

# simul
[ "$SLOW" = "no" ] && simul_REP=${simul_REP:-2}

# connectathon
[ "$SLOW" = "no" ] && cnt_NRUN=${cnt_NRUN:-2}

# cascading rw
[ "$SLOW" = "no" ] && casc_REP=${casc_REP:-10}

# IOR
[ "$SLOW" = "no" ] && ior_DURATION=${ior_DURATION:-5}

# write_append_truncate
[ "$SLOW" = "no" ] && write_REP=${write_REP:-100}

# write_disjoint
[ "$SLOW" = "no" ] && wdisjoint_REP=${wdisjoint_REP:-100}

# fs_test
if [ "$SLOW" = "no" ]; then
	fs_test_ndirs=${fs_test_ndirs:-10000}
	fs_test_nobj=${fs_test_nobj:-2}
fi

# xdd
[ "$SLOW" = "no" ] && xdd_passes=${xdd_passes:-15}

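# functions.sh provides the run_* benchmark helpers used by the tests below.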
. $LUSTRE/tests/functions.sh

check_and_setup_lustre

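# Temporarily set the data sequence width on all OSTs for this test run.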
ost_set_temp_seq_width_all $DATA_SEQ_MAX_WIDTH

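# MPI-based benchmarks run as the unprivileged $MPI_USER; refresh its
# Kerberos credentials when GSS/krb5 is enabled.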
MPI_RUNAS=${MPI_RUNAS:-"runas -u $MPI_USER_UID -g $MPI_USER_GID"}
$GSS_KRB5 && refresh_krb5_tgt $MPI_USER_UID $MPI_USER_GID $MPI_RUNAS

test_compilebench() {
	run_compilebench
}
run_test compilebench "compilebench"

test_metabench() {
	run_metabench
}
run_test metabench "metabench"

test_simul() {
	get_mpiuser_id $MPI_USER
	run_simul
}
run_test simul "simul"

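# "ssf" = single shared file, "fpp" = file per process.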
test_mdtestssf() {
	get_mpiuser_id $MPI_USER
	run_mdtest "ssf"
}
run_test mdtestssf "mdtestssf"

test_mdtestfpp() {
	get_mpiuser_id $MPI_USER
	run_mdtest "fpp"
}
run_test mdtestfpp "mdtestfpp"

test_connectathon() {
	run_connectathon
}
run_test connectathon "connectathon"

test_iorssf() {
	get_mpiuser_id $MPI_USER
	run_ior "ssf"
}
run_test iorssf "iorssf"

test_iorfpp() {
	get_mpiuser_id $MPI_USER
	run_ior "fpp"
}
run_test iorfpp "iorfpp"

test_ior_mdtest_parallel_ssf() {
	get_mpiuser_id $MPI_USER
	ior_mdtest_parallel "ssf"
}
run_test ior_mdtest_parallel_ssf "iormdtestssf"

test_ior_mdtest_parallel_fpp() {
	get_mpiuser_id $MPI_USER
	ior_mdtest_parallel "fpp"
}
run_test ior_mdtest_parallel_fpp "iormdtestfpp"

test_mib() {
	get_mpiuser_id $MPI_USER
	run_mib
}
run_test mib "mib"

test_cascading_rw() {
	get_mpiuser_id $MPI_USER
	run_cascading_rw
}
run_test cascading_rw "cascading_rw"

test_write_append_truncate() {
	get_mpiuser_id $MPI_USER
	run_write_append_truncate
}
run_test write_append_truncate "write_append_truncate"

# Argument is chunk size limit, the upper bound on write size
test_write_disjoint() {
	get_mpiuser_id $MPI_USER
	run_write_disjoint 123456
}
run_test write_disjoint "write_disjoint"

# Make sure to exercise the tiny write code
test_write_disjoint_tiny() {
	get_mpiuser_id $MPI_USER
	run_write_disjoint 16384
}
run_test write_disjoint_tiny "write_disjoint_tiny"

test_parallel_grouplock() {
	get_mpiuser_id $MPI_USER
	run_parallel_grouplock
}
run_test parallel_grouplock "parallel_grouplock"

test_statahead () {
	run_statahead
}
run_test statahead "statahead test, multiple clients"

test_rr_alloc () {
	run_rr_alloc
}
run_test rr_alloc "Checking even file distribution over OSTs in RR policy"

test_fs_test () {
	get_mpiuser_id $MPI_USER
	run_fs_test
}
run_test fs_test "fs_test"

test_fio () {
	run_fio
}
run_test fio "fio"

test_xdd () {
	get_mpiuser_id $MPI_USER
	run_xdd
}
run_test xdd "xdd"

# If necessary, return SLOW to its original value
[[ "$mds1_FSTYPE" == zfs || "$ost1_FSTYPE" == zfs ]] && SLOW=$ZFSSLOW

complete_test $SECONDS
check_and_cleanup_lustre
exit_status