#!/bin/bash
#
# Requires a pre-configured Samba machine

LUSTRE=${LUSTRE:-$(dirname $0)/..}
. $LUSTRE/tests/test-framework.sh
init_test_env "$@"
init_logging

. $LUSTRE/tests/setup-cifs.sh

# lustre client used as samba server (default is mds node)
LUSTRE_CLIENT_SMBSRV=${LUSTRE_CLIENT_SMBSRV:-$(facet_active_host $SINGLEMDS)}
SMBSHARE=${SMBSHARE:-lustretest}
SMBUSER=${SMBUSER:-root}
SMBPASSWD=${SMBPASSWD:-lustre}
SMBSRVMNTPT=${SMBSRVMNTPT:-$MOUNT}
SMBCLIMNTPT=${SMBCLIMNTPT:-$MOUNT}
SMBCLIENTS=${SMBCLIENTS:-$CLIENTS}
SMBCLIENTS=$(exclude_items_from_list $SMBCLIENTS $LUSTRE_CLIENT_SMBSRV)
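
# Example override when running this test directly (illustrative only; the
# node names, password and script path are placeholders):
#   LUSTRE_CLIENT_SMBSRV=client1 SMBCLIENTS="client2,client3" \
#	SMBPASSWD=secret sh <path-to-this-test>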

[ -z "$SMBCLIENTS" ] &&
	skip_env "need at least two nodes: samba server and samba client"

do_nodes $SMBCLIENTS modinfo cifs | grep dummy > /dev/null &&
	skip_env "OFED installation caused CIFS to break in RHEL8.4 mlnx 5.4"

check_and_setup_lustre
# first unmount all the lustre clients
cleanup_mount $MOUNT

# set CONFIGURE_SMB=false to skip smb config
CONFIGURE_SMB=${CONFIGURE_SMB:-true}
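# For example (hypothetical invocation; the share name is a placeholder), to
# reuse a share that is already exported on the server:
#   CONFIGURE_SMB=false SMBSHARE=myshare sh <path-to-this-test>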

# record the smb service status so it can be restarted later if it was
# running initially
SMBSTATUS=0
smb_status $LUSTRE_CLIENT_SMBSRV || SMBSTATUS=$?
SMBCONFTMP=$(do_node $LUSTRE_CLIENT_SMBSRV "mktemp -t smb.conf.XXX")

cleanup_exit() {
	trap 0

	check_and_cleanup_lustre

	cleanup_cifs $LUSTRE_CLIENT_SMBSRV $SMBCLIMNTPT $SMBCLIENTS ||
		error_noexit false "failed to cleanup cifs"
	zconf_umount $LUSTRE_CLIENT_SMBSRV $SMBSRVMNTPT force ||
		error_noexit false "failed to umount lustre on $LUSTRE_CLIENT_SMBSRV"
	# restore lustre mount
	restore_mount $MOUNT ||
		error_noexit false "failed to mount lustre"

	$CONFIGURE_SMB && restore_config_smb $LUSTRE_CLIENT_SMBSRV $SMBCONFTMP
	[[ $SMBSTATUS -eq 0 ]] &&
		do_node $LUSTRE_CLIENT_SMBSRV "service smb start"
}

$CONFIGURE_SMB && configure_smb $LUSTRE_CLIENT_SMBSRV $SMBSHARE $SMBUSER \
		$SMBPASSWD $SMBSRVMNTPT $SMBCONFTMP ||
	echo -e "\nSkipping smb config ..."
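
# configure_smb (from setup-cifs.sh) is expected to export $SMBSRVMNTPT as the
# $SMBSHARE share. A minimal share section in smb.conf would look roughly like
# the following sketch (illustrative only; the path is a placeholder and the
# real config is written by the helper):
#   [lustretest]
#       path = /mnt/lustre
#       writeable = yes
#       guest ok = no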

trap cleanup_exit EXIT SIGHUP SIGINT

# mount the lustre client on the samba server node
zconf_mount $LUSTRE_CLIENT_SMBSRV $SMBSRVMNTPT ||
	error "mount lustre on $LUSTRE_CLIENT_SMBSRV failed"

# mount the smb share over cifs on the client nodes
setup_cifs $LUSTRE_CLIENT_SMBSRV $SMBSHARE $SMBCLIMNTPT $SMBUSER \
		$SMBPASSWD $SMBCLIENTS ||
	error false "setup cifs failed"
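
# On each client the result is roughly equivalent to the following mount
# (illustrative; the actual command is issued by setup_cifs):
#   mount -t cifs //$LUSTRE_CLIENT_SMBSRV/$SMBSHARE $SMBCLIMNTPT \
#         -o user=$SMBUSER,pass=$SMBPASSWD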

# compilebench: run a short iteration in cifs mode
cbench_IDIRS=${cbench_IDIRS:-2}
cbench_RUNS=${cbench_RUNS:-2}
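# The benchmark knobs above, and the DBENCH_*, fsx_* and iozone_* variables
# used by the individual tests below, can be overridden from the environment
# in the same way, e.g. (illustrative values):
#   cbench_IDIRS=4 DBENCH_DURATION=600 fsx_NUMOP=200000 sh <path-to-this-test>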

# source the common functions file after all parameters are set so they
# take effect
. $LUSTRE/tests/functions.sh

# check whether $file on all $clients contains the string $str
check_prog_output() {
	local clients=$1 file=$2 str=$3

	do_nodes $clients grep -q \\\"$str\\\" $file 2>/dev/null
}

# poll until $file on all $clients contains $str, failing after $time seconds
wait_prog_output() {
	local clients=$1 file=$2 str=$3 time=$4
	local start_ts=$(date +%s)
	local elapsed

	while ! check_prog_output $clients $file "$str"; do
		elapsed=$(($(date +%s) - start_ts))
		if [ $elapsed -gt $time ]; then
			return 1
		fi
		sleep 1
	done
}

test_compilebench() {
	run_compilebench $SMBCLIMNTPT
}
run_test compilebench "compilebench on cifs clients"

test_dbench() {
	local clients=$SMBCLIENTS
	local duration=${DBENCH_DURATION:-300}
	local nproc=${DBENCH_NPROC:-1}
	local delay=${dbench_STARTDELAY:-120}
	local log=$TMP/dbench.log
	local pid

	local cmd="rundbench $nproc -t $duration"

	do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
		PATH=\$PATH DBENCH_LIB=$DBENCH_LIB \
		TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
		DIR=$SMBCLIMNTPT/$tdir/\\\$(hostname) \
		LCTL=$LCTL $cmd 2>&1 | tee $log; \
		exit \\\${PIPESTATUS[0]}" &
	pid=$!

	# check that dbench has started on all clients within $dbench_STARTDELAY
	# seconds: by then the dbench log on each client should exist and
	# contain "dbench PID"
	if ! wait_prog_output $clients $log "dbench PID" $delay; then
		killall_process $clients dbench
		error "dbench failed to start on $clients!"
	fi

	log "Started rundbench load pid=$pid ..."
	wait $pid || error "rundbench load on $clients failed!"
}
run_test dbench "dbench on cifs clients"

test_fsx() {
	local clients=$SMBCLIENTS
	local seed=${fsx_SEED:-$RANDOM}
	local size=${fsx_SIZE:-1024}
	local numop=${fsx_NUMOP:-100000}
	local delay=${fsx_STARTDELAY:-120}
	local log=$TMP/fsx.log
	local pid

	local nclients=$(get_node_count ${clients//,/ })
	local space=$(df -P $SMBCLIMNTPT | tail -n 1 | awk '{ print $4 }')

	# cap the per-client file size so the total stays within ~3/4 of the
	# free space
	[ $space -lt $((size * nclients)) ] && size=$((space * 3 / 4 / nclients))
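	# e.g. (hypothetical numbers): with space=2048, nclients=4 and the
	# default size=1024, 1024 * 4 > 2048, so size is reduced to
	# 2048 * 3 / 4 / 4 = 384 per client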

	# fsx load: -S sets the random seed, -l the maximum file length,
	# -N the number of operations
	local cmd="$FSX -c 50 -p 500 -S $seed -P $TMP -l $size -N $numop "

	do_nodesv $clients "set -x; \
		$cmd $SMBCLIMNTPT/f0.fsx_\\\$(hostname) 2>&1 | tee $log; \
		exit \\\${PIPESTATUS[0]}" &
	pid=$!

	# check that fsx has started on all clients within $fsx_STARTDELAY
	# seconds: by then the fsx log on each client should exist and
	# contain "Seed set"
	if ! wait_prog_output $clients $log "Seed set" $delay; then
		killall_process $clients fsx
		error "fsx failed to start on $clients!"
	fi

	log "Started fsx load pid=$pid ..."
	wait $pid || error "fsx load on $clients failed!"
}
run_test fsx "fsx on cifs clients"

test_iozone() {
	local clients=$SMBCLIENTS
	local size=${iozone_SIZE:-262144}	# 256MB
	local delay=${iozone_STARTDELAY:-120}
	local log=$TMP/iozone.log
	local pid
	local rc=0

	local nclients=$(get_node_count ${clients//,/ })
	local space=$(df -P $SMBCLIMNTPT | tail -n 1 | awk '{ print $4 }')

	# keep the total iozone working set within 3/4 of the free space
	[[ $((size * nclients)) -gt $((space * 3 / 4)) ]] &&
		size=$((space * 3 / 4 / nclients))

	# stripe the test directory over all OSTs (-c -1)
	do_node $LUSTRE_CLIENT_SMBSRV "mkdir $SMBSRVMNTPT/$tdir
		lfs setstripe -c -1 $SMBSRVMNTPT/$tdir"

	log "free space: $space KB, file size: $size KB, clients: $nclients"

	# iozone: -a automatic mode, -e include flush in timings,
	# -+d enable data verification, -s file size in KB
	local cmd="iozone -a -e -+d -s $size "

	do_nodesv $clients "set -x; \
		$cmd -f $SMBCLIMNTPT/$tdir/f0.iozone_\\\$(hostname) \
		2>&1 | tee $log; exit \\\${PIPESTATUS[0]}" &
	pid=$!

	# check that iozone has started on all clients within $iozone_STARTDELAY
	# seconds: by then the iozone log on each client should exist and
	# contain "Command line used"
	if ! wait_prog_output $clients $log "Command line used" $delay; then
		killall_process $clients iozone
		error "iozone failed to start on $clients!"
	fi

	log "Started iozone load pid=$pid ..."
	wait $pid || rc=1

	log "Processing iozone log"
	do_nodesv $clients "tail -1 $log | grep -q complete" || rc=2
	do_node $LUSTRE_CLIENT_SMBSRV "rm -rf $SMBSRVMNTPT/$tdir"
	[ $rc -eq 0 ] || error "iozone load on $clients failed! rc=$rc"
}
run_test iozone "iozone on cifs clients"