# replay-dual style Lustre recovery test script (fragment).
# NOTE(review): every line is prefixed with its original source line number
# ("5 LUSTRE=...") and many lines are missing — this is an extraction
# artifact; the prefixes must be stripped before this file can execute.
# Locate the Lustre tree (default: parent of this script's directory) and
# pull in the shared test harness plus the LMV test configuration.
5 LUSTRE=${LUSTRE:-`dirname $0`/..}
6 . $LUSTRE/tests/test-framework.sh
10 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
# Known-broken test excluded from the run:
13 # 21 - open vs. unlink out of order replay: isn't solved yet
# Callers may override SETUP/CLEANUP to skip or replace the default hooks.
16 SETUP=${SETUP:-"setup"}
17 CLEANUP=${CLEANUP:-"cleanup"}
# Configuration fragment (presumably the body of a gen_config-style
# function; its header, `done`, `else` and `fi` lines are absent from this
# chunk).  With multiple MDSes, each MDS gets its own device under $TMP and
# joins the lmv1_svc LMV; otherwise a single mds1 is configured directly.
21 if [ "$MDSCOUNT" -gt 1 ]; then
23 for mds in `mds_list`; do
24 MDSDEV=$TMP/${mds}-`hostname`
25 add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
# Attach the striped LOV to the LMV service (multi-MDS case).
27 add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
28 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
# Single-MDS case: plain mds1 with a directly attached LOV.
31 add_mds mds1 --dev $MDSDEV --size $MDSSIZE
32 add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
33 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
# Two OSTs (second on a "-2" suffixed device), both marked for failover,
# and one client mounting at $MOUNT.
37 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE --failover
38 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE --failover
39 add_client client ${MDS} --lov lov1 --path $MOUNT
# Re-assert the overridable SETUP/CLEANUP hooks (duplicate of the preamble).
44 SETUP=${SETUP:-"setup"}
45 CLEANUP=${CLEANUP:-"cleanup"}
# Cleanup fragment (presumably the body of a cleanup() function; its header,
# closing brace and several interior lines are absent from this chunk).
48 # make sure we are using the primary MDS, so the config log will
49 # be able to clean up properly.
50 activemds=`facet_active mds1`
51 if [ $activemds != "mds1" ]; then
# Unmount the second client mount; tolerate it already being unmounted.
55 umount $MOUNT2 || true
60 # In mds recovery, the mds will clear orphans in the ost via
61 # mds_lov_clear_orphan, which sends a request to the ost and waits for
62 # the reply; if we stop the mds at this time we will get obd_refcount > 1
63 # errors, because mds_lov_clear_orphan grabs an export of the mds,
64 # so the obd_refcount of the mds will not drop to zero. So, wait a while
65 # before stopping the mds. This bug needs further work.
66 for mds in `mds_list`; do
68 stop $mds ${FORCE} $MDSLCONFARGS
# Stop the OST last, dumping debug state for post-mortem analysis.
71 stop ost ${FORCE} --dump cleanup-dual.log
# Setup fragment (presumably cleanup-only early exit followed by the body of
# a setup() function; interior lines, `done` and `fi` lines are absent from
# this chunk).
76 if [ "$ONLY" == "cleanup" ]; then
# Silence portals debugging when only cleaning up.
77 sysctl -w portals.debug=0
# Bring up the Kerberos KDC and the lsvcgssd GSS daemon; both are required.
85 start_krb5_kdc || exit 1
86 start_lsvcgssd || exit 2
88 start ost --reformat $OSTLCONFARGS
# The pinger is mandatory for this test set: recovery depends on clients
# noticing server death via pings.
89 PINGER=`cat /proc/fs/lustre/pinger`
91 if [ "$PINGER" != "on" ]; then
92 echo "ERROR: Lustre must be built with --enable-pinger for replay-dual"
97 start ost2 --reformat $OSTLCONFARGS
98 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
99 for mds in `mds_list`; do
100 start $mds --reformat $MDSLCONFARGS
# Mount both client mount points unless already present in /proc/mounts.
102 grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
103 grep " $MOUNT2 " /proc/mounts || zconf_mount `hostname` $MOUNT2
# Propagate test timeout and recovery upcall into the running kernel.
105 echo $TIMEOUT > /proc/sys/lustre/timeout
106 echo $UPCALL > /proc/sys/lustre/upcall
110 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
# test_1 fragment (header and the create/failover lines are absent from this
# chunk).  Verifies dual-mount namespace coherency after replay: each file
# (presumably created on the peer mount earlier in the test) must be visible
# from the other client, and must be gone from both after removal.
119 checkstat $MOUNT2/a || return 1
120 checkstat $MOUNT1/b || return 2
121 rm $MOUNT2/a $MOUNT1/b
# Negative checks: && return means "fail if the file still exists".
122 checkstat $MOUNT1/a && return 3
123 checkstat $MOUNT2/b && return 4
127 run_test 1 "|X| simple create"
# test_2 fragment (header, mkdir, failover and rmdir lines are absent from
# this chunk).  adir must survive replay when seen from the second mount,
# and must be gone after its (unseen) removal.
135 checkstat $MOUNT2/adir || return 1
137 checkstat $MOUNT2/adir && return 2
141 run_test 2 "|X| mkdir adir"
# test_3 fragment (header and the initial `mkdir adir` / failover lines are
# absent from this chunk).  Nested mkdir across both mounts must replay
# correctly and both directories must disappear after removal.
146 mkdir $MOUNT2/adir/bdir
149 checkstat $MOUNT2/adir || return 1
150 checkstat $MOUNT1/adir/bdir || return 2
151 rmdir $MOUNT2/adir/bdir $MOUNT1/adir
# Negative checks after rmdir: fail if either directory still exists.
152 checkstat $MOUNT1/adir && return 3
153 checkstat $MOUNT2/adir/bdir && return 4
157 run_test 3 "|X| mkdir adir, mkdir adir/bdir "
# test_4 fragment (header and failover lines are absent from this chunk).
# Same as test_3 but the second `mkdir adir` must fail with -EEXIST (hence
# `&& return 1` — success here is a test failure).
162 mkdir $MOUNT1/adir && return 1
163 mkdir $MOUNT2/adir/bdir
166 checkstat $MOUNT2/adir || return 2
167 checkstat $MOUNT1/adir/bdir || return 3
169 rmdir $MOUNT2/adir/bdir $MOUNT1/adir
# Negative checks after rmdir: fail if either directory still exists.
170 checkstat $MOUNT1/adir && return 4
171 checkstat $MOUNT2/adir/bdir && return 5
175 run_test 4 "|X| mkdir adir (-EEXIST), mkdir adir/bdir "
# test_5 fragment (header, unlink, failover and the $pid capture are absent
# from this chunk).  Holds a file open via multiop while it is unlinked,
# then verifies the open survives replay and the file is gone on close.
179 # multiclient version of replay_single.sh/test_8
181 multiop $MOUNT2/a o_tSc &
183 # give multiop a chance to open
188 wait $pid || return 1
# File was unlinked while open; it must not reappear after replay.
191 [ -e $MOUNT2/a ] && return 2
194 run_test 5 "open, unlink |X| close"
# test_6 fragment (header, unlink, failover and the $pid1/$pid2 captures are
# absent from this chunk).  Two clients hold the same file open; close order
# is pid1 then pid2 around an MDS failure.
199 multiop $MOUNT2/a o_c &
201 multiop $MOUNT1/a o_c &
203 # give multiop a chance to open
208 wait $pid1 || return 1
212 wait $pid2 || return 1
# Unlinked-while-open file must not reappear after replay.
213 [ -e $MOUNT2/a ] && return 2
216 run_test 6 "open1, open2, unlink |X| close1 [fail mds] close2"
# test_6b fragment (header, unlink, failover and the $pid1/$pid2 captures
# are absent from this chunk).  Mirror of test_6 with the close order
# reversed: pid2 closes before the MDS failure, pid1 after.
220 multiop $MOUNT2/a o_c &
222 multiop $MOUNT1/a o_c &
224 # give multiop a chance to open
229 wait $pid2 || return 1
233 wait $pid1 || return 1
# Unlinked-while-open file must not reappear after replay.
234 [ -e $MOUNT2/a ] && return 2
237 run_test 6b "open1, open2, unlink |X| close2 [fail mds] close1"
# test_8 fragment (header/closing brace absent from this chunk).  Drops the
# reint reply for a create so the client must resend; the file must then be
# visible from the second mount and removable from the first.
241 drop_reint_reply "mcreate $MOUNT1/$tfile" || return 1
243 checkstat $MOUNT2/$tfile || return 2
244 rm $MOUNT1/$tfile || return 3
248 run_test 8 "replay of resent request"
# test_9 fragment (header and the failover lines between setting and
# clearing fail_loc are absent from this chunk).  Creates from both mounts,
# then forces the first reint reply to be dropped during replay
# (OBD_FAIL 0x119, one-shot via the 0x8000 flag) so the create is resent.
252 mcreate $MOUNT1/$tfile-1
253 mcreate $MOUNT2/$tfile-2
254 # drop first reint reply
255 sysctl -w lustre.fail_loc=0x80000119
257 sysctl -w lustre.fail_loc=0
259 rm $MOUNT1/$tfile-[1,2] || return 1
263 run_test 9 "resending a replayed create"
# test_10 fragment (header and failover lines are absent from this chunk).
# An unlink is among the replayed requests; dropping the first reint reply
# (one-shot 0x119) forces it to be resent.  The unlinked file must stay
# gone and the surviving file must remain.
266 mcreate $MOUNT1/$tfile-1
268 munlink $MOUNT1/$tfile-1
269 mcreate $MOUNT2/$tfile-2
270 # drop first reint reply
271 sysctl -w lustre.fail_loc=0x80000119
273 sysctl -w lustre.fail_loc=0
275 checkstat $MOUNT1/$tfile-1 && return 1
# NOTE(review): $tfile-2 was created via $MOUNT2 but is checked via $MOUNT1;
# both mounts share one namespace, so this is presumably intentional —
# confirm against the full original.
276 checkstat $MOUNT1/$tfile-2 || return 2
281 run_test 10 "resending a replayed unlink"
# test_11 fragment (header and failover lines are absent from this chunk).
# Interleaves creates from both mounts, then drops ALL reint replies
# (fail_loc 0x0119 without the one-shot flag) for two timeout periods so
# both clients must reconnect and time out during replay.
285 mcreate $MOUNT1/$tfile-1
286 mcreate $MOUNT2/$tfile-2
287 mcreate $MOUNT1/$tfile-3
288 mcreate $MOUNT2/$tfile-4
289 mcreate $MOUNT1/$tfile-5
290 # drop all reint replies for a while
291 sysctl -w lustre.fail_loc=0x0119
293 # sleep for a while, letting both clients reconnect and time out
294 sleep $((TIMEOUT * 2))
295 sysctl -w lustre.fail_loc=0
# All five files must have been replayed and be removable.
297 rm $MOUNT1/$tfile-[1-5] || return 1
301 run_test 11 "both clients timeout during replay"
# test_12 fragment (header, the $MULTIPID capture and failover lines are
# absent from this chunk).  Forces an open resend timeout (fail_loc
# 0x80000302, one-shot) while a multiop holds the file; `df` doubles as a
# "client recovered" probe.
306 multiop $DIR/$tfile mo_c &
311 sysctl -w lustre.fail_loc=0x80000302
313 df $MOUNT || return 1
314 sysctl -w lustre.fail_loc=0
317 $CHECKSTAT -t file $DIR/$tfile || return 2
# Tell multiop to close/exit, then reap it and propagate its status.
318 kill -USR1 $MULTIPID || return 3
319 wait $MULTIPID || return 4
324 run_test 12 "open resend timeout"
# test_13 fragment (header, the $MULTIPID capture and failover lines are
# absent from this chunk).  Mirror of test_12 for the close path: multiop is
# signalled to close first, then fail_loc 0x80000115 (one-shot) delays the
# close resend.
327 multiop $DIR/$tfile mo_c &
333 kill -USR1 $MULTIPID || return 3
334 wait $MULTIPID || return 4
337 sysctl -w lustre.fail_loc=0x80000115
339 df $MOUNT || return 1
340 sysctl -w lustre.fail_loc=0
343 $CHECKSTAT -t file $DIR/$tfile || return 2
348 run_test 13 "close resend timeout"
# test_14 fragment (header and the client-kill/failover lines are absent
# from this chunk).  One client is lost during replay; recovery is expected
# to FAIL (hence `df && return 1`), but requests from the surviving client
# preceding the lost client's transactions must still have been replayed.
353 createmany -o $MOUNT1/$tfile- 25
354 createmany -o $MOUNT2/$tfile-2- 1
355 createmany -o $MOUNT1/$tfile-3- 25
359 # expect failover to fail
360 df $MOUNT && return 1
363 # first 25 files should have been
366 unlinkmany $MOUNT1/$tfile- 25 || return 2
# Remount the second client that was deliberately lost during the test.
368 zconf_mount `hostname` $MOUNT2
371 run_test 14 "timeouts waiting for lost client during replay"
# test_15 fragment (header and the client-kill/failover lines are absent
# from this chunk).  Variant of test_14 where the surviving client has no
# transactions after the lost client's, so recovery completes (`df ||
# return 1`) and all 25 files are replayed.
375 createmany -o $MOUNT1/$tfile- 25
376 createmany -o $MOUNT2/$tfile-2- 1
380 df $MOUNT || return 1
383 unlinkmany $MOUNT1/$tfile- 25 || return 2
# Remount the second client that was deliberately lost during the test.
385 zconf_mount `hostname` $MOUNT2
388 run_test 15 "timeout waiting for lost client during replay, 1 client completes"
# test_16 fragment (header and the double-MDS-failure lines are absent from
# this chunk).  Test is DISABLED — its run_test line is commented out
# (bug 3571: fail MDS during recovery).
391 createmany -o $MOUNT1/$tfile- 25
392 createmany -o $MOUNT2/$tfile-2- 1
398 df $MOUNT || return 1
401 unlinkmany $MOUNT1/$tfile- 25 || return 2
403 zconf_mount `hostname` $MOUNT2
407 #run_test 16 "fail MDS during recovery (3571)"
# test_17 fragment (header and the OST-failure lines are absent from this
# chunk).  Test is DISABLED — OST failover is not yet supported (bug 3571).
410 createmany -o $MOUNT1/$tfile- 25
411 createmany -o $MOUNT2/$tfile-2- 1
413 # Make sure the disconnect is lost
# Enable full debug tracing for this recovery scenario.
417 echo -1 > /proc/sys/portals/debug
421 df $MOUNT || return 1
424 unlinkmany $MOUNT1/$tfile- 25 || return 2
426 zconf_mount `hostname` $MOUNT2
430 # OST failover is still not supported
431 #run_test 17 "fail OST during recovery (3571)"
# test_18 fragment (header, abort-recovery and wait lines are absent from
# this chunk).  Two clients open the same file, recovery is aborted, and
# the MDS must not assert (bug 3892).
435 multiop $MOUNT2/$tfile O_c &
437 multiop $MOUNT1/$tfile O_c &
439 # give multiop a chance to open
# Remount the second client after the aborted recovery evicted it.
447 zconf_mount `hostname` $MOUNT2
449 run_test 18 "replay open, Abort recovery, don't assert (3892)"
# test_20 fragment (several interior lines and the closing brace are absent
# from this chunk).  Bug 3822: with an enqueue held on the MDS and the
# blocking callback dropped on client1, client1 gets evicted; a subsequent
# enqueue from client2 must still succeed without the server entering
# recovery.
451 # cleanup with blocked enqueue fails until timer elapses (MDS busy), wait for
454 test_20() { # bug 3822 - evicting client with enqueued lock
455 mkdir -p $MOUNT1/$tdir
456 touch $MOUNT1/$tdir/f0
457 #define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
# Background stat storm keeps enqueues in flight while fail_locs are armed.
458 statmany -s $MOUNT1/$tdir/f 500 &
461 do_facet mds1 sysctl -w lustre.fail_loc=0x8000030b # hold enqueue
463 #define OBD_FAIL_LDLM_BL_CALLBACK 0x305
464 do_facet client sysctl -w lustre.fail_loc=0x80000305 # drop cb, evict
466 usleep 500 # wait to ensure first client is one that will be evicted
# client2's open must succeed despite client1's eviction.
467 openfile -f O_RDONLY $MOUNT2/$tdir/f0
# If the server entered recovery, the eviction did not happen — fail.
469 dmesg | grep "entering recovery in server" && \
470 error "client not evicted" || true
472 run_test 20 "ldlm_handle_enqueue succeeds on evicted export (3822)"
# Map (filesystem ordinal, MDS name) to the local MDC device number by
# matching the filesystem's llite UUID in `lctl device_list` output.
# (Opening/closing braces of the function are absent from this chunk.)
474 # $1 - fs num (1, 2, ...)
# $2 - MDS name to match (e.g. mds1)
476 function find_dev_for_fs_and_mds()
# Pick the $1-th llite instance and read its client UUID.
478 local fs=`ls /proc/fs/lustre/llite|head -n $1|tail -n1`
479 local fsuuid=`cat /proc/fs/lustre/llite/$fs/uuid`
# Print field 4 (the device number) of the matching mdc line.
480 $LCTL device_list | awk "/mdc.*$2.*$fsuuid/ {print \$4}"
# test_21 fragment (header, the failover/sleep lines and the $pid capture
# are absent from this chunk).  Disabled in the exclude list: replays an
# unlink before the open of the same file by gating each client's recovery
# with disable_recovery/enable_recovery on its MDC device.
484 mdc1dev=`find_dev_for_fs_and_mds 1 mds1`
485 mdc2dev=`find_dev_for_fs_and_mds 2 mds1`
# Pre-create f21 so the later open replays against an existing file.
486 multiop $MOUNT1/f21 O
488 # generate IT_OPEN to be replayed against existing file
489 multiop $MOUNT1/f21 o_Sc &
492 # IT_OPEN will be committed by the failover time
495 # generate MDS_REINT_UNLINK to be replayed
496 rm -f $MOUNT2/f21 || return 1
498 # disable recovery on both clients
499 $LCTL --device %$mdc1dev disable_recovery
500 $LCTL --device %$mdc2dev disable_recovery
503 # let the unlink be replayed first
504 $LCTL --device %$mdc2dev enable_recovery
507 # now let the open be replayed
508 $LCTL --device %$mdc1dev enable_recovery
# Reap the background multiop; its exit status is the test verdict.
510 wait $pid || return 2
512 run_test 21 "open vs. unlink out of order replay"
# test_22 fragment (several interior lines, the if-body tail and the closing
# brace are absent from this chunk).  Bug 6063: an AST must be handled
# correctly while recovery is staged — client2's granted UPDATE lock is
# recovered first, then client1's mknod replay that needs that lock.
514 test_22() { # bug 6063 - AST during recovery
516 cat /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count
517 mdc1dev=`find_dev_for_fs_and_mds 1 mds1`
518 mdc2dev=`find_dev_for_fs_and_mds 2 mds1`
# Hold both clients out of recovery so replay order can be controlled.
519 $LCTL --device %$mdc1dev disable_recovery
520 $LCTL --device %$mdc2dev disable_recovery
523 mknod $MOUNT1/${tdir}-1 c 0 0 # client1: request to be replayed
524 ls $MOUNT2 # client2: take lock needed for
527 # let's recover 2nd connection with granted UPDATE lock
528 $LCTL --device %$mdc2dev enable_recovery
529 sleep $((TIMEOUT / 2))
# Any non-zero lock_count here means the lock replayed too early.
531 LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
532 if [ "$LOCKS" != "" ]; then
533 echo "The lock got replayed before mkdir is replayed: $LOCKS"
# Re-enable client1 recovery even on the early-replay error path.
534 $LCTL --device %$mdc1dev enable_recovery
538 # let's recover 1st connection with mkdir replay that needs the lock
539 $LCTL --device %$mdc1dev enable_recovery
# df doubles as a "recovery completed, client usable" probe.
542 df $MOUNT || return 2
545 run_test 22 "AST during recovery"
# Trailer fragment (the closing `fi` and cleanup invocation are absent from
# this chunk).  Unless only setting up, sleep out the remainder of the
# recovery timeout window (elapsed since $NOW, presumably captured earlier)
# before cleaning up, so no client is still mid-recovery.
547 if [ "$ONLY" != "setup" ]; then
548 equals_msg test complete, cleaning up
550 SLEEP=$((`date +%s` - $NOW))
551 [ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP