- repair lmv.sh - it was broken for months
diff --git a/lustre/tests/replay-dual.sh b/lustre/tests/replay-dual.sh
index 9a275e9..76eb252 100755
@@ -455,7 +455,7 @@ test_20() {     # bug 3822 - evicting client with enqueued lock
        mkdir -p $MOUNT1/$tdir
        touch $MOUNT1/$tdir/f0
 #define OBD_FAIL_LDLM_ENQUEUE_BLOCKED    0x30b
-       statmany -s $MOUNT1/$tdir/f 500 &
+       statmany -s $MOUNT1/$tdir/f 500 &
        OPENPID=$!
        NOW=`date +%s`
        do_facet mds1 sysctl -w lustre.fail_loc=0x8000030b  # hold enqueue
@@ -468,20 +468,22 @@ test_20() {     # bug 3822 - evicting client with enqueued lock
        wait $OPENPID
        dmesg | grep "entering recovery in server" && \
                error "client not evicted" || true
+       do_facet client sysctl -w lustre.fail_loc=0
 }
 run_test 20 "ldlm_handle_enqueue succeeds on evicted export (3822)"
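Note on the hunk above: lustre.fail_loc persists until it is cleared, so without the added reset a stale fault-injection mask could leak into later tests. A minimal sketch of the arm/fire/disarm pattern the test relies on (mask values taken from the hunk; treating 0x80000000 as the "fire once" bit is an assumption about the mask layout):

        # arm the injection point: OBD_FAIL_LDLM_ENQUEUE_BLOCKED is 0x30b,
        # OR-ed with 0x80000000 so it fires once instead of persisting
        do_facet mds1 sysctl -w lustre.fail_loc=0x8000030b
        # ... run the operation that should block on the held enqueue ...
        # disarm on every node that was touched so later tests start clean
        do_facet mds1 sysctl -w lustre.fail_loc=0
        do_facet client sysctl -w lustre.fail_loc=0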
 
-# $1 - number of mountpoint
+# $1 - fs num (1, 2, ...)
 # $2 - mds
 function find_dev_for_fs_and_mds()
 {
-       local fsuuid=`cat /proc/fs/lustre/llite/fs$1/uuid`
+       local fs=`ls /proc/fs/lustre/llite|head -n $1|tail -n1`
+       local fsuuid=`cat /proc/fs/lustre/llite/$fs/uuid`
        $LCTL device_list | awk "/mdc.*$2.*$fsuuid/ {print \$4}"
 }
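The rewritten helper no longer assumes llite entries are named fs0, fs1, ...: it takes the $1-th directory listed under /proc/fs/lustre/llite and matches its uuid against the lctl device_list output, so callers now index from 1. A usage sketch, condensed from test_21/test_22 below:

        # resolve the MDC device for each client mount against mds1
        mdc1dev=`find_dev_for_fs_and_mds 1 mds1`        # first mount ($MOUNT1)
        mdc2dev=`find_dev_for_fs_and_mds 2 mds1`        # second mount ($MOUNT2)
        # lctl can then address the device by name via the %name form
        $LCTL --device %$mdc1dev disable_recovery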
 
 test_21() {
-       mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
-       mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
+       mdc1dev=`find_dev_for_fs_and_mds 1 mds1`
+       mdc2dev=`find_dev_for_fs_and_mds 2 mds1`
        multiop $MOUNT1/f21 O
        cancel_lru_locks MDC
        # generate IT_OPEN to be replayed against existing file
@@ -511,13 +513,15 @@ test_21() {
 run_test 21 "open vs. unlink out of order replay"
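test_21 hinges on the client holding no cached MDC locks when the failover is staged: cancel_lru_locks flushes them, so the next open genuinely re-enqueues an IT_OPEN for recovery to replay against the existing file. The relevant lines, annotated (reading multiop's "O" as an open/create, which is how the test uses it):

        multiop $MOUNT1/f21 O   # create f21 through multiop
        cancel_lru_locks MDC    # drop cached client locks; the replayed
                                # open must now enqueue a fresh IT_OPEN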
 
 test_22() {    # bug 6063 - AST during recovery
-       mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
-       mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
+       cancel_lru_locks MDC
+       cat /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count
+       mdc1dev=`find_dev_for_fs_and_mds 1 mds1`
+       mdc2dev=`find_dev_for_fs_and_mds 2 mds1`
        $LCTL --device %$mdc1dev disable_recovery
        $LCTL --device %$mdc2dev disable_recovery
 
        replay_barrier mds1
-       mkdir $MOUNT1/${tdir}-1 # client1: request to be replayed 
+       mknod $MOUNT1/${tdir}-1 c 0 0 # client1: request to be replayed 
        ls $MOUNT2              # client2: take lock needed for
        facet_failover mds1
 
@@ -525,7 +529,6 @@ test_22() { # bug 6063 - AST during recovery
        $LCTL --device %$mdc2dev enable_recovery
        sleep $((TIMEOUT / 2))
 
-       $LCTL mark "first recovered?"
        LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
        if [ "$LOCKS" != "" ]; then
                echo "The lock got replayed before mkdir is replayed: $LOCKS"
@@ -536,14 +539,8 @@ test_22() {        # bug 6063 - AST during recovery
        # let's recover 1st connection with mkdir replay that needs the lock 
        $LCTL --device %$mdc1dev enable_recovery
        sleep $TIMEOUT
-       $LCTL mark "second recovered?"
-
-       LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
-       if [ "$LOCKS" != "1" ]; then
-               echo "The lock hasn't replayed: $LOCKS"
-               return 2
-       fi
 
+       df $MOUNT || return 2
        return 0
 }
 run_test 22 "AST during recovery"
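Condensed from the hunks above, the sequence test_22 now stages: both MDC connections are held out of recovery, client1 issues a request (mknod rather than mkdir) behind a replay barrier while client2 takes the lock that request will need, and the clients are released one at a time so the lock is replayed before the request that triggers the AST. The closing df stands in for the lock_count assertion the diff drops:

        $LCTL --device %$mdc1dev disable_recovery       # hold client1 out of recovery
        $LCTL --device %$mdc2dev disable_recovery
        replay_barrier mds1
        mknod $MOUNT1/${tdir}-1 c 0 0   # client1: request to be replayed
        ls $MOUNT2                      # client2: takes the conflicting lock
        facet_failover mds1
        $LCTL --device %$mdc2dev enable_recovery        # lock holder recovers first
        sleep $((TIMEOUT / 2))
        $LCTL --device %$mdc1dev enable_recovery        # then the request needing an AST
        sleep $TIMEOUT
        df $MOUNT || return 2           # recovery must leave the mount usable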