X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Freplay-single.sh;h=da4b6f9130b7319cd9225a8d8cff63aac6727e77;hp=e0225694ef87f86b7c87066d80fe0092983d081c;hb=879e8d045057941ae0a5117d096f53975ef12ad0;hpb=48062a17a0d016a56995eeb021f6f32adb01ac03

diff --git a/lustre/tests/replay-single.sh b/lustre/tests/replay-single.sh
index e022569..da4b6f9 100755
--- a/lustre/tests/replay-single.sh
+++ b/lustre/tests/replay-single.sh
@@ -21,7 +21,7 @@ require_dsh_mds || exit 0
 
 # Skip these tests
 # bug number:  17466 18857
-ALWAYS_EXCEPT="61d 33a 33b 89 $REPLAY_SINGLE_EXCEPT"
+ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT"
 
 #                       63 min  7 min  AT  AT  AT  AT"
 [ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
@@ -35,8 +35,12 @@ mkdir -p $DIR
 assert_DIR
 rm -rf $DIR/[df][0-9]*
 
+# LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
+if [ $LINUX_VERSION_CODE -lt $(kernel_version 2 6 33) ]; then
+    sync; sleep 5; sync; sleep 5; sync; sleep 5
+fi
+
 test_0a() {    # was test_0
-    sleep 10
     mkdir $DIR/$tfile
     replay_barrier $SINGLEMDS
     fail $SINGLEMDS
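
The LU-482 workaround in the hunk above only fires on kernels older than 2.6.33. kernel_version is a test-framework helper; a minimal sketch, assuming it packs (major, minor, patch) the same way the kernel's KERNEL_VERSION macro builds LINUX_VERSION_CODE:

    # sketch only: (major << 16) | (minor << 8) | patch
    kernel_version() {
        echo -n $((($1 << 16) | ($2 << 8) | $3))
    }
    # kernel_version 2 6 33 -> 132641, so a 2.6.32 kernel (132640) takes the sync path
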
@@ -55,74 +59,14 @@ test_0b() {
 }
 run_test 0b "ensure object created after recover exists. (3284)"
 
-seq_set_width()
-{
-    local mds=$1
-    local width=$2
-    lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width
-}
-
-seq_get_width()
-{
-    local mds=$1
-    lctl get_param -n seq.cli-srv-$mds-mdc-*.width
-}
-
-# This test should pass for single-mds and multi-mds configs.
-# But for different configurations it tests different things.
-#
-# single-mds
-# ----------
-# (1) fld_create replay should happen;
-#
-# (2) fld_create replay should not return -EEXISTS, if it does
-# this means sequence manager recovery code is buggy and allocated
-# same sequence two times after recovery.
-#
-# multi-mds
-# ---------
-# (1) fld_create replay may not happen, because its home MDS is
-# MDS2 which is not involved to revovery;
-#
-# (2) as fld_create does not happen on MDS1, it does not make any
-# problem.
-test_0c() {
-    local label=`mdsdevlabel 1`
-    [ -z "$label" ] && echo "No label for mds1" && return 1
-
+test_0d() {
     replay_barrier $SINGLEMDS
-    local sw=`seq_get_width $label`
-
-    # make seq manager switch to next sequence each
-    # time as new fid is needed.
-    seq_set_width $label 1
-
-    # make sure that fld has created at least one new
-    # entry on server
-    touch $DIR/$tfile || return 2
-    seq_set_width $label $sw
-
-    # fail $SINGLEMDS and start recovery, replay RPCs, etc.
-    fail $SINGLEMDS
-
-    # wait for recovery finish
-    sleep 10
-    df $MOUNT
-
-    # flush fld cache and dentry cache to make it lookup
-    # created entry instead of revalidating existent one
     umount $MOUNT
-    zconf_mount `hostname` $MOUNT
-
-    # issue lookup which should call fld lookup which
-    # should fail if client did not replay fld create
-    # correctly and server has no fld entry
-    touch $DIR/$tfile || return 3
-    rm $DIR/$tfile || return 4
+    facet_failover $SINGLEMDS
+    zconf_mount `hostname` $MOUNT || error "mount fails"
+    client_up || error "post-failover df failed"
 }
-start_full_debug_logging
-run_test 0c "fld create"
-stop_full_debug_logging
+run_test 0d "expired recovery with no clients"
 
 test_1() {
     replay_barrier $SINGLEMDS
@@ -792,10 +736,11 @@ test_37() {
 
     replay_barrier $SINGLEMDS
     # clear the dmesg buffer so we only see errors from this recovery
-    dmesg -c >/dev/null
+    do_facet $SINGLEMDS dmesg -c >/dev/null
    fail_abort $SINGLEMDS
     kill -USR1 $pid
-    dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
+    do_facet $SINGLEMDS dmesg | grep "error .* unlinking .* from PENDING" &&
+        return 1
     wait $pid || return 3
     sync
     return 0
@@ -979,6 +924,19 @@ test_44b() {
 }
 run_test 44b "race in target handle connect"
 
+test_44c() {
+    replay_barrier $SINGLEMDS
+    createmany -m $DIR/$tfile-%d 100
+#define OBD_FAIL_TGT_RCVG_FLAG 0x712
+    do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
+    fail_abort $SINGLEMDS
+    unlinkmany $DIR/$tfile-%d 100 && return 1
+    fail $SINGLEMDS
+    unlinkmany $DIR/$tfile-%d 100 && return 1
+    return 0
+}
+run_test 44c "race in target handle connect"
+
 # Handle failed close
 test_45() {
     mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
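
Almost every test touched here pivots on replay_barrier: it commits everything the target has seen so far, then stops further commits, so operations issued afterwards exist only in client memory and must be replayed across the failover. A conceptual sketch of that sequence; the real helper lives in test-framework.sh, and the facet_svc name used here is an assumption:

    # conceptual sketch only -- see replay_barrier() in test-framework.sh
    replay_barrier_sketch() {
        local facet=$1                   # e.g. $SINGLEMDS or ost1
        local svc=$(facet_svc $facet)    # assumed helper: target name of the facet
        do_facet $facet sync                              # commit prior operations
        do_facet $facet $LCTL --device %$svc notransno    # stop committing new ones
        do_facet $facet $LCTL --device %$svc readonly
        do_facet $facet $LCTL mark "$facet REPLAY BARRIER on $svc"
    }
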
"$new" = "$orig" ]] || return 2 + drop_reint_reply "setfattr -n trusted.foo1 -v $orig1 $DIR/$tdir/$tfile" || + return 3 + new=$(get_xattr_value trusted.foo1 $MOUNT2/$tdir/$tfile) + [[ "$new" = "$orig1" ]] || return 4 + rm -f $DIR/$tdir/$tfile + rmdir $DIR/$tdir + zconf_umount $HOSTNAME $MOUNT2 } run_test 58c "resend/reconstruct setxattr op" @@ -1619,7 +1596,7 @@ test_65a() #bug 3055 at_start || return 0 $LCTL dk > /dev/null debugsave - sysctl -w lnet.debug="+other" + $LCTL set_param debug="other" # Slow down a request to the current service time, this is critical # because previous tests may have caused this value to increase. REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | @@ -1628,7 +1605,7 @@ test_65a() #bug 3055 do_facet $SINGLEMDS lctl set_param fail_val=$((${REQ_DELAY} * 1000)) #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a - do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x8000050a + do_facet $SINGLEMDS $LCTL set_param fail_loc=0x8000050a createmany -o $DIR/$tfile 10 > /dev/null unlinkmany $DIR/$tfile 10 > /dev/null # check for log message @@ -1648,7 +1625,7 @@ test_65b() #bug 3055 at_start || return 0 # turn on D_ADAPTTO debugsave - sysctl -w lnet.debug="other trace" + $LCTL set_param debug="other trace" $LCTL dk > /dev/null # Slow down a request to the current service time, this is critical # because previous tests may have caused this value to increase. @@ -1660,14 +1637,14 @@ test_65b() #bug 3055 do_facet ost1 lctl set_param fail_val=${REQ_DELAY} #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224 - do_facet ost1 sysctl -w lustre.fail_loc=0x224 + do_facet ost1 $LCTL set_param fail_loc=0x224 rm -f $DIR/$tfile lfs setstripe $DIR/$tfile --index=0 --count=1 # force some real bulk transfer multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c - do_facet ost1 sysctl -w lustre.fail_loc=0 + do_facet ost1 $LCTL set_param fail_loc=0 # check for log message $LCTL dk | grep "Early reply #" || error "No early reply" debugrestore @@ -1683,18 +1660,18 @@ test_66a() #bug 3055 at_start || return 0 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" # adjust 5s at a time so no early reply is sent (within deadline) - do_facet $SINGLEMDS "sysctl -w lustre.fail_val=5000" + do_facet $SINGLEMDS "$LCTL set_param fail_val=5000" #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000050a" + do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000050a" createmany -o $DIR/$tfile 20 > /dev/null unlinkmany $DIR/$tfile 20 > /dev/null lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" - do_facet $SINGLEMDS "sysctl -w lustre.fail_val=10000" - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000050a" + do_facet $SINGLEMDS "$LCTL set_param fail_val=10000" + do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x8000050a" createmany -o $DIR/$tfile 20 > /dev/null unlinkmany $DIR/$tfile 20 > /dev/null lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + do_facet $SINGLEMDS "$LCTL set_param fail_loc=0" sleep 9 createmany -o $DIR/$tfile 20 > /dev/null unlinkmany $DIR/$tfile 20 > /dev/null @@ -1712,11 +1689,11 @@ test_66b() #bug 3055 at_start || return 0 ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}') - sysctl -w lustre.fail_val=$(($ORIG + 5)) + $LCTL set_param fail_val=$(($ORIG + 5)) #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c - sysctl -w lustre.fail_loc=0x50c + $LCTL set_param fail_loc=0x50c ls $DIR/$tfile > /dev/null 2>&1 - sysctl -w 
@@ -1712,11 +1689,11 @@ test_66b() #bug 3055
     at_start || return 0
 
     ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
-    sysctl -w lustre.fail_val=$(($ORIG + 5))
+    $LCTL set_param fail_val=$(($ORIG + 5))
 #define OBD_FAIL_PTLRPC_PAUSE_REP      0x50c
-    sysctl -w lustre.fail_loc=0x50c
+    $LCTL set_param fail_loc=0x50c
     ls $DIR/$tfile > /dev/null 2>&1
-    sysctl -w lustre.fail_loc=0
+    $LCTL set_param fail_loc=0
     CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
     WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
     echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
@@ -1731,12 +1708,12 @@ test_67a() #bug 3055
     at_start || return 0
     CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
     # sleeping threads may drive values above this
-    do_facet ost1 "sysctl -w lustre.fail_val=400"
+    do_facet ost1 "$LCTL set_param fail_val=400"
 #define OBD_FAIL_PTLRPC_PAUSE_REQ     0x50a
-    do_facet ost1 "sysctl -w lustre.fail_loc=0x50a"
+    do_facet ost1 "$LCTL set_param fail_loc=0x50a"
     createmany -o $DIR/$tfile 20 > /dev/null
     unlinkmany $DIR/$tfile 20 > /dev/null
-    do_facet ost1 "sysctl -w lustre.fail_loc=0"
+    do_facet ost1 "$LCTL set_param fail_loc=0"
     CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
     ATTEMPTS=$(($CONN2 - $CONN1))
     echo "$ATTEMPTS osc reconnect attempts on gradual slow"
@@ -1753,7 +1730,7 @@ test_67b() #bug 3055
     CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
 
     # exhaust precreations on ost1
-    local OST=$(lfs osts | grep ^0": " | awk '{print $2}' | sed -e 's/_UUID$//')
+    local OST=$(ostname_from_index 0)
     local mdtosc=$(get_mdtosc_proc_path mds $OST)
     local last_id=$(do_facet $SINGLEMDS lctl get_param -n \
         osc.$mdtosc.prealloc_last_id)
@@ -1764,8 +1741,8 @@ test_67b() #bug 3055
     lfs setstripe $DIR/$tdir/${OST} -o 0 -c 1 || error "setstripe"
     echo "Creating to objid $last_id on ost $OST..."
 #define OBD_FAIL_OST_PAUSE_CREATE        0x223
-    do_facet ost1 "sysctl -w lustre.fail_val=20000"
-    do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
+    do_facet ost1 "$LCTL set_param fail_val=20000"
+    do_facet ost1 "$LCTL set_param fail_loc=0x80000223"
     createmany -o $DIR/$tdir/${OST}/f $next_id $((last_id - next_id + 2))
 
     client_reconnect
@@ -1775,9 +1752,9 @@ test_67b() #bug 3055
     ATTEMPTS=$(($CONN2 - $CONN1))
     echo "$ATTEMPTS osc reconnect attempts on instant slow"
     # do it again; should not timeout
-    do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
+    do_facet ost1 "$LCTL set_param fail_loc=0x80000223"
     cp /etc/profile $DIR/$tfile || error "cp failed"
-    do_facet ost1 "sysctl -w lustre.fail_loc=0"
+    do_facet ost1 "$LCTL set_param fail_loc=0"
     client_reconnect
     do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
     CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
@@ -1806,13 +1783,13 @@ test_68 () #bug 13813
     mkdir -p $DIR/$tdir
     lfs setstripe $DIR/$tdir --index=0 --count=1
 #define OBD_FAIL_LDLM_PAUSE_CANCEL       0x312
-    sysctl -w lustre.fail_val=$(($TIMEOUT - 1))
-    sysctl -w lustre.fail_loc=0x80000312
+    $LCTL set_param fail_val=$(($TIMEOUT - 1))
+    $LCTL set_param fail_loc=0x80000312
     cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
-    sysctl -w lustre.fail_val=$((TIMEOUT * 5 / 4))
-    sysctl -w lustre.fail_loc=0x80000312
+    $LCTL set_param fail_val=$((TIMEOUT * 5 / 4))
+    $LCTL set_param fail_loc=0x80000312
     cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
-    sysctl -w lustre.fail_loc=0
+    $LCTL set_param fail_loc=0
 
     echo $ENQ_MIN >> $ldlm_enqueue_min
     do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
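
One pattern recurs throughout the hunks above: a fault-injection point is armed by writing an OBD_FAIL_* code into fail_loc, and the 0x80000000 bit (OBD_FAIL_ONCE) makes it one-shot. So 0x8000050a delays exactly one request and then disarms itself, while the bare 0x50a used in test_67a stays armed until cleared:

    # one-shot: OBD_FAIL_PTLRPC_PAUSE_REQ | OBD_FAIL_ONCE, self-clears after one hit
    do_facet ost1 "$LCTL set_param fail_loc=0x8000050a"
    # persistent: delays every matching request until explicitly disarmed
    do_facet ost1 "$LCTL set_param fail_loc=0x50a"
    do_facet ost1 "$LCTL set_param fail_loc=0"

fail_val carries the magnitude (here, the delay) into the chosen fail point.
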
@@ -1833,7 +1810,7 @@ test_70a () {
         { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
 
     echo "mount clients $CLIENTS ..."
-    zconf_mount_clients $CLIENTS $DIR
+    zconf_mount_clients $CLIENTS $MOUNT
 
     local clients=${CLIENTS//,/ }
     echo "Write/read files on $DIR ; clients $CLIENTS ... "
@@ -1854,59 +1831,64 @@ test_70a () {
 }
 run_test 70a "check multi client t-f"
 
-check_dbench_load () {
-    local clients=${1//,/ }
-    local client=
-
-    for client in $clients; do
-        if ! do_node $client "ps ax | grep -v grep | awk '{ print $6 }' | grep -q rundbench"; then
-            error_noexit "rundbench load on $client failed!"
-            return 1
-        fi
-    done
-    return 0
+check_for_process () {
+    local clients=$1
+    shift
+    local prog=$@
+
+    killall_process $clients "$prog" -0
 }
 
-kill_dbench_load () {
+killall_process () {
     local clients=${1:-$(hostname)}
-    do_nodes $clients "killall dbench"
+    local name=$2
+    local signal=$3
+    local rc=0
+
+    do_nodes $clients "killall $signal $name"
 }
 
 test_70b () {
     local clients=${CLIENTS:-$HOSTNAME}
 
-    zconf_mount_clients $clients $DIR
+    zconf_mount_clients $clients $MOUNT
 
     local duration=300
     [ "$SLOW" = "no" ] && duration=60
+    # set duration to 900 because it takes some time to boot node
+    [ "$FAILURE_MODE" = HARD ] && duration=900
+
     local cmd="rundbench 1 -t $duration"
-    local PID=""
+    local pid=""
     do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
         PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
         DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
         LCTL=$LCTL $cmd" &
-    PID=$!
-    log "Started rundbench load PID=$PID ..."
-    ELAPSED=0
-    NUM_FAILOVERS=0
-    START_TS=$(date +%s)
-    CURRENT_TS=$START_TS
-    while [ $ELAPSED -lt $duration ]; do
-        if ! check_dbench_load $clients; then
-            kill_dbench_load $clients
+    pid=$!
+    log "Started rundbench load pid=$pid ..."
+
+    # give rundbench a chance to start, bug 24118
+    sleep 2
+    local elapsed=0
+    local num_failovers=0
+    local start_ts=$(date +%s)
+    while [ $elapsed -lt $duration ]; do
+        if ! check_for_process $clients rundbench; then
+            error_noexit "rundbench not found on some of $clients!"
+            killall_process $clients dbench
             break
         fi
         sleep 1
         replay_barrier $SINGLEMDS
         sleep 1 # give clients a time to do operations
         # Increment the number of failovers
-        NUM_FAILOVERS=$((NUM_FAILOVERS+1))
-        log "$TESTNAME fail mds1 $NUM_FAILOVERS times"
+        num_failovers=$((num_failovers+1))
+        log "$TESTNAME fail $SINGLEMDS $num_failovers times"
         fail $SINGLEMDS
-        CURRENT_TS=$(date +%s)
-        ELAPSED=$((CURRENT_TS - START_TS))
+        elapsed=$(($(date +%s) - start_ts))
     done
-    wait $PID || error "rundbench load on $CLIENTS failed!"
+
+    wait $pid || error "rundbench load on $clients failed!"
 }
 run_test 70b "mds recovery; $CLIENTCOUNT clients"
 # end multi-client tests
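
The refactor above replaces dbench-specific ps parsing with two generic helpers: killall_process wraps killall across a node list, and check_for_process reuses it with -0, the standard kill/killall convention for "probe existence, deliver no signal". Typical usage, as in the new test_70b loop:

    # is rundbench still alive on every client? (signal 0 probes, kills nothing)
    check_for_process $CLIENTS rundbench || echo "rundbench died somewhere"
    # tear the load down (no signal argument, so killall's default SIGTERM)
    killall_process $CLIENTS dbench
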
@@ -1992,10 +1974,10 @@ test_80b() {
     [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
 
     mkdir -p $DIR/$tdir
-    replay_barrier mds1
+    replay_barrier $SINGLEMDS
     $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
     rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
-    fail mds1
+    fail $SINGLEMDS
     stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
     return 0
 }
@@ -2009,9 +1991,9 @@ test_81a() {
     sleep 10
     $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
     $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
-    replay_barrier mds1
+    replay_barrier $SINGLEMDS
     rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
-    fail mds1
+    fail $SINGLEMDS
     stat $DIR/$tdir/f1002
 }
 run_test 81a "CMD: unlink cross-node file (fail mds with name)"
@@ -2033,10 +2015,10 @@ test_82b() {
     [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
 
     local dir=$DIR/d82b
-    replay_barrier mds1
+    replay_barrier $SINGLEMDS
     mkdir $dir || error "mkdir $dir failed"
     log "FAILOVER mds1"
-    fail mds1
+    fail $SINGLEMDS
     stat $DIR
     $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
 }
@@ -2100,7 +2082,10 @@ run_test 85a "check the cancellation of unused locks during recovery(IBITS)"
 test_85b() { #bug 16774
     lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
 
-    lfs setstripe -o 0 -c 1 $DIR
+    do_facet mgs $LCTL pool_new $FSNAME.$TESTNAME || return 1
+    do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $FSNAME-OST0000 || return 2
+
+    lfs setstripe -c 1 -p $FSNAME.$TESTNAME $DIR
 
     for i in `seq 100`; do
         dd if=/dev/urandom of=$DIR/$tfile-$i bs=4096 count=32 >/dev/null 2>&1
@@ -2116,12 +2101,16 @@ test_85b() { #bug 16774
     addr=`echo $lov_id | awk '{print $4}' | awk -F '-' '{print $3}'`
     count=`lctl get_param -n ldlm.namespaces.*OST0000*$addr.lock_unused_count`
     echo "before recovery: unused locks count = $count"
+    [ $count != 0 ] || return 3
 
     fail ost1
 
     count2=`lctl get_param -n ldlm.namespaces.*OST0000*$addr.lock_unused_count`
     echo "after recovery: unused locks count = $count2"
 
+    do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $FSNAME-OST0000 || return 4
+    do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME || return 5
+
     if [ $count2 -ge $count ]; then
         error "unused locks are not canceled"
     fi
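
test_88, modified in the next hunks, works by exhausting the MDS's window of precreated objects on ost1. The window boundaries are the osc.*.prealloc_next_id / prealloc_last_id parameters the test reads; a quick probe using the same helper names as the hunk below:

    # how many objects does the MDS still hold precreated on OST index 0?
    OST=$(ostname_from_index 0)
    mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $OST)
    next_id=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
    last_id=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
    echo "precreated window on $OST: $((last_id - next_id + 1)) objects"
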
@@ -2181,14 +2170,14 @@ test_88() { #bug 17485
     lfs setstripe $DIR/$tdir -o 0 -c 1 || error "setstripe"
 
     replay_barrier ost1
-    replay_barrier mds1
+    replay_barrier $SINGLEMDS
 
     # exhaust precreations on ost1
-    local OST=$(lfs osts | grep ^0": " | awk '{print $2}' | sed -e 's/_UUID$//')
-    local mdtosc=$(get_mdtosc_proc_path $OST)
-    local last_id=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_last_id)
-    local next_id=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_next_id)
-    echo "before test: last_id = $last_id, next_id = $next_id"
+    local OST=$(ostname_from_index 0)
+    local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $OST)
+    local last_id=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
+    local next_id=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
+    echo "before test: last_id = $last_id, next_id = $next_id"
 
     echo "Creating to objid $last_id on ost $OST..."
     createmany -o $DIR/$tdir/f-%d $next_id $((last_id - next_id + 2))
@@ -2197,27 +2186,32 @@ test_88() { #bug 17485
     last_id=$(($last_id + 1))
     createmany -o $DIR/$tdir/f-%d $last_id 8
 
-    last_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_last_id)
-    next_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_next_id)
+    last_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
+    next_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
     echo "before recovery: last_id = $last_id2, next_id = $next_id2"
 
-    shutdown_facet mds1
+    # if test uses shutdown_facet && reboot_facet instead of facet_failover ()
+    # it has to take care about the affected facets, bug20407
+    local affected_mds1=$(affected_facets mds1)
+    local affected_ost1=$(affected_facets ost1)
+
+    shutdown_facet $SINGLEMDS
     shutdown_facet ost1
 
-    reboot_facet mds1
-    change_active mds1
-    wait_for mds1
-    mount_facet mds1 || error "Restart of mds failed"
+    reboot_facet $SINGLEMDS
+    change_active $affected_mds1
+    wait_for_facet $affected_mds1
+    mount_facets $affected_mds1 || error "Restart of mds failed"
 
     reboot_facet ost1
-    change_active ost1
-    wait_for ost1
-    mount_facet ost1 || error "Restart of ost1 failed"
+    change_active $affected_ost1
+    wait_for_facet $affected_ost1
+    mount_facets $affected_ost1 || error "Restart of ost1 failed"
 
     clients_up
 
-    last_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_last_id)
-    next_id2=$(do_facet mds1 lctl get_param -n osc.$mdtosc.prealloc_next_id)
+    last_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_last_id)
+    next_id2=$(do_facet $SINGLEMDS lctl get_param -n osc.$mdtosc.prealloc_next_id)
     echo "after recovery: last_id = $last_id2, next_id = $next_id2"
 
     # create new files, which should use new objids, and ensure the orphan
@@ -2250,10 +2244,12 @@ test_88() { #bug 17485
 run_test 88 "MDS should not assign same objid to different files "
 
 test_89() {
+    cancel_lru_locks osc
     mkdir -p $DIR/$tdir
     rm -f $DIR/$tdir/$tfile
-    sleep 2
-    BLOCKS1=$(df $MOUNT | tail -n 1 | awk '{ print $3 }')
+    wait_mds_ost_sync
+    wait_destroy_complete
+    BLOCKS1=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
     lfs setstripe -i 0 -c 1 $DIR/$tdir/$tfile
     dd if=/dev/zero bs=1M count=10 of=$DIR/$tdir/$tfile
     sync
@@ -2263,14 +2259,102 @@ test_89() {
     umount $MOUNT
     mount_facet ost1
     zconf_mount $(hostname) $MOUNT
-    wait_mds_ost_sync
-    df $MOUNT
-    BLOCKS2=$(df $MOUNT | tail -n 1 | awk '{ print $3 }')
+    client_up || return 1
+    wait_mds_ost_sync
+    BLOCKS2=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
     [ "$BLOCKS1" == "$BLOCKS2" ] || error $((BLOCKS2 - BLOCKS1)) blocks leaked
 }
 run_test 89 "no disk space leak on late ost connection"
 
-equals_msg `basename $0`: test complete, cleaning up
+cleanup_90 () {
+    local facet=$1
+    trap 0
+    reboot_facet $facet
+    change_active $facet
+    wait_for_facet $facet
+    mount_facet $facet || error "Restart of $facet failed"
+    clients_up
+}
+
+test_90() { # bug 19494
+    local dir=$DIR/$tdir
+    local ostfail=$(get_random_entry $(get_facets OST))
+
+    if [[ $FAILURE_MODE = HARD ]]; then
+        local affected=$(affected_facets $ostfail);
+        if [[ "$affected" != $ostfail ]]; then
+            skip not functional with FAILURE_MODE=$FAILURE_MODE, affected: $affected
+            return 0
+        fi
+    fi
+
+    mkdir -p $dir
+
+    echo "Create the files"
+
+    # file "f${index}" striped over 1 OST
+    # file "all" striped over all OSTs
+
+    $LFS setstripe -c $OSTCOUNT $dir/all || error "setstripe failed to create $dir/all"
+
+    for (( i=0; i<$OSTCOUNT; i++ )); do
+        local f=$dir/f$i
+        $LFS setstripe -i $i -c 1 $f || error "setstripe failed to create $f"
+
+        # confirm that setstripe actually created the stripe on the requested OST
+        local uuid=$(ostuuid_from_index $i)
+        for file in f$i all; do
+            if [[ $dir/$file != $($LFS find --obd $uuid --name $file $dir) ]]; then
+                $LFS getstripe $dir/$file
+                error wrong stripe: $file, uuid: $uuid
+            fi
+        done
+    done
+
+    # Before failing an OST, get its obd name and index
+    local varsvc=${ostfail}_svc
+    local obd=$(do_facet $ostfail lctl get_param -n obdfilter.${!varsvc}.uuid)
+    local index=${obd:(-6):1}
+
+    echo "Fail $ostfail $obd, display the list of affected files"
+    shutdown_facet $ostfail || return 2
+
+    trap "cleanup_90 $ostfail" EXIT INT
+    echo "General Query: lfs find $dir"
+    local list=$($LFS find $dir)
+    echo "$list"
+    for (( i=0; i<$OSTCOUNT; i++ )); do
+        list_member "$list" $dir/f$i || error_noexit "lfs find $dir: no file f$i"
+    done
+    list_member "$list" $dir/all || error_noexit "lfs find $dir: no file all"
+
+    # focus on the missing OST,
+    # we expect to see only two files affected: "f$(index)" and "all"
+
+    echo "Querying files on shutdown $ostfail: lfs find --obd $obd"
+    list=$($LFS find --obd $obd $dir)
+    echo "$list"
+    for file in all f$index; do
+        list_member "$list" $dir/$file ||
+            error_noexit "lfs find does not report the affected $obd for $file"
+    done
+
+    [[ $(echo $list | wc -w) -eq 2 ]] ||
+        error_noexit "lfs find reports the wrong list of affected files ${#list[@]}"
+
+    echo "Check getstripe: lfs getstripe -r --obd $obd"
+    list=$($LFS getstripe -r --obd $obd $dir)
+    echo "$list"
+    for file in all f$index; do
+        echo "$list" | grep $dir/$file ||
+            error_noexit "lfs getstripe does not report the affected $obd for $file"
+    done
+
+    cleanup_90 $ostfail
+}
+run_test 90 "lfs find identifies the missing striped file segments"
+
+complete $(basename $0) $SECONDS
 check_and_cleanup_lustre
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true
+exit_status
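
test_90 leans on the test-framework helper list_member to check that a path appears in lfs find output. For reading the test outside the harness, a hypothetical stand-in with the same contract (whole-word membership in a whitespace-separated list) would be:

    # hypothetical stand-in for the framework's list_member helper
    list_member() {
        local list=$1
        local item=$2
        echo "$list" | grep -qwF "$item"
    }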