X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Frecovery-small.sh;h=c311e813b7bbacea365a737082781dd92673468b;hp=9641565916b6a7ce35051ab38d9213261de55b31;hb=44cb3a92d756728195409b368b3e89b1a78ef0ae;hpb=6e704244293aec98b39b8243fb2d13e2a8c9b674 diff --git a/lustre/tests/recovery-small.sh b/lustre/tests/recovery-small.sh index 9641565..c311e81 100755 --- a/lustre/tests/recovery-small.sh +++ b/lustre/tests/recovery-small.sh @@ -16,12 +16,8 @@ init_logging require_dsh_mds || exit 0 # also long tests: 19, 21a, 21e, 21f, 23, 27 -# 1 2.5 2.5 4 4 (min)" -[ "$SLOW" = "no" ] && EXCEPT_SLOW="17 26a 26b 50 51 57" -[ $(facet_fstype $SINGLEMDS) = "zfs" ] && -# bug number for skipped test: LU-2194 - ALWAYS_EXCEPT="$ALWAYS_EXCEPT 19b" +[ "$SLOW" = "no" ] && EXCEPT_SLOW="" build_test_filter @@ -149,21 +145,181 @@ test_9() { run_test 9 "pause bulk on OST (bug 1420)" #bug 1521 -test_10() { - do_facet client mcreate $DIR/$tfile || - { error "mcreate failed: $?"; return 1; } - drop_bl_callback "chmod 0777 $DIR/$tfile" || echo "evicted as expected" - # wait for the mds to evict the client - #echo "sleep $(($TIMEOUT*2))" - #sleep $(($TIMEOUT*2)) - do_facet client touch $DIR/$tfile || echo "touch failed, evicted" - do_facet client checkstat -v -p 0777 $DIR/$tfile || - { error "client checkstat failed: $?"; return 3; } - do_facet client "munlink $DIR/$tfile" - # allow recovery to complete - client_up || client_up || sleep $TIMEOUT +test_10a() { + local before=$(date +%s) + local evict + + do_facet client "stat $DIR > /dev/null" || + error "failed to stat $DIR: $?" + drop_bl_callback "chmod 0777 $DIR" || + error "failed to chmod $DIR: $?" + + # let the client reconnect + client_reconnect + evict=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | + awk -F"[ [,]" '/EVICTED ]$/ { if (mx<$5) {mx=$5;} } END { print mx }') + [ ! -z "$evict" ] && [[ $evict -gt $before ]] || + (do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state; + error "no eviction: $evict before:$before") + + do_facet client checkstat -v -p 0777 $DIR || + error "client checkstat failed: $?" +} +run_test 10a "finish request on server after client eviction (bug 1521)" + +test_10b() { + local before=$(date +%s) + local evict + + [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.6.53) ]] && + skip "Need MDS version at least 2.6.53" && return + do_facet client "stat $DIR > /dev/null" || + error "failed to stat $DIR: $?" + drop_bl_callback_once "chmod 0777 $DIR" || + error "failed to chmod $DIR: $?" + + # let the client reconnect + client_reconnect + evict=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | + awk -F"[ [,]" '/EVICTED ]$/ { if (mx<$5) {mx=$5;} } END { print mx }') + + [ -z "$evict" ] || [[ $evict -le $before ]] || + (do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state; + error "eviction happened: $evict before:$before") + + do_facet client checkstat -v -p 0777 $DIR || + error "client checkstat failed: $?" +} +run_test 10b "re-send BL AST" + +test_10c() { + local before=$(date +%s) + local evict + local mdccli + local mdcpath + local conn_uuid + local workdir + local pid + local rc + + workdir="${DIR}/${tdir}" + mkdir -p ${workdir} || error "can't create workdir $?" + stat ${workdir} > /dev/null || + error "failed to stat ${workdir}: $?" 
+	mdtidx=$($LFS getdirstripe -i ${workdir})
+	mdtname=$($LFS mdts ${workdir} | grep -e "^$mdtidx:" |
+		  awk '{sub("_UUID", "", $2); print $2;}')
+	#assume one client
+	mdccli=$($LCTL dl | grep "${mdtname}-mdc" | awk '{print $4;}')
+	conn_uuid=$($LCTL get_param -n mdc.${mdccli}.mds_conn_uuid)
+	mdcpath="mdc.${mdccli}.import=connection=${conn_uuid}"
+
+	drop_bl_callback_once "chmod 0777 ${workdir}" &
+	pid=$!
+
+	# let chmod get blocked
+	sleep 1
+	# force client reconnect
+	$LCTL set_param "${mdcpath}"
+
+	# wait for the client to reconnect
+	client_reconnect
+	wait $pid
+	rc=$?
+	evict=$($LCTL get_param mdc.${mdccli}.state |
+	   awk -F"[ [,]" '/EVICTED]$/ { if (t<$4) {t=$4;} } END { print t }')
+
+	[[ $evict -le $before ]] ||
+		( $LCTL get_param mdc.$FSNAME-MDT*.state;
+		  error "eviction happened: $evict before:$before" )
+
+	[ $rc -eq 0 ] || error "chmod must finish OK"
+	checkstat -v -p 0777 "${workdir}" ||
+		error "client checkstat failed: $?"
+}
+run_test 10c "re-send BL AST vs reconnect race (LU-5569)"
+
+test_10d() {
+	local before=$(date +%s)
+	local evict
+
+	[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.6.90) ]] &&
+		skip "Need MDS version at least 2.6.90" && return
+
+	# sleep 1 is to make sure that before is not equal to evict below
+	sleep 1
+	rm -f $TMP/$tfile
+	echo -n ", world" | dd of=$TMP/$tfile bs=1c seek=5
+
+	remount_client $MOUNT
+	mount_client $MOUNT2
+
+	cancel_lru_locks osc
+	$LFS setstripe -i 0 -c 1 $DIR1/$tfile
+	echo -n hello | dd of=$DIR1/$tfile bs=5
+
+	stat $DIR2/$tfile >& /dev/null
+	$LCTL set_param fail_err=71
+	drop_bl_callback "echo -n \\\", world\\\" >> $DIR2/$tfile"
+
+	client_reconnect
+
+	cancel_lru_locks osc
+	cmp -l $DIR1/$tfile $DIR2/$tfile || error "file contents differ"
+	cmp -l $DIR1/$tfile $TMP/$tfile || error "wrong content found"
+
+	evict=$(do_facet client $LCTL get_param osc.$FSNAME-OST0000*.state | \
+		tr -d '\-\[\] ' | \
+	      awk -F"[ [,]" '/EVICTED$/ { if (mx<$1) {mx=$1;} } END { print mx }')
+
+	[[ $evict -gt $before ]] ||
+		(do_facet client $LCTL get_param osc.$FSNAME-OST0000*.state;
+		    error "no eviction: $evict before:$before")
+
+	$LCTL set_param fail_err=0
+	rm $TMP/$tfile
+	umount_client $MOUNT2
 }
-run_test 10 "finish request on server after client eviction (bug 1521)"
+run_test 10d "test failed blocking ast"
+
+test_10e()
+{
+	[[ $(lustre_version_code ost1) -le $(version_code 2.8.58) ]] &&
+		skip "Need OST version at least 2.8.59" && return 0
+	[ $CLIENTCOUNT -lt 2 ] && skip "need two clients" && return 0
+	[ $(facet_host client) == $(facet_host ost1) ] &&
+		skip "need ost1 and client on different nodes" && return 0
+	local -a clients=(${CLIENTS//,/ })
+	local client1=${clients[0]}
+	local client2=${clients[1]}
+
+	$LFS setstripe -c 1 -i 0 $DIR/$tfile-1 $DIR/$tfile-2
+	$MULTIOP $DIR/$tfile-1 Ow1048576c
+
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET			0x305
+	$LCTL set_param fail_loc=0x80000305
+
+#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT		0x30e
+	do_facet ost1 "$LCTL set_param fail_loc=0x1000030e"
+	# hit OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT twice:
+	# 1. to return ENOTCONN from ldlm_handle_enqueue0
+	# 2. to pause reconnect handling between resend and setting
+	# import to LUSTRE_IMP_FULL state
+	do_facet ost1 "$LCTL set_param fail_val=3"
+
+	# client1 fails to respond to bl ast
+	do_node $client2 "$MULTIOP $DIR/$tfile-1 Ow1048576c" &
+	MULTIPID=$!
+ + # ost1 returns error on enqueue, which causes client1 to reconnect + do_node $client1 "$MULTIOP $DIR/$tfile-2 Ow1048576c" || + error "multiop failed" + wait $MULTIPID + + do_facet ost1 "$LCTL set_param fail_loc=0" + do_facet ost1 "$LCTL set_param fail_val=0" +} +run_test 10e "re-send BL AST vs reconnect race 2" #bug 2460 # wake up a thread waiting for completion after eviction @@ -177,7 +333,8 @@ test_11(){ do_facet client $MULTIOP $DIR/$tfile or || { error "multiop read failed: $?"; return 3; } - drop_bl_callback $MULTIOP $DIR/$tfile Ow || echo "evicted as expected" + drop_bl_callback_once $MULTIOP $DIR/$tfile Ow || + echo "evicted as expected" do_facet client munlink $DIR/$tfile || { error "munlink failed: $?"; return 4; } @@ -264,10 +421,11 @@ test_16() { sleep $TIMEOUT do_facet client "cmp $TMP/$tfile $DIR/$tfile" || return 2 start_read_ahead + rm -f $TMP/$tfile } run_test 16 "timeout bulk put, don't evict client (2732)" -test_17() { +test_17a() { local at_max_saved=0 remote_ost_nodsh && skip "remote OST with nodsh" && return 0 @@ -301,7 +459,93 @@ test_17() { [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved ost1 return 0 } -run_test 17 "timeout bulk get, don't evict client (2732)" +run_test 17a "timeout bulk get, don't evict client (2732)" + +test_17b() { + [ -z "$RCLIENTS" ] && skip "Needs multiple clients" && return 0 + + # get one of the clients from client list + local rcli=$(echo $RCLIENTS | cut -d ' ' -f 1) + local p="$TMP/$TESTSUITE-$TESTNAME.parameters" + local ldlm_enqueue_min=$(do_facet ost1 find /sys -name ldlm_enqueue_min) + [ -z "$ldlm_enqueue_min" ] && + skip "missing /sys/.../ldlm_enqueue_min" && return 0 + + $LFS setstripe -i 0 -c 1 -S 1048576 $DIR/$tfile || + error "setstripe failed" + $LFS setstripe -i 0 -c 1 -S 1048576 $DIR/${tfile}2 || + error "setstripe 2 failed" + + save_lustre_params ost1 "at_history" > $p + save_lustre_params ost1 "bulk_timeout" >> $p + local dev="${FSNAME}-OST0000" + save_lustre_params ost1 "obdfilter.$dev.brw_size" >> $p + local ldlm_enqueue_min_save=$(do_facet ost1 cat $ldlm_enqueue_min) + + local new_at_history=15 + + do_facet ost1 "$LCTL set_param at_history=$new_at_history" + do_facet ost1 "$LCTL set_param bulk_timeout=30" + do_facet ost1 "echo 30 > /sys/module/ptlrpc/parameters/ldlm_enqueue_min" + + # brw_size is required to be 4m so that bulk transfer timeout + # could be simulated with OBD_FAIL_PTLRPC_CLIENT_BULK_CB3 + local brw_size=$($LCTL get_param -n \ + osc.$FSNAME-OST0000-osc-[^M]*.import | + awk '/max_brw_size/{print $2}') + if [ $brw_size -ne 4194304 ] + then + save_lustre_params ost1 "obdfilter.$dev.brw_size" >> $p + do_facet ost1 "$LCTL set_param obdfilter.$dev.brw_size=4" + remount_client $MOUNT + fi + + # get service estimate expanded + #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224 + do_facet ost1 "$LCTL set_param fail_loc=0x80000224 fail_val=35" + echo "delay rpc servicing by 35 seconds" + dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=fdatasync + + local estimate + estimate=$($LCTL get_param -n osc.$dev-osc-*.timeouts | + awk '/portal 6/ {print $5}') + echo "service estimates increased to $estimate" + + # let current worst service estimate to get closer to obliteration + sleep $((new_at_history / 3)) + + # start i/o and simulate bulk transfer loss + #define OBD_FAIL_PTLRPC_CLIENT_BULK_CB3 0x520 + do_facet ost1 "$LCTL set_param fail_loc=0xa0000520 fail_val=1" + dd if=/dev/zero of=$DIR/$tfile bs=2M count=1 conv=fdatasync,notrunc & + local writedd=$! 
+ + # start lock conflict handling + sleep $((new_at_history / 3)) + do_node $rcli "dd if=$DIR/$tfile of=/dev/null bs=1M count=1" & + local readdd=$! + + # obliterate the worst service estimate + sleep $((new_at_history / 3 + 1)) + dd if=/dev/zero of=$DIR/${tfile}2 bs=1M count=1 + + estimate=$($LCTL get_param -n osc.$dev-osc-*.timeouts | + awk '/portal 6/ {print $5}') + echo "service estimate dropped to $estimate" + + wait $writedd + [[ $? == 0 ]] || error "write failed" + wait $readdd + [[ $? == 0 ]] || error "read failed" + + restore_lustre_params <$p + if [ $brw_size -ne 4194304 ] + then + remount_client $MOUNT || error "remount_client failed" + fi + do_facet ost1 "echo $ldlm_enqueue_min_save > $ldlm_enqueue_min" +} +run_test 17b "timeout bulk get, dont evict client (3582)" test_18a() { [ -z ${ost2_svc} ] && skip_env "needs 2 osts" && return 0 @@ -332,7 +576,7 @@ test_18a() { rc=0 pgcache_empty || rc=2 $LCTL --device $osc2dev activate - rm -f $f + rm -f $f $TMP/$tfile return $rc } run_test 18a "manual ost invalidate clears page cache immediately" @@ -365,7 +609,7 @@ test_18b() { # cache after the client reconnects? rc=0 pgcache_empty || rc=2 - rm -f $f + rm -f $f $TMP/$tfile return $rc } run_test 18b "eviction and reconnect clears page cache (2766)" @@ -398,13 +642,13 @@ test_18c() { do_facet ost1 lctl set_param fail_loc=0x80000225 # force reconnect sleep 1 - df $MOUNT > /dev/null 2>&1 + $LFS df $MOUNT > /dev/null 2>&1 sleep 2 # my understanding is that there should be nothing in the page - # cache after the client reconnects? + # cache after the client reconnects? rc=0 pgcache_empty || rc=2 - rm -f $f + rm -f $f $TMP/$tfile return $rc } run_test 18c "Dropped connect reply after eviction handing (14755)" @@ -427,8 +671,9 @@ test_19a() { # let the client reconnect client_reconnect - EVICT=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | \ - awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }') + EVICT=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | + awk -F"[ [,]" '/EVICTED ]$/ \ + { if (mx<$5) {mx=$5;} } END { print mx }') [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] || (do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state; @@ -456,8 +701,9 @@ test_19b() { # let the client reconnect client_reconnect - EVICT=$(do_facet client $LCTL get_param osc.$FSNAME-OST*.state | \ - awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }') + EVICT=$(do_facet client $LCTL get_param osc.$FSNAME-OST*.state | + awk -F"[ [,]" '/EVICTED ]$/ \ + { if (mx < $5) {mx = $5;} } END { print mx }') [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] || (do_facet client $LCTL get_param osc.$FSNAME-OST*.state; @@ -489,7 +735,7 @@ test_19c() { # let the client reconnect sleep 5 EVICT=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | - awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }') + awk -F"[ [,]" '/EVICTED ]$/ { if (mx<$5) {mx=$5;} } END { print mx }') [ -z "$EVICT" ] || [[ $EVICT -le $BEFORE ]] || error "eviction happened" } @@ -767,7 +1013,8 @@ test_24a() { # bug 11710 details correct fsync() behavior rc=$? 
lctl set_param fail_loc=0x0 client_reconnect - [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true + [ $rc -eq 0 ] && + error_ignore bz5494 "multiop didn't fail fsync: rc $rc" || true } run_test 24a "fsync error (should return error)" @@ -786,7 +1033,8 @@ test_24b() { dmesg -c > /dev/null mkdir -p $DIR/$tdir - lfs setstripe $DIR/$tdir -s 0 -i 0 -c 1 + lfs setstripe $DIR/$tdir -S 0 -i 0 -c 1 || + error "$LFS setstripe failed" cancel_lru_locks osc multiop_bg_pause $DIR/$tdir/$tfile-1 Ow8192_yc || error "mulitop Ow8192_yc failed" @@ -807,10 +1055,10 @@ test_24b() { lctl set_param fail_loc=0x0 client_reconnect [ $rc1 -eq 0 -o $rc2 -eq 0 ] && - error_ignore 5494 "multiop didn't fail fsync: $rc1 or close: $rc2" || + error_ignore bz5494 "multiop didn't fail fsync: $rc1 or close: $rc2" || true - dmesg | grep "dirty page discard:" || \ + dmesg | grep "dirty page discard:" || error "no discarded dirty page found!" } run_test 24b "test dirty page discard due to client eviction" @@ -828,20 +1076,21 @@ test_26a() { # was test_26 bug 5921 - evict dead exports by pinger check_timeout || return 1 - local OST_NEXP=$(do_facet ost1 lctl get_param -n obdfilter.${ost1_svc}.num_exports | cut -d' ' -f2) - - echo starting with $OST_NEXP OST exports # OBD_FAIL_PTLRPC_DROP_RPC 0x505 do_facet client lctl set_param fail_loc=0x505 - # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict. - # But if there's a race to start the evictor from various obds, - # the loser might have to wait for the next ping. - + local before=$(date +%s) local rc=0 - wait_client_evicted ost1 $OST_NEXP $((TIMEOUT * 2 + TIMEOUT * 3 / 4)) - rc=$? + + # evictor takes PING_EVICT_TIMEOUT + 3 * PING_INTERVAL to evict. + # But if there's a race to start the evictor from various obds, + # the loser might have to wait for the next ping. + sleep $((TIMEOUT * 2 + TIMEOUT * 3 / 4)) do_facet client lctl set_param fail_loc=0x0 - [ $rc -eq 0 ] || error "client not evicted from OST" + do_facet client df > /dev/null + + local oscs=$(lctl dl | awk '/-osc-/ {print $4}') + check_clients_evicted $before ${oscs[@]} + check_clients_full 10 ${oscs[@]} } run_test 26a "evict dead exports" @@ -907,7 +1156,8 @@ run_test 27 "fail LOV while using OSC's" test_28() { # bug 6086 - error adding new clients do_facet client mcreate $DIR/$tfile || return 1 - drop_bl_callback "chmod 0777 $DIR/$tfile" ||echo "evicted as expected" + drop_bl_callback_once "chmod 0777 $DIR/$tfile" || + echo "evicted as expected" #define OBD_FAIL_MDS_CLIENT_ADD 0x12f do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012f" # fail once (evicted), reconnect fail (fail_loc), ok @@ -923,7 +1173,7 @@ test_29a() { # bug 22273 - error adding new clients # fail abort so client will be new again fail_abort $SINGLEMDS client_up || error "reconnect failed" - wait_osc_import_state mds ost FULL + wait_osc_import_state $SINGLEMDS ost FULL return 0 } run_test 29a "error adding new clients doesn't cause LBUG (bug 22273)" @@ -959,7 +1209,8 @@ test_50() { rc=$? echo writemany returned $rc #these may fail because of eviction due to slow AST response. - [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true + [ $rc -eq 0 ] || + error_ignore bz13652 "writemany returned rc $rc" || true } run_test 50 "failover MDS under load" @@ -988,10 +1239,11 @@ test_51() { # and recovery was interrupted sleep $TIMEOUT kill -USR1 $CLIENT_PID - wait $CLIENT_PID + wait $CLIENT_PID rc=$? 
echo writemany returned $rc - [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true + [ $rc -eq 0 ] || + error_ignore bz13652 "writemany returned rc $rc" || true } run_test 51 "failover MDS during recovery" @@ -1060,57 +1312,58 @@ test_55() { mkdir -p $DIR/$tdir + # Minimum pass speed is 2MBps + local ddtimeout=64 + # LU-2887/LU-3089 - set min pass speed to 500KBps + [ "$(facet_fstype ost1)" = "zfs" ] && ddtimeout=256 + # first dd should be finished quickly $LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-1 - dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 & + dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 & DDPID=$! count=0 echo "step1: testing ......" - while [ true ]; do - if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi - count=$[count+1] - if [ $count -gt 64 ]; then - error "dd should be finished!" - fi - sleep 1 - done + while kill -0 $DDPID 2> /dev/null; do + let count++ + if [ $count -gt $ddtimeout ]; then + error "dd should be finished!" + fi + sleep 1 + done echo "(dd_pid=$DDPID, time=$count)successful" $LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-2 #define OBD_FAIL_OST_DROP_REQ 0x21d do_facet ost1 lctl set_param fail_loc=0x0000021d # second dd will be never finished - dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 & + dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 & DDPID=$! count=0 echo "step2: testing ......" - while [ $count -le 64 ]; do - dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`" - if [ -z $dd_name ]; then - ls -l $DIR/$tdir - echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)" - error "dd shouldn't be finished!" - fi - count=$[count+1] - sleep 1 - done + while [ $count -le $ddtimeout ]; do + if ! kill -0 $DDPID 2> /dev/null; then + ls -l $DIR/$tdir + error "dd shouldn't be finished! (time=$count)" + fi + let count++ + sleep 1 + done echo "(dd_pid=$DDPID, time=$count)successful" #Recover fail_loc and dd will finish soon do_facet ost1 lctl set_param fail_loc=0 count=0 echo "step3: testing ......" - while [ true ]; do - if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi - count=$[count+1] - if [ $count -gt 500 ]; then - error "dd should be finished!" - fi - sleep 1 - done + while kill -0 $DDPID 2> /dev/null; do + let count++ + if [ $count -gt $((ddtimeout + 440)) ]; then + error "dd should be finished!" + fi + sleep 1 + done echo "(dd_pid=$DDPID, time=$count)successful" - rm -rf $DIR/$tdir + rm -rf $DIR/$tdir } run_test 55 "ost_brw_read/write drops timed-out read/write request" @@ -1118,11 +1371,11 @@ test_56() { # b=11277 #define OBD_FAIL_MDS_RESEND 0x136 touch $DIR/$tfile do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000136" - stat $DIR/$tfile + stat $DIR/$tfile || error "stat failed" do_facet $SINGLEMDS "lctl set_param fail_loc=0" rm -f $DIR/$tfile } -run_test 56 "do not allow reconnect to busy exports" +run_test 56 "do not fail on getattr resend" test_57_helper() { # no oscs means no client or mdt @@ -1156,10 +1409,10 @@ test_58() { # bug 11546 pid=$! sleep 1 lctl set_param fail_loc=0 - drop_bl_callback rm -f $DIR/$tfile + drop_bl_callback_once rm -f $DIR/$tfile wait $pid # the first 'df' could tigger the eviction caused by - # 'drop_bl_callback', and it's normal case. + # 'drop_bl_callback_once', and it's normal case. # but the next 'df' should return successfully. 
do_facet client "df $DIR" || do_facet client "df $DIR" } @@ -1184,17 +1437,17 @@ test_59() { # bug 10589 run_test 59 "Read cancel race on client eviction" err17935 () { - # we assume that all md changes are in the MDT0 changelog - if [ $MDSCOUNT -gt 1 ]; then - error_ignore 17935 $* - else - error $* - fi + # we assume that all md changes are in the MDT0 changelog + if [ $MDSCOUNT -gt 1 ]; then + error_ignore bz17935 $* + else + error $* + fi } test_60() { - MDT0=$($LCTL get_param -n mdc.*.mds_server_uuid | \ - awk '{gsub(/_UUID/,""); print $1}' | head -1) + MDT0=$($LCTL get_param -n mdc.*.mds_server_uuid | + awk '{ gsub(/_UUID/,""); print $1 }' | head -n1) NUM_FILES=15000 mkdir -p $DIR/$tdir @@ -1258,12 +1511,14 @@ test_61() replay_barrier $SINGLEMDS createmany -o $DIR/$tdir/$tfile-%d 10 - local oid=`do_facet ost1 "lctl get_param -n obdfilter.${ost1_svc}.last_id"` + local oid=$(do_facet ost1 "lctl get_param -n \ + obdfilter.${ost1_svc}.last_id" | sed -e 's/.*://') fail_abort $SINGLEMDS - + touch $DIR/$tdir/$tfile - local id=`$LFS getstripe $DIR/$tdir/$tfile | awk '$1 == 0 { print $2 }'` + local id=$($LFS getstripe $DIR/$tdir/$tfile | + awk '$1 == 0 { print $2 }') [ $id -le $oid ] && error "the orphan objid was reused, failed" # Cleanup @@ -1281,6 +1536,72 @@ run_test 61 "Verify to not reuse orphan objects - bug 17025" #} #run_test 62 "Verify connection flags race - bug LU-1716" +test_66() +{ + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.51) ]] || + { skip "Need MDS version at least 2.7.51"; return 0; } + + local list=$(comma_list $(osts_nodes)) + + # modify dir so that next revalidate would not obtain UPDATE lock + touch $DIR + + # drop 1 reply with UPDATE lock + mcreate $DIR/$tfile || error "mcreate failed: $?" + drop_ldlm_reply_once "stat $DIR/$tfile" & + sleep 2 + + # make the re-sent lock to sleep +#define OBD_FAIL_MDS_RESEND 0x136 + do_nodes $list $LCTL set_param fail_loc=0x80000136 + + #initiate the re-connect & re-send + local mdccli=$($LCTL dl | awk '/-mdc-/ {print $4;}') + local conn_uuid=$($LCTL get_param -n mdc.${mdccli}.mds_conn_uuid) + $LCTL set_param "mdc.${mdccli}.import=connection=${conn_uuid}" + sleep 2 + + #initiate the client eviction while enqueue re-send is in progress + mds_evict_client + + client_reconnect + wait +} +run_test 66 "lock enqueue re-send vs client eviction" + +test_65() { + mount_client $DIR2 + + #grant lock1, export2 + $SETSTRIPE -i -0 $DIR2/$tfile || return 1 + $MULTIOP $DIR2/$tfile Ow || return 2 + +#define OBD_FAIL_LDLM_BL_EVICT 0x31e + do_facet ost $LCTL set_param fail_loc=0x31e + #get waiting lock2, export1 + $MULTIOP $DIR/$tfile Ow & + PID1=$! + # let enqueue to get asleep + sleep 2 + + #get lock2 blocked + $MULTIOP $DIR2/$tfile Ow & + PID2=$! 
+ sleep 2 + + #evict export1 + ost_evict_client + + sleep 2 + do_facet ost $LCTL set_param fail_loc=0 + + wait $PID1 + wait $PID2 + + umount_client $DIR2 +} +run_test 65 "lock enqueue for destroyed export" + check_cli_ir_state() { local NODE=${1:-$HOSTNAME} @@ -1299,6 +1620,11 @@ check_target_ir_state() local recovery_proc=obdfilter.${!name}.recovery_status local st + while : ; do + st=$(do_facet $target "$LCTL get_param -n $recovery_proc | + awk '/status:/{ print \\\$2}'") + [ x$st = xRECOVERING ] || break + done st=$(do_facet $target "lctl get_param -n $recovery_proc | awk '/IR:/{ print \\\$2}'") [ $st != ON -o $st != OFF -o $st != ENABLED -o $st != DISABLED ] || @@ -1380,8 +1706,8 @@ target_instance_match() local target=${srv}_svc local si=$(do_facet $srv lctl get_param -n $obdname.${!target}.instance) - local ci=$(lctl get_param -n $cliname.${!target}-${cliname}-*.import | \ - awk '/instance/{ print $2 }' |head -1) + local ci=$(lctl get_param -n $cliname.${!target}-${cliname}-*.import | + awk '/instance/{ print $2 }' | head -n1) return $([ $si -eq $ci ]) } @@ -1495,7 +1821,7 @@ test_103() do_facet mgs $LCTL list_param mgs.*.ir_timeout || { skip "MGS without IR support"; return 0; } - combined_mgs_mds && skip "mgs and mds on the same target" && return 0 + combined_mgs_mds && skip "needs separate mgs and mds" && return 0 # workaround solution to generate config log on the mds remount_facet mds1 @@ -1504,10 +1830,11 @@ test_103() stop mds1 # We need this test because mds is like a client in IR context. - start mds1 $MDSDEV1 || error "MDS should start w/o mgs" + start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS || + error "MDS should start w/o mgs" # start mgs and remount mds w/ ir - start mgs $MGSDEV + start mgs $(mgsdevname) $MGS_MOUNT_OPTS clients_up # remount client so that fsdb will be created on the MGS @@ -1560,10 +1887,9 @@ test_105() # get one of the clients from client list local rcli=$(echo $RCLIENTS |cut -d' ' -f 1) - local old_MOUNTOPT=$MOUNTOPT - MOUNTOPT=${MOUNTOPT},noir + local mount_opts=${MOUNT_OPTS:+$MOUNT_OPTS,}noir zconf_umount $rcli $MOUNT || error "umount failed" - zconf_mount $rcli $MOUNT || error "mount failed" + zconf_mount $rcli $MOUNT $mount_opts || error "mount failed" # make sure lustre mount at $rcli disabling IR local ir_state=$(check_cli_ir_state $rcli) @@ -1585,8 +1911,7 @@ test_105() [ $ir_state = "DISABLED" -o $ir_state = "OFF" ] || error "IR status on ost1 should be DISABLED" - # restore it - MOUNTOPT=$old_MOUNTOPT + # remount with the default MOUNT_OPTS zconf_umount $rcli $MOUNT || error "umount failed" zconf_mount $rcli $MOUNT || error "mount failed" @@ -1675,13 +2000,38 @@ test_107 () { } run_test 107 "drop reint reply, then restart MDT" +test_108() { + mkdir -p $DIR/$tdir + $SETSTRIPE -c 1 -i 0 $DIR/$tdir + + dd if=/dev/zero of=$DIR/$tdir/$tfile bs=1M count=256 & + local dd_pid=$! + sleep 0.1 + + ost_evict_client + + wait $dd_pid + + client_up || error "reconnect failed" + rm -f $DIR/$tdir/$tfile +} +run_test 108 "client eviction don't crash" + test_110a () { [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 local remote_dir=$DIR/$tdir/remote_dir local MDTIDX=1 + local num + + #prepare for 110 test, which need set striped dir on remote MDT. 
+ for num in $(seq $MDSCOUNT); do + do_facet mds$num \ + lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \ + 2>/dev/null + done mkdir -p $DIR/$tdir - drop_request "$LFS mkdir -i $MDTIDX $remote_dir" || + drop_request "$LFS mkdir -i $MDTIDX -c2 $remote_dir" || error "lfs mkdir failed" local diridx=$($GETSTRIPE -M $remote_dir) [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX" @@ -1696,7 +2046,7 @@ test_110b () { local MDTIDX=1 mkdir -p $DIR/$tdir - drop_reint_reply "$LFS mkdir -i $MDTIDX $remote_dir" || + drop_reint_reply "$LFS mkdir -i $MDTIDX -c2 $remote_dir" || error "lfs mkdir failed" diridx=$($GETSTRIPE -M $remote_dir) @@ -1712,7 +2062,7 @@ test_110c () { local MDTIDX=1 mkdir -p $DIR/$tdir - drop_update_reply $((MDTIDX + 1)) "$LFS mkdir -i $MDTIDX $remote_dir" || + drop_update_reply $MDTIDX "$LFS mkdir -i $MDTIDX -c2 $remote_dir" || error "lfs mkdir failed" diridx=$($GETSTRIPE -M $remote_dir) @@ -1728,7 +2078,7 @@ test_110d () { local MDTIDX=1 mkdir -p $DIR/$tdir - $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed" + $LFS mkdir -i $MDTIDX -c2 $remote_dir || error "lfs mkdir failed" drop_request "rm -rf $remote_dir" || error "rm remote dir failed" @@ -1744,7 +2094,7 @@ test_110e () { local MDTIDX=1 mkdir -p $DIR/$tdir - $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed" + $LFS mkdir -i $MDTIDX -c2 $remote_dir || error "lfs mkdir failed" drop_reint_reply "rm -rf $remote_dir" || error "rm remote dir failed" rm -rf $DIR/$tdir || error "rmdir failed" @@ -1759,7 +2109,7 @@ test_110f () { local MDTIDX=1 mkdir -p $DIR/$tdir - $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed" + $LFS mkdir -i $MDTIDX -c2 $remote_dir || error "lfs mkdir failed" drop_update_reply $MDTIDX "rm -rf $remote_dir" || error "rm remote dir failed" @@ -1767,8 +2117,110 @@ test_110f () { } run_test 110f "remove remote directory: drop slave rep" +test_110g () { + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.6.57) ]] || + { skip "Need MDS version at least 2.6.57"; return 0; } + + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + local remote_dir=$DIR/$tdir/remote_dir + local MDTIDX=1 + + mkdir -p $remote_dir + + createmany -o $remote_dir/f 100 + + #define OBD_FAIL_MIGRATE_NET_REP 0x1800 + do_facet mds$MDTIDX lctl set_param fail_loc=0x1800 + $LFS migrate -m $MDTIDX $remote_dir || error "migrate failed" + do_facet mds$MDTIDX lctl set_param fail_loc=0x0 + + for file in $(find $remote_dir); do + mdt_index=$($LFS getstripe -M $file) + [ $mdt_index == $MDTIDX ] || + error "$file is not on MDT${MDTIDX}" + done + + rm -rf $DIR/$tdir || error "rmdir failed" +} +run_test 110g "drop reply during migration" + +test_110h () { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + local src_dir=$DIR/$tdir/source_dir + local tgt_dir=$DIR/$tdir/target_dir + local MDTIDX=1 + + mkdir -p $src_dir + $LFS mkdir -i $MDTIDX $tgt_dir + + dd if=/etc/hosts of=$src_dir/src_file + touch $tgt_dir/tgt_file + drop_update_reply $MDTIDX \ + "mrename $src_dir/src_file $tgt_dir/tgt_file" || + error "mrename failed" + + $CHECKSTAT -t file $src_dir/src_file && + error "src_file present after rename" + + diff /etc/hosts $tgt_dir/tgt_file || + error "file changed after rename" + +} +run_test 110h "drop update reply during cross-MDT file rename" + +test_110i () { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + local src_dir=$DIR/$tdir/source_dir + local tgt_dir=$DIR/$tdir/target_dir + local MDTIDX=1 + + mkdir -p $src_dir + $LFS mkdir -i $MDTIDX $tgt_dir + + mkdir $src_dir/src_dir 
+ touch $src_dir/src_dir/a + mkdir $tgt_dir/tgt_dir + drop_update_reply $MDTIDX \ + "mrename $src_dir/src_dir $tgt_dir/tgt_dir" || + error "mrename failed" + + $CHECKSTAT -t dir $src_dir/src_dir && + error "src_dir present after rename" + + $CHECKSTAT -t dir $tgt_dir/tgt_dir || + error "tgt_dir not present after rename" + + $CHECKSTAT -t file $tgt_dir/tgt_dir/a || + error "a not present after rename" +} +run_test 110i "drop update reply during cross-MDT dir rename" + +test_110j () { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + local remote_dir=$DIR/$tdir/remote_dir + local local_dir=$DIR/$tdir/local_dir + local MDTIDX=1 + + mkdir -p $DIR/$tdir + mkdir $DIR/$tdir/local_dir + $LFS mkdir -i $MDTIDX $remote_dir + + touch $local_dir/local_file + drop_update_reply $MDTIDX \ + "ln $local_dir/local_file $remote_dir/remote_file" || + error "ln failed" + + $CHECKSTAT -t file $remote_dir/remote_file || + error "remote not present after ln" +} +run_test 110j "drop update reply during cross-MDT ln" + # LU-2844 mdt prepare fail should not cause umount oops -test_111 () { +test_111 () +{ + [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.62) ]] || + { skip "Need MDS version at least 2.3.62"; return 0; } + local mdsdev=$(mdsdevname ${SINGLEMDS//mds/}) #define OBD_FAIL_MDS_CHANGELOG_INIT 0x151 do_facet $SINGLEMDS lctl set_param fail_loc=0x151 @@ -1779,6 +2231,461 @@ test_111 () { } run_test 111 "mdd setup fail should not cause umount oops" +# LU-793 +test_112a() { + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + do_facet_random_file client $TMP/$tfile 100K || + error_noexit "Create random file $TMP/$tfile" + + pause_bulk "cp $TMP/$tfile $DIR/$tfile" $TIMEOUT || + error_noexit "Can't pause_bulk copy" + + df $DIR + # expect cmp to succeed, client resent bulk + cmp $TMP/$tfile $DIR/$tfile || + error_noexit "Wrong data has been written" + rm $DIR/$tfile || + error_noexit "Can't remove file" + rm $TMP/$tfile +} +run_test 112a "bulk resend while orignal request is in progress" + +test_115_read() { + local fail1=$1 + local fail2=$2 + + df $DIR + dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 + cancel_lru_locks osc + + # OST_READ = 3, + $LCTL set_param fail_loc=$fail1 fail_val=3 + dd of=/dev/null if=$DIR/$tfile bs=4096 count=1 & + pid=$! + sleep 1 + + set_nodes_failloc "$(osts_nodes)" $fail2 + + wait $pid || error "dd failed" + return 0 +} + +test_115_write() { + local fail1=$1 + local fail2=$2 + local error=$3 + local fail_val2=${4:-0} + + df $DIR + touch $DIR/$tfile + + # OST_WRITE = 4, + $LCTL set_param fail_loc=$fail1 fail_val=4 + dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 oflag=dsync & + pid=$! + sleep 1 + + df $MOUNT + set_nodes_failloc "$(osts_nodes)" $fail2 $fail_val2 + + wait $pid + rc=$? 
+ [ $error -eq 0 ] && [ $rc -ne 0 ] && error "dd error ($rc)" + [ $error -ne 0 ] && [ $rc -eq 0 ] && error "dd success" + return 0 +} + +test_115a() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b + #define OBD_FAIL_PTLRPC_DROP_BULK 0x51a + test_115_read 0x8000051b 0x8000051a +} +run_test 115a "read: late REQ MDunlink and no bulk" + +test_115b() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b + #define OBD_FAIL_OST_ENOSPC 0x215 + + # pass $OSTCOUNT for the fail_loc to be caught + # appropriately by the IO thread + test_115_write 0x8000051b 0x80000215 1 $OSTCOUNT +} +run_test 115b "write: late REQ MDunlink and no bulk" + +test_115c() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f + #define OBD_FAIL_PTLRPC_DROP_BULK 0x51a + test_115_read 0x8000050f 0x8000051a +} +run_test 115c "read: late Reply MDunlink and no bulk" + +test_115d() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f + #define OBD_FAIL_OST_ENOSPC 0x215 + test_115_write 0x8000050f 0x80000215 0 +} +run_test 115d "write: late Reply MDunlink and no bulk" + +test_115e() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_BULK_UNLINK 0x510 + #define OBD_FAIL_OST_ALL_REPLY_NET 0x211 + test_115_read 0x80000510 0x80000211 +} +run_test 115e "read: late Bulk MDunlink and no reply" + +test_115f() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b + #define OBD_FAIL_OST_ALL_REPLY_NET 0x211 + test_115_read 0x8000051b 0x80000211 +} +run_test 115f "read: late REQ MDunlink and no reply" + +test_115g() { + [ $(lustre_version_code ost1) -lt $(version_code 2.8.50) ] && + skip "need at least 2.8.50 on OST" && return 0 + + #define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c + test_115_read 0x8000051c 0 +} +run_test 115g "read: late REQ MDunlink and Reply MDunlink" + +# parameters: fail_loc CMD RC +test_120_reply() { + local PID + local PID2 + local rc=5 + local fail + + #define OBD_FAIL_LDLM_CP_CB_WAIT2 0x320 + #define OBD_FAIL_LDLM_CP_CB_WAIT3 0x321 + #define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322 + #define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323 + + echo + echo -n "** FLOCK REPLY vs. EVICTION race, lock $2" + [ "$1" = "CLEANUP" ] && + fail=0x80000320 && echo ", $1 cp first" + [ "$1" = "REPLY" ] && + fail=0x80000321 && echo ", $1 cp first" + [ "$1" = "DEADLOCK CLEANUP" ] && + fail=0x80000322 && echo " DEADLOCK, CLEANUP cp first" + [ "$1" = "DEADLOCK REPLY" ] && + fail=0x80000323 && echo " DEADLOCK, REPLY cp first" + + if [ x"$2" = x"get" ]; then + #for TEST lock, take a conflict in advance + # sleep longer than evictor to not confuse fail_loc: 2+2+4 + echo "** Taking conflict **" + flocks_test 5 set read sleep 10 $DIR/$tfile & + PID2=$! + + sleep 2 + fi + + $LCTL set_param fail_loc=$fail + + flocks_test 5 $2 write $DIR/$tfile & + PID=$! 
+ + sleep 2 + echo "** Evicting and re-connecting client **" + mds_evict_client + + client_reconnect + + if [ x"$2" = x"get" ]; then + wait $PID2 + fi + + wait $PID + rc=$? + + # check if the return value is allowed + [ $rc -eq $3 ] && rc=0 + + $LCTL set_param fail_loc=0 + return $rc +} + +# a lock is taken, unlock vs. cleanup_resource() race for destroying +# the ORIGINAL lock. +test_120_destroy() +{ + local PID + + flocks_test 5 set write sleep 4 $DIR/$tfile & + PID=$! + sleep 2 + + # let unlock to sleep in CP CB + $LCTL set_param fail_loc=$1 + sleep 4 + + # let cleanup to cleep in CP CB + mds_evict_client + + client_reconnect + + wait $PID + rc=$? + + $LCTL set_param fail_loc=0 + return $rc +} + +test_120() { + flock_is_enabled || { skip "mount w/o flock enabled" && return; } + touch $DIR/$tfile + + test_120_reply "CLEANUP" set 5 || error "SET race failed" + test_120_reply "CLEANUP" get 5 || error "GET race failed" + test_120_reply "CLEANUP" unlock 5 || error "UNLOCK race failed" + + test_120_reply "REPLY" set 5 || error "SET race failed" + test_120_reply "REPLY" get 5 || error "GET race failed" + test_120_reply "REPLY" unlock 5 || error "UNLOCK race failed" + + # DEADLOCK tests + test_120_reply "DEADLOCK CLEANUP" set 5 || error "DEADLOCK race failed" + test_120_reply "DEADLOCK REPLY" set 35 || error "DEADLOCK race failed" + + test_120_destroy 0x320 || error "unlock-cleanup race failed" +} +run_test 120 "flock race: completion vs. evict" + +test_113() { + local BEFORE=$(date +%s) + local EVICT + + # modify dir so that next revalidate would not obtain UPDATE lock + touch $DIR + + # drop 1 reply with UPDATE lock, + # resend should not create 2nd lock on server + mcreate $DIR/$tfile || error "mcreate failed: $?" + drop_ldlm_reply_once "stat $DIR/$tfile" || error "stat failed: $?" + + # 2 BL AST will be sent to client, both must find the same lock, + # race them to not get EINVAL for 2nd BL AST + #define OBD_FAIL_LDLM_PAUSE_CANCEL2 0x31f + $LCTL set_param fail_loc=0x8000031f + + $LCTL set_param ldlm.namespaces.*.early_lock_cancel=0 > /dev/null + chmod 0777 $DIR/$tfile || error "chmod failed: $?" + $LCTL set_param ldlm.namespaces.*.early_lock_cancel=1 > /dev/null + + # let the client reconnect + client_reconnect + EVICT=$($LCTL get_param mdc.$FSNAME-MDT*.state | + awk -F"[ [,]" '/EVICTED ]$/ { if (mx<$5) {mx=$5;} } END { print mx }') + + [ -z "$EVICT" ] || [[ $EVICT -le $BEFORE ]] || error "eviction happened" +} +run_test 113 "ldlm enqueue dropped reply should not cause deadlocks" + +T130_PID=0 +test_130_base() { + test_mkdir -p $DIR/$tdir + + # Prevent interference from layout intent RPCs due to + # asynchronous writeback. These will be tested in 130c below. + do_nodes ${CLIENTS:-$HOSTNAME} sync + + # get only LOOKUP lock on $tdir + cancel_lru_locks mdc + ls $DIR/$tdir/$tfile 2>/dev/null + + # get getattr by fid on $tdir + # + # we need to race with unlink, unlink must complete before we will + # take a DLM lock, otherwise unlink will wait until getattr will + # complete; but later than getattr starts so that getattr found + # the object +#define OBD_FAIL_MDS_INTENT_DELAY 0x160 + set_nodes_failloc "$(mdts_nodes)" 0x80000160 + stat $DIR/$tdir & + T130_PID=$! + sleep 2 + + rm -rf $DIR/$tdir + + # drop the reply so that resend happens on an unlinked file. 
+#define OBD_FAIL_MDS_LDLM_REPLY_NET	 0x157
+	set_nodes_failloc "$(mdts_nodes)" 0x80000157
+}
+
+test_130a() {
+	remote_mds_nodsh && skip "remote MDS with nodsh" && return
+	local server_version=$(lustre_version_code $SINGLEMDS)
+	[[ $server_version -ge $(version_code 2.7.2) ]] ||
+		{ skip "Need server version newer than 2.7.1"; return 0; }
+
+	test_130_base
+
+	wait $T130_PID || [ $? -eq 0 ] && error "stat should fail"
+	return 0
+}
+run_test 130a "enqueue resend on not existing file"
+
+test_130b() {
+	remote_mds_nodsh && skip "remote MDS with nodsh" && return
+	local server_version=$(lustre_version_code $SINGLEMDS)
+	[[ $server_version -ge $(version_code 2.7.2) ]] ||
+		{ skip "Need server version newer than 2.7.1"; return 0; }
+
+	test_130_base
+	# let the reply be dropped
+	sleep 10
+
+#define OBD_FAIL_SRV_ENOENT 0x217
+	set_nodes_failloc "$(mdts_nodes)" 0x80000217
+
+	wait $T130_PID || [ $? -eq 0 ] && error "stat should fail"
+	return 0
+}
+run_test 130b "enqueue resend on a stale inode"
+
+test_130c() {
+	remote_mds_nodsh && skip "remote MDS with nodsh" && return
+
+	do_nodes ${CLIENTS:-$HOSTNAME} sync
+	echo XXX > $DIR/$tfile
+
+	cancel_lru_locks mdc
+
+	# Trigger writeback on $tfile.
+	#
+	# we need to race with unlink, unlink must complete before we will
+	# take a DLM lock, otherwise unlink will wait until intent will
+	# complete; but later than intent starts so that intent found
+	# the object
+#define OBD_FAIL_MDS_INTENT_DELAY		0x160
+	set_nodes_failloc "$(mdts_nodes)" 0x80000160
+	sync &
+	T130_PID=$!
+	sleep 2
+
+	rm $DIR/$tfile
+
+	# drop the reply so that resend happens on an unlinked file.
+#define OBD_FAIL_MDS_LDLM_REPLY_NET	 0x157
+	set_nodes_failloc "$(mdts_nodes)" 0x80000157
+
+	# let the reply be dropped
+	sleep 10
+
+#define OBD_FAIL_SRV_ENOENT 0x217
+	set_nodes_failloc "$(mdts_nodes)" 0x80000217
+
+	wait $T130_PID
+
+	return 0
+}
+run_test 130c "layout intent resend on a stale inode"
+
+test_132() {
+	local before=$(date +%s)
+	local evict
+
+	mount_client $MOUNT2 || error "mount failed"
+
+	rm -f $DIR/$tfile
+	# get a lock on client so that export would reach the stale list
+	$SETSTRIPE -i 0 $DIR/$tfile || error "setstripe failed"
+	dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 conv=fsync ||
+		error "dd failed"
+
+	#define OBD_FAIL_OST_PAUSE_PUNCH 0x236
+	do_facet ost1 $LCTL set_param fail_val=120 fail_loc=0x80000236
+
+	$TRUNCATE $DIR/$tfile 100 &
+
+	sleep 1
+	dd if=/dev/zero of=$DIR2/$tfile bs=4096 count=1 conv=notrunc ||
+		error "dd failed"
+
+	wait
+	umount_client $MOUNT2
+
+	evict=$(do_facet client $LCTL get_param \
+		osc.$FSNAME-OST0000-osc-*/state |
+	    awk -F"[ [,]" '/EVICTED ]$/ { if (t<$5) {t=$5;} } END { print t }')
+
+	[ -z "$evict" ] || [[ $evict -le $before ]] ||
+		(do_facet client $LCTL get_param \
+			osc.$FSNAME-OST0000-osc-*/state;
+		    error "eviction happened: $evict before:$before")
+}
+run_test 132 "long punch"
+
+test_131() {
+	remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+	rm -f $DIR/$tfile
+	# get a lock on client so that export would reach the stale list
+	$SETSTRIPE -i 0 $DIR/$tfile || error "setstripe failed"
+	dd if=/dev/zero of=$DIR/$tfile count=1 || error "dd failed"
+
+	# another IO under the same lock
+	#define OBD_FAIL_OSC_DELAY_IO 0x414
+	$LCTL set_param fail_loc=0x80000414
+	$LCTL set_param fail_val=4 fail_loc=0x80000414
+	dd if=/dev/zero of=$DIR/$tfile count=1 conv=notrunc oflag=dsync &
+	local pid=$!
+ sleep 1 + + #define OBD_FAIL_LDLM_BL_EVICT 0x31e + set_nodes_failloc "$(osts_nodes)" 0x8000031e + ost_evict_client + client_reconnect + + wait $pid && error "dd succeeded" + return 0 +} +run_test 131 "IO vs evict results to IO under staled lock" + +test_133() { + local list=$(comma_list $(mdts_nodes)) + + local t=$((TIMEOUT * 2)) + touch $DIR/$tfile + + flock $DIR/$tfile -c "echo bl lock;sleep $t;echo bl flock unlocked" & + sleep 1 + multiop_bg_pause $DIR/$tfile O_jc || return 1 + PID=$! + + #define OBD_FAIL_LDLM_REPLY 0x30c + do_nodes $list $LCTL set_param fail_loc=0x8000030c + kill -USR1 $PID + echo "waiting for multiop $PID" + wait $PID || return 2 + + rm -f $DIR/$tfile + + return 0 +} +run_test 133 "don't fail on flock resend" + complete $SECONDS check_and_cleanup_lustre exit_status