+ mkdir -p $DIR/$tdir
+ # put a load of file creates/writes/deletes
+ writemany -q $DIR/$tdir/$tfile 0 5 &
+ CLIENT_PID=$!
+ sleep 1
+ FAILURE_MODE="SOFT"
+ facet_failover $SINGLEMDS
+ # failover at various points during recovery
+ SEQ="1 5 10 $(seq $TIMEOUT 5 $(($TIMEOUT+10)))"
+ echo will failover at $SEQ
+ for i in $SEQ
+ do
+ echo failover in $i sec
+ sleep $i
+ facet_failover $SINGLEMDS
+ done
+ # client process should see no problems even though MDS went down
+ # and recovery was interrupted
+ sleep $TIMEOUT
+ kill -USR1 $CLIENT_PID
+ wait $CLIENT_PID
+ rc=$?
+ echo writemany returned $rc
+ [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+}
+run_test 51 "failover MDS during recovery"
+
+# Body shared by the three passes of test_52: start writemany against
+# $DIR/$tdir, fail ost1 underneath it, and classify the result.
+# Returns 0 when writemany exits 0 (recovered in time) or 5 (saw the
+# expected EIO for the down OST); any other exit code is propagated.
+test_52_guts() {
+ do_facet client "mkdir -p $DIR/$tdir"
+ # run writemany in the background: 300 second run, 5 second intervals
+ # (-a presumably appends to existing files -- confirm writemany usage)
+ do_facet client "writemany -q -a $DIR/$tdir/$tfile 300 5" &
+ CLIENT_PID=$!
+ echo writemany pid $CLIENT_PID
+ sleep 10
+ FAILURE_MODE="SOFT"
+ fail ost1
+ rc=0
+ # wait propagates writemany's exit status; capture without aborting
+ wait $CLIENT_PID || rc=$?
+ # active client process should see an EIO for down OST
+ [ $rc -eq 5 ] && { echo "writemany correctly failed $rc" && return 0; }
+ # but timing or failover setup may allow success
+ [ $rc -eq 0 ] && { echo "writemany succeeded" && return 0; }
+ echo "writemany returned $rc"
+ return $rc
+}
+
+# Fail ost1 under writemany load three times in a row; each pass must
+# either succeed or fail with EIO (validated by test_52_guts), with a
+# 30-second reconnect window between passes.
+test_52() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+ test_52_guts
+ rc=$?
+ [ $rc -ne 0 ] && { return $rc; }
+ # wait for client to reconnect to OST
+ sleep 30
+ test_52_guts
+ rc=$?
+ [ $rc -ne 0 ] && { return $rc; }
+ sleep 30
+ test_52_guts
+ rc=$?
+ client_reconnect
+ # NOTE(review): the third pass's rc is deliberately(?) not propagated;
+ # the commented-out return below preserves that behavior
+ #return $rc
+}
+run_test 52 "failover OST under load"
+
+# test of open reconstruct: re-open an existing file while the server
+# drops its ldlm reply, forcing a resend that the MDS must reconstruct
+test_53() {
+ local open_cmd="openfile -f O_RDWR:O_CREAT -m 0755 $DIR/$tfile"
+ touch $DIR/$tfile
+ drop_ldlm_reply "$open_cmd" || return 2
+}
+run_test 53 "touch: drop rep"
+
+# Verify the server never reports "went back in time" after failover:
+# a failed read on a second mount leaves transno = 0, rc != 0 in
+# last_rcvd, which must not confuse transaction-number recovery.
+test_54() {
+ zconf_mount `hostname` $MOUNT2
+ touch $DIR/$tfile
+ touch $DIR2/$tfile.1
+ sleep 10
+ cat $DIR2/$tfile.missing # save transno = 0, rc != 0 into last_rcvd
+ fail $SINGLEMDS
+ umount $MOUNT2
+ # only the messages logged since this test's banner are relevant
+ ERROR=`dmesg | egrep "(test 54|went back in time)" | tail -n1 | grep "went back in time"`
+ [ x"$ERROR" == x ] || error "back in time occurred"
+}
+run_test 54 "back in time"
+
+# bug 11330 - liblustre application death during I/O locks up OST
+# Phase 1: a plain dd must finish within ~64s. Phase 2: with
+# OBD_FAIL_OST_DROP_REQ armed on ost1 an identical dd must stall for
+# the whole 64s window. Phase 3: clearing fail_loc must let the
+# stalled dd complete within 500s.
+test_55() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+
+ # first dd should be finished quickly
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-1
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
+ DDPID=$!
+ count=0
+ echo "step1: testing ......"
+ while true; do
+ # quote the substitution so [ -z ] sees exactly one operand
+ if [ -z "$(ps x | awk '$1 == '$DDPID' { print $5 }')" ]; then break; fi
+ count=$((count+1))
+ if [ $count -gt 64 ]; then
+ error "dd should be finished!"
+ fi
+ sleep 1
+ done
+ echo "(dd_pid=$DDPID, time=$count)successful"
+
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-2
+ #define OBD_FAIL_OST_DROP_REQ 0x21d
+ do_facet ost1 lctl set_param fail_loc=0x0000021d
+ # second dd will be never finished
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
+ DDPID=$!
+ count=0
+ echo "step2: testing ......"
+ while [ $count -le 64 ]; do
+ dd_name="$(ps x | awk '$1 == '$DDPID' { print $5 }')"
+ # quoted: an empty (finished) dd must take the error branch
+ if [ -z "$dd_name" ]; then
+ ls -l $DIR/$tdir
+ echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
+ error "dd shouldn't be finished!"
+ fi
+ count=$((count+1))
+ sleep 1
+ done
+ echo "(dd_pid=$DDPID, time=$count)successful"
+
+ #Recover fail_loc and dd will finish soon
+ do_facet ost1 lctl set_param fail_loc=0
+ count=0
+ echo "step3: testing ......"
+ while true; do
+ if [ -z "$(ps x | awk '$1 == '$DDPID' { print $5 }')" ]; then break; fi
+ count=$((count+1))
+ if [ $count -gt 500 ]; then
+ error "dd should be finished!"
+ fi
+ sleep 1
+ done
+ echo "(dd_pid=$DDPID, time=$count)successful"
+
+ rm -rf $DIR/$tdir
+}
+run_test 55 "ost_brw_read/write drops timed-out read/write request"
+
+# b=11277: inject OBD_FAIL_MDS_RESEND so the stat RPC is resent; the
+# resent request must not be treated as a reconnect to a busy export.
+test_56() { # b=11277
+#define OBD_FAIL_MDS_RESEND 0x136
+ touch $DIR/$tfile
+ # the 0x80000000 bit presumably makes the failure one-shot -- confirm
+ # against obd_support.h
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000136"
+ stat $DIR/$tfile
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ rm -f $DIR/$tfile
+}
+run_test 56 "do not allow reconnect to busy exports"
+
+# Spin reading the osc proc tree until it disappears; test_57 uses this
+# to race procfs reads against entry removal.
+# No oscs means no client or mdt, so the loop ends once both are gone.
+test_57_helper() {
+ while :; do
+ lctl get_param osc.*.* > /dev/null 2>&1 || break
+ done
+}
+
+# bug 10866: reading procfs entries while OBD_FAIL_LPROC_REMOVE delays
+# their removal (during umount and aborted MDS recovery) must not crash
+# the kernel. The helper keeps reading osc params the whole time.
+test_57() { # bug 10866
+ test_57_helper &
+ pid=$!
+ sleep 1
+#define OBD_FAIL_LPROC_REMOVE 0xB00
+ lctl set_param fail_loc=0x80000B00
+ zconf_umount `hostname` $DIR
+ lctl set_param fail_loc=0x80000B00
+ fail_abort $SINGLEMDS
+ # reader loop may never exit on its own once proc entries are gone
+ kill -9 $pid
+ lctl set_param fail_loc=0
+ mount_client $DIR
+ do_facet client "df $DIR"
+}
+run_test 57 "read procfs entries causes kernel crash"
+
+# bug 11546: evict the client while an open RPC reply is being
+# processed. OBD_FAIL_MDC_ENQUEUE_PAUSE stalls the cp's open, then the
+# unlink with a dropped blocking callback gets this client evicted.
+test_58() { # bug 11546
+#define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
+ touch $DIR/$tfile
+ ls -la $DIR/$tfile
+ lctl set_param fail_loc=0x80000801
+ cp $DIR/$tfile /dev/null &
+ pid=$!
+ sleep 1
+ lctl set_param fail_loc=0
+ drop_bl_callback rm -f $DIR/$tfile
+ wait $pid
+ # the first 'df' could trigger the eviction caused by
+ # 'drop_bl_callback', and it's normal case.
+ # but the next 'df' should return successfully.
+ do_facet client "df $DIR" || do_facet client "df $DIR"
+}
+run_test 58 "Eviction in the middle of open RPC reply processing"
+
+# bug 10589: write through a second mount, force-evict it while
+# OBD_FAIL_LDLM_CANCEL_EVICT_RACE is armed, then verify the surviving
+# mount reads back exactly the number of records that were written.
+test_59() { # bug 10589
+ zconf_mount `hostname` $MOUNT2 || error "Failed to mount $MOUNT2"
+ echo $DIR2 | grep -q $MOUNT2 || error "DIR2 is not set properly: $DIR2"
+#define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
+ lctl set_param fail_loc=0x311
+ writes=$(LANG=C dd if=/dev/zero of=$DIR2/$tfile count=1 2>&1)
+ [ $? = 0 ] || error "dd write failed"
+ # dd reports "N+M records out"; keep the full-record count before '+'
+ writes=$(echo $writes | awk -F '+' '/out/ {print $1}')
+ lctl set_param fail_loc=0
+ sync
+ # forced umount (-f) evicts this client while the race point is set
+ zconf_umount `hostname` $MOUNT2 -f
+ reads=$(LANG=C dd if=$DIR/$tfile of=/dev/null 2>&1)
+ [ $? = 0 ] || error "dd read failed"
+ reads=$(echo $reads | awk -F '+' '/in/ {print $1}')
+ [ "$reads" -eq "$writes" ] || error "read" $reads "blocks, must be" $writes
+}
+run_test 59 "Read cancel race on client eviction"
+
+err17935 () {
+ # we assume that all md changes are in the MDT0 changelog
+ if [ $MDSCOUNT -gt 1 ]; then
+ error_ignore 17935 $*
+ else
+ error $*
+ fi
+}
+
+# Create many changelog entries, fail the MDS in the middle of the
+# unlinks, and verify no UNLNK record was lost across recovery.
+test_60() {
+ # derive the MDT device name from the mdc's server uuid
+ MDT0=$($LCTL get_param -n mdc.*.mds_server_uuid | \
+ awk '{gsub(/_UUID/,""); print $1}' | head -1)
+
+ NUM_FILES=15000
+ mkdir -p $DIR/$tdir
+
+ # Register (and start) changelog
+ USER=$(do_facet $SINGLEMDS lctl --device $MDT0 changelog_register -n)
+ echo "Registered as $MDT0 changelog user $USER"
+
+ # Generate a large number of changelog entries
+ createmany -o $DIR/$tdir/$tfile $NUM_FILES
+ sync
+ sleep 5
+
+ # Unlink files in the background
+ unlinkmany $DIR/$tdir/$tfile $NUM_FILES &
+ CLIENT_PID=$!
+ sleep 1
+
+ # Failover the MDS while unlinks are happening
+ facet_failover $SINGLEMDS
+
+ # Wait for unlinkmany to finish
+ wait $CLIENT_PID
+
+ # Check if all the create/unlink events were recorded
+ # in the changelog
+ $LFS changelog $MDT0 >> $DIR/$tdir/changelog
+ local cl_count=$(grep UNLNK $DIR/$tdir/changelog | wc -l)
+ echo "$cl_count unlinks in $MDT0 changelog"
+
+ do_facet $SINGLEMDS lctl --device $MDT0 changelog_deregister $USER
+ # changelog_users output has a 2-line header; the rest are users
+ USERS=$(( $(do_facet $SINGLEMDS lctl get_param -n \
+ mdd.$MDT0.changelog_users | wc -l) - 2 ))
+ if [ $USERS -eq 0 ]; then
+ [ $cl_count -eq $NUM_FILES ] || \
+ err17935 "Recorded ${cl_count} unlinks out of $NUM_FILES"
+ # Also make sure we can clear large changelogs
+ cl_count=$($LFS changelog $FSNAME | wc -l)
+ [ $cl_count -le 2 ] || \
+ error "Changelog not empty: $cl_count entries"
+ else
+ # If there are other users, there may be other unlinks in the log
+ [ $cl_count -ge $NUM_FILES ] || \
+ err17935 "Recorded ${cl_count} unlinks out of $NUM_FILES"
+ echo "$USERS other changelog users; can't verify clear"
+ fi
+}
+run_test 60 "Add Changelog entries during MDS failover"
+
+# bug 17025: after an aborted MDS recovery the OST must not reuse
+# object ids it already handed out for the (now orphaned) precreated
+# objects; a new file's objid must be strictly greater than last_id.
+test_61()
+{
+ local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $FSNAME-OST0000)
+ mdtosc=${mdtosc/-MDT*/-MDT\*}
+ local cflags="osc.$mdtosc.connect_flags"
+ do_facet $SINGLEMDS "lctl get_param -n $cflags" |grep -q skip_orphan
+ [ $? -ne 0 ] && skip "don't have skip orphan feature" && return
+
+ mkdir -p $DIR/$tdir || error "mkdir dir $DIR/$tdir failed"
+ # Set the default stripe of $DIR/$tdir to put the files to ost1
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir
+
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR/$tdir/$tfile-%d 10
+ # highest object id ost1 has precreated before the aborted recovery
+ local oid=`do_facet ost1 "lctl get_param -n obdfilter.${ost1_svc}.last_id"`
+
+ fail_abort $SINGLEMDS
+
+ touch $DIR/$tdir/$tfile
+ # getstripe prints "obdidx objid ..."; take the objid on OST index 0
+ local id=`$LFS getstripe $DIR/$tdir/$tfile | awk '$1 == 0 { print $2 }'`
+ [ $id -le $oid ] && error "the orphan objid was reused, failed"
+
+ # Cleanup
+ rm -rf $DIR/$tdir
+}
+run_test 61 "Verify to not reuse orphan objects - bug 17025"
+
+check_cli_ir_state()
+{
+ local NODE=${1:-$HOSTNAME}
+ local st
+ st=$(do_node $NODE "lctl get_param mgc.*.ir_state |
+ awk '/imperative_recovery:/ { print \\\$2}'")
+ [ $st != ON -o $st != OFF -o $st != ENABLED -o $st != DISABLED ] ||
+ error "Error state $st, must be ENABLED or DISABLED"
+ echo -n $st
+}
+
+check_target_ir_state()
+{
+ local target=${1}
+ local name=${target}_svc
+ local recovery_proc=obdfilter.${!name}.recovery_status
+ local st
+
+ st=$(do_facet $target "lctl get_param -n $recovery_proc |
+ awk '/IR:/{ print \\\$2}'")
+ [ $st != ON -o $st != OFF -o $st != ENABLED -o $st != DISABLED ] ||
+ error "Error state $st, must be ENABLED or DISABLED"
+ echo -n $st
+}
+
+# set_ir_status <full|partial|disabled> - push the desired imperative
+# recovery state for $FSNAME through the MGS live proc file
+set_ir_status()
+{
+ local new_state=$1
+ do_facet mgs lctl set_param -n mgs.MGS.live.$FSNAME="state=$new_state"
+}
+
+get_ir_status()
+{
+ local state=$(do_facet mgs "lctl get_param -n mgs.MGS.live.$FSNAME |
+ awk '/state:/{ print \\\$2 }'")
+ echo -n ${state/,/}
+}
+
+# nidtbl_version_mgs - print the nidtbl version the MGS currently holds
+# for $FSNAME, without a trailing newline
+nidtbl_version_mgs()
+{
+ local ver
+ ver=$(do_facet mgs "lctl get_param -n mgs.MGS.live.$FSNAME |
+ awk '/nidtbl_version:/{ print \\\$2 }'")
+ echo -n $ver
+}
+
+# nidtbl_version_client <mds1|client> [node]
+# Print the nidtbl version the MGC on the given node holds for the
+# named target. "client" means the filesystem client ($FSNAME-client);
+# an mdsN argument resolves the node and service name from the facet.
+nidtbl_version_client()
+{
+ local cli=$1
+ local node=${2:-$HOSTNAME}
+
+ if [ X$cli = Xclient ]; then
+ cli=$FSNAME-client
+ else
+ # strip the trailing digits to validate the facet type
+ local obdtype=${cli/%[0-9]*/}
+ [ $obdtype != mds ] && error "wrong parameters $cli"
+
+ node=$(facet_active_host $cli)
+ # indirect expansion: e.g. cli=mds1 reads $mds1_svc
+ local t=${cli}_svc
+ cli=${!t}
+ fi
+
+ # field 6 of the matching ir_state line is presumably the nidtbl
+ # version -- confirm against the mgc proc file format
+ local vers=$(do_node $node "lctl get_param -n mgc.*.ir_state" |
+ awk "/$cli/{print \$6}" |sort -u)
+
+ # in case there are multiple mounts on the client node
+ local arr=($vers)
+ [ ${#arr[@]} -ne 1 ] && error "versions on client node mismatch"
+ echo -n $vers
+}
+
+nidtbl_versions_match()
+{
+ [ $(nidtbl_version_mgs) -eq $(nidtbl_version_client ${1:-client}) ]
+}
+
+# target_instance_match <facet> - succeed iff the instance number the
+# server target exports equals the one recorded in the local client's
+# import, i.e. the client reconnected to the current target instance.
+target_instance_match()
+{
+ local srv=$1
+ local obdtype
+ local obdname
+ local cliname
+
+ # strip trailing digits: mds1 -> mds, ost2 -> ost
+ obdtype=${srv/%[0-9]*/}
+ case $obdtype in
+ mds)
+ obdname="mdt"
+ cliname="mdc"
+ ;;
+ ost)
+ obdname="obdfilter"
+ cliname="osc"
+ ;;
+ *)
+ error "invalid target type" $srv
+ return 1
+ ;;
+ esac
+
+ # indirect expansion: e.g. srv=ost1 reads $ost1_svc
+ local target=${srv}_svc
+ local si=$(do_facet $srv lctl get_param -n $obdname.${!target}.instance)
+ local ci=$(lctl get_param -n $cliname.${!target}-${cliname}-*.import | \
+ awk '/instance/{ print $2 }' |head -1)
+
+ # the comparison itself is the function's result; the previous
+ # "return $([ ... ])" only worked because the substitution was empty
+ [ "$si" -eq "$ci" ]
+}
+
+# With IR disabled, failing ost1 must not bump the client's nidtbl
+# version, and normal (pinger-based) recovery must still reconnect the
+# client to the new target instance.
+test_100()
+{
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ { skip "MGS without IR support"; return 0; }
+
+ # MDT was just restarted in the previous test, make sure everything
+ # is all set.
+ local cnt=30
+ while [ $cnt -gt 0 ]; do
+ nidtbl_versions_match && break
+ sleep 1
+ cnt=$((cnt - 1))
+ done
+
+ # disable IR
+ set_ir_status disabled
+
+ local prev_ver=$(nidtbl_version_client client)
+
+ # with MGS and OST colocated a HARD failover would take the MGS down too
+ local saved_FAILURE_MODE=$FAILURE_MODE
+ [ $(facet_host mgs) = $(facet_host ost1) ] && FAILURE_MODE="SOFT"
+ fail ost1
+
+ # valid check
+ [ $(nidtbl_version_client client) -eq $prev_ver ] ||
+ error "version must not change due to IR disabled"
+ target_instance_match ost1 || error "instance mismatch"
+
+ # restore env
+ set_ir_status full
+ FAILURE_MODE=$saved_FAILURE_MODE
+}
+run_test 100 "IR: Make sure normal recovery still works w/o IR"
+
+# With normal (pinger-based) recovery disabled on the OST import, an
+# ost1 failover must still recover purely via imperative recovery, and
+# the MGS/client nidtbl versions must converge.
+test_101()
+{
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ { skip "MGS without IR support"; return 0; }
+
+ set_ir_status full
+
+ local OST1_IMP=$(get_osc_import_name client ost1)
+
+ # disable pinger recovery
+ lctl set_param -n osc.$OST1_IMP.pinger_recov=0
+
+ fail ost1
+
+ target_instance_match ost1 || error "instance mismatch"
+ nidtbl_versions_match || error "version must match"
+
+ # re-enable pinger recovery for subsequent tests
+ lctl set_param -n osc.$OST1_IMP.pinger_recov=1
+}
+run_test 101 "IR: Make sure IR works w/o normal recovery"
+
+# Restart the MGS while no client is mounted; a freshly mounted client
+# must fetch a nidtbl version at least as new as the one seen before
+# the restart.
+test_102()
+{
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ { skip "MGS without IR support"; return 0; }
+
+ local clients=${CLIENTS:-$HOSTNAME}
+ local old_version
+ local new_version
+ local mgsdev=mgs
+
+ set_ir_status full
+
+ # let's have a new nidtbl version
+ fail ost1
+
+ # sleep for a while so that clients can see the failure of ost
+ # it must be MGC_TIMEOUT_MIN_SECONDS + MGC_TIMEOUT_RAND_CENTISEC.
+ # int mgc_request.c:
+ # define MGC_TIMEOUT_MIN_SECONDS 5
+ # define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 *
+ local count=30 # 20 seconds at most
+ while [ $count -gt 0 ]; do
+ nidtbl_versions_match && break
+ sleep 1
+ count=$((count-1))
+ done
+
+ nidtbl_versions_match || error "nidtbl mismatch"
+
+ # get the version #
+ old_version=$(nidtbl_version_client client)
+
+ zconf_umount_clients $clients $MOUNT || error "Cannot umount client"
+
+ # restart mgs
+ combined_mgs_mds && mgsdev=mds1
+ remount_facet $mgsdev
+ # bump the version again while the clients are away
+ fail ost1
+
+ zconf_mount_clients $clients $MOUNT || error "Cannot mount client"
+
+ # check new version
+ new_version=$(nidtbl_version_client client)
+ [ $new_version -lt $old_version ] &&
+ error "nidtbl version wrong after mgs restarts"
+ return 0
+}
+run_test 102 "IR: New client gets updated nidtbl after MGS restart"
+
+# The MDS must be able to start while the MGS is down; once the MGS is
+# back, the MDS (acting as an IR client) must reconnect and fetch an
+# up-to-date nidtbl version.
+test_103()
+{
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ { skip "MGS without IR support"; return 0; }
+
+ combined_mgs_mds && skip "mgs and mds on the same target" && return 0
+
+ # workaround solution to generate config log on the mds
+ remount_facet mds1
+
+ stop mgs
+ stop mds1
+
+ # We need this test because mds is like a client in IR context.
+ start mds1 $MDSDEV1 || error "MDS should start w/o mgs"
+
+ # start mgs and remount mds w/ ir
+ start mgs $MGSDEV
+ clients_up
+
+ # remount client so that fsdb will be created on the MGS
+ umount_client $MOUNT || error "umount failed"
+ mount_client $MOUNT || error "mount failed"
+
+ # sleep 30 seconds so the MDS has a chance to detect MGS restarting
+ local count=30
+ while [ $count -gt 0 ]; do
+ # version 0 means the MDS has not fetched a nidtbl yet
+ [ $(nidtbl_version_client mds1) -ne 0 ] && break
+ sleep 1
+ count=$((count-1))
+ done
+
+ # after a while, mds should be able to reconnect to mgs and fetch
+ # up-to-date nidtbl version
+ nidtbl_versions_match mds1 || error "mds nidtbl mismatch"
+
+ # reset everything
+ set_ir_status full
+}
+run_test 103 "IR: MDS can start w/o MGS and get updated nidtbl later"
+
+# An OST mounted with -onoir must opt out of imperative recovery: its
+# reported IR state has to be DISABLED (or OFF) even though the MGS
+# state is full.
+test_104()
+{
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ { skip "MGS without IR support"; return 0; }
+
+ set_ir_status full
+
+ stop ost1
+ # restart ost1 with IR voluntarily disabled via mount option
+ start ost1 $(ostdevname 1) "$OST_MOUNT_OPTS -onoir" ||
+ error "OST1 cannot start"
+ clients_up
+
+ local ir_state=$(check_target_ir_state ost1)
+ [ $ir_state = "DISABLED" -o $ir_state = "OFF" ] ||
+ error "ir status on ost1 should be DISABLED"
+}
+run_test 104 "IR: ost can disable IR voluntarily"
+
+# Mixed IR / non-IR clients: while one client is mounted with noir the
+# MGS state must degrade to "partial" and the OST must recover with IR
+# DISABLED; after remounting that client normally the state must return
+# to "full" and the OST to IR ENABLED.
+test_105()
+{
+ [ -z "$RCLIENTS" ] && skip "Needs multiple clients" && return 0
+ do_facet mgs $LCTL list_param mgs.*.ir_timeout ||
+ { skip "MGS without IR support"; return 0; }
+
+ set_ir_status full
+
+ # get one of the clients from client list
+ local rcli=$(echo $RCLIENTS |cut -d' ' -f 1)
+
+ # remount that client with IR disabled
+ local old_MOUNTOPT=$MOUNTOPT
+ MOUNTOPT=${MOUNTOPT},noir
+ zconf_umount $rcli $MOUNT || error "umount failed"
+ zconf_mount $rcli $MOUNT || error "mount failed"
+
+ # make sure lustre mount at $rcli disabling IR
+ local ir_state=$(check_cli_ir_state $rcli)
+ [ $ir_state = "DISABLED" -o $ir_state = "OFF" ] ||
+ error "IR state must be DISABLED at $rcli"
+
+ # Since the client just mounted, its last_rcvd entry is not on disk.
+ # Send an RPC so exp_need_sync forces last_rcvd to commit this export
+ # so the client can reconnect during OST recovery (LU-924, LU-1582)
+ $SETSTRIPE -i 0 $DIR/$tfile
+ dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 conv=sync
+
+ # make sure MGS's state is Partial
+ [ $(get_ir_status) = "partial" ] || error "MGS IR state must be partial"
+
+ fail ost1
+ # make sure IR on ost1 is DISABLED
+ local ir_state=$(check_target_ir_state ost1)
+ [ $ir_state = "DISABLED" -o $ir_state = "OFF" ] ||
+ error "IR status on ost1 should be DISABLED"
+
+ # restore it
+ MOUNTOPT=$old_MOUNTOPT
+ zconf_umount $rcli $MOUNT || error "umount failed"
+ zconf_mount $rcli $MOUNT || error "mount failed"
+
+ # make sure MGS's state is full
+ [ $(get_ir_status) = "full" ] || error "MGS IR status must be full"
+
+ fail ost1
+ # make sure IR on ost1 is ENABLED
+ local ir_state=$(check_target_ir_state ost1)
+ [ $ir_state = "ENABLED" -o $ir_state = "ON" ] ||
+ error "IR status on ost1 should be ENABLED"
+
+ return 0
+}
+run_test 105 "IR: NON IR clients support"
+
+# undo test_106: disarm the EXIT trap and drop its second client mount
+cleanup_106() {
+ trap 0
+ umount_client $DIR2
+}
+
+# LU-1789: a lightweight mdc connection must be evicted on MDS failover
+# (it cannot participate in recovery), and every operation it performed
+# must have been synchronous, so data survives the restart.
+test_106() { # LU-1789
+#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805
+ $LCTL set_param fail_loc=0x805
+
+ trap cleanup_106 EXIT
+
+ # enable lightweight flag on mdc connection
+ mount_client $DIR2
+
+ local MDS_NEXP=$(do_facet $SINGLEMDS \
+ lctl get_param -n mdt.${mds1_svc}.num_exports |
+ cut -d' ' -f2)
+ $LCTL set_param fail_loc=0
+
+ touch $DIR2/$tfile || error "failed to create empty file"
+ replay_barrier $SINGLEMDS
+ facet_failover $SINGLEMDS
+
+ # lightweight connection must be evicted
+ touch -c $DIR2/$tfile || true
+ # only count eviction messages logged after this test's banner
+ evicted=`dmesg | awk '/test 106/ {start = 1;}
+ /This client was evicted by .*MDT0000/ {
+ if (start) {
+ print;
+ }
+ }'`
+ [ -z "$evicted" ] && error "lightweight client not evicted by mds"
+
+ # and all operations performed by lightweight client should be
+ # synchronous, so the file created before mds restart should be there
+ $CHECKSTAT -t file $DIR/$tfile || error "file not present"
+ rm -f $DIR/$tfile
+
+ cleanup_106
+}
+run_test 106 "lightweight connection support"