LU-13169 tests: add ONLY_REPEAT parameter to repeat subtests
diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh
index 1604790..32dbc87 100755
@@ -22,6 +22,7 @@ export SK_UNIQUE_NM=${SK_UNIQUE_NM:-false}
 export SK_S2S=${SK_S2S:-false}
 export SK_S2SNM=${SK_S2SNM:-TestFrameNM}
 export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli}
+export SK_SKIPFIRST=${SK_SKIPFIRST:-true}
 export IDENTITY_UPCALL=default
 export QUOTA_AUTO=1
 export FLAKEY=${FLAKEY:-true}
@@ -94,13 +95,14 @@ usage() {
 }
 
 print_summary () {
-    trap 0
-       [ -z "$DEFAULT_SUITES"] && return 0
-    [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
-    local details
-    local form="%-13s %-17s %-9s %s %s\n"
-    printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
-    echo "------------------------------------------------------------------------------------"
+       trap 0
+       [ -z "$DEFAULT_SUITES" ] && return 0
+       [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
+       local details
+       local form="%-13s %-17s %-9s %s %s\n"
+
+       printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
+       echo "---------------------------------------------------------------"
     for O in $DEFAULT_SUITES; do
         O=$(echo $O  | tr "-" "_" | tr "[:lower:]" "[:upper:]")
         [ "${!O}" = "no" ] && continue || true
@@ -147,6 +149,27 @@ print_summary () {
     fi
 }
 
+# Get information about the Lustre environment. The information collected
+# will be used in Lustre tests.
+# usage: get_lustre_env
+# input: No required or optional arguments
+# output: No return values, environment variables are exported
+
+get_lustre_env() {
+
+       export mds1_FSTYPE=${mds1_FSTYPE:-$(facet_fstype mds1)}
+       export ost1_FSTYPE=${ost1_FSTYPE:-$(facet_fstype ost1)}
+
+       export MGS_VERSION=$(lustre_version_code mgs)
+       export MDS1_VERSION=$(lustre_version_code mds1)
+       export OST1_VERSION=$(lustre_version_code ost1)
+       export CLIENT_VERSION=$(lustre_version_code client)
+
+       # Prefer using "mds1" directly instead of SINGLEMDS.
+       # Keep this for compat until it is removed from scripts.
+       export SINGLEMDS=${SINGLEMDS:-mds1}
+}
+
 init_test_env() {
        export LUSTRE=$(absolute_path $LUSTRE)
        export TESTSUITE=$(basename $0 .sh)
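Not part of the patch: a minimal sketch of how a test script might consume the variables exported by get_lustre_env() above, assuming test-framework.sh has been sourced so that version_code() and skip() are available (get_lustre_env is already invoked from init_test_env by this patch; it is shown explicitly here only for clarity, and the version cutoff is hypothetical):

	# illustrative only: gate a subtest on the server version
	get_lustre_env
	echo "mds1 backend: $mds1_FSTYPE, MDS version: $MDS1_VERSION"
	(( MDS1_VERSION >= $(version_code 2.12.0) )) ||
		skip "need MDS >= 2.12.0 for this test"
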
@@ -156,6 +179,7 @@ init_test_env() {
        export DO_CLEANUP=${DO_CLEANUP:-true}
        export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
        export CLEANUP_DM_DEV=false
+       export PAGE_SIZE=$(get_page_size client)
 
        export MKE2FS=$MKE2FS
        if [ -z "$MKE2FS" ]; then
@@ -271,8 +295,8 @@ init_test_env() {
        [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
        export LFS=${LFS:-"$LUSTRE/utils/lfs"}
        [ ! -f "$LFS" ] && export LFS=$(which lfs)
-       SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"}
-       GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"}
+
+       export PERM_CMD=${PERM_CMD:-"$LCTL conf_param"}
 
        export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
        if [ ! -f "$L_GETIDENTITY" ]; then
@@ -397,6 +421,14 @@ init_test_env() {
        fi
 
        export TF_FAIL=${TF_FAIL:-$TMP/tf.fail}
+
+       # Constants used in more than one test script
+       export LOV_MAX_STRIPE_COUNT=2000
+
+       export MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
+       . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+       get_lustre_env
+
 }
 
 check_cpt_number() {
@@ -606,6 +638,24 @@ load_modules_local() {
                return 0
        fi
 
+       # Create special udev test rules on every node
+       if [ -f $LUSTRE/lustre/conf/99-lustre.rules ]; then {
+               sed -e 's|/usr/sbin/lctl|$LCTL|g' $LUSTRE/lustre/conf/99-lustre.rules > /etc/udev/rules.d/99-lustre-test.rules
+       } else {
+               echo "SUBSYSTEM==\"lustre\", ACTION==\"change\", ENV{PARAM}==\"?*\", RUN+=\"$LCTL set_param '\$env{PARAM}=\$env{SETTING}'\"" > /etc/udev/rules.d/99-lustre-test.rules
+       } fi
+       udevadm control --reload-rules
+       udevadm trigger
+
+	# For kmemleak-enabled kernels we need to clear all past state
+	# that obviously has nothing to do with this Lustre run.
+       # Disable automatic memory scanning to avoid perf hit.
+       if [ -f /sys/kernel/debug/kmemleak ] ; then
+               echo scan=off > /sys/kernel/debug/kmemleak
+               echo scan > /sys/kernel/debug/kmemleak
+               echo clear > /sys/kernel/debug/kmemleak
+       fi
+
        echo Loading modules from $LUSTRE
 
        local ncpus
@@ -751,12 +801,20 @@ unload_modules() {
 
        $LUSTRE_RMMOD ldiskfs || return 2
 
+       [ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
+               rm /etc/udev/rules.d/99-lustre-test.rules
+       udevadm control --reload-rules
+       udevadm trigger
+
        if $LOAD_MODULES_REMOTE; then
                local list=$(comma_list $(remote_nodes_list))
                if [ -n "$list" ]; then
                        echo "unloading modules on: '$list'"
                        do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
                        do_rpc_nodes "$list" check_mem_leak
+                       do_rpc_nodes "$list" "rm -f /etc/udev/rules.d/99-lustre-test.rules"
+                       do_rpc_nodes "$list" "udevadm control --reload-rules"
+                       do_rpc_nodes "$list" "udevadm trigger"
                fi
        fi
 
@@ -924,6 +982,16 @@ init_gss() {
                return
        fi
 
+       case $LUSTRE in
+       /usr/lib/lustre/* | /usr/lib64/lustre/* | /usr/lib/lustre | \
+       /usr/lib64/lustre )
+               from_build_tree=false
+               ;;
+       *)
+               from_build_tree=true
+               ;;
+       esac
+
        if ! module_loaded ptlrpc_gss; then
                load_module ptlrpc/gss/ptlrpc_gss
                module_loaded ptlrpc_gss ||
@@ -940,26 +1008,41 @@ init_gss() {
 
                # security ctx config for keyring
                SK_NO_KEY=false
-               mkdir -p $SK_OM_PATH
-               mount -o bind $SK_OM_PATH /etc/request-key.d/
-               local lgssc_conf_line='create lgssc * * '
-               lgssc_conf_line+=$(which lgss_keyring)
-               lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
-
                local lgssc_conf_file="/etc/request-key.d/lgssc.conf"
-               echo "$lgssc_conf_line" > $lgssc_conf_file
+
+               if $from_build_tree; then
+                       mkdir -p $SK_OM_PATH
+                       if grep -q request-key /proc/mounts > /dev/null; then
+                               echo "SSK: Request key already mounted."
+                       else
+                               mount -o bind $SK_OM_PATH /etc/request-key.d/
+                       fi
+                       local lgssc_conf_line='create lgssc * * '
+                       lgssc_conf_line+=$(which lgss_keyring)
+                       lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
+                       echo "$lgssc_conf_line" > $lgssc_conf_file
+               fi
+
                [ -e $lgssc_conf_file ] ||
                        error_exit "Could not find key options in $lgssc_conf_file"
+               echo "$lgssc_conf_file content is:"
+               cat $lgssc_conf_file
 
                if ! local_mode; then
-                       do_nodes $(comma_list $(all_nodes)) "mkdir -p \
-                               $SK_OM_PATH"
-                       do_nodes $(comma_list $(all_nodes)) "mount \
-                               -o bind $SK_OM_PATH \
-                               /etc/request-key.d/"
-                       do_nodes $(comma_list $(all_nodes)) "rsync -aqv \
-                               $HOSTNAME:$lgssc_conf_file \
-                               $lgssc_conf_file >/dev/null 2>&1"
+                       if $from_build_tree; then
+                               do_nodes $(comma_list $(all_nodes)) "mkdir -p \
+                                       $SK_OM_PATH"
+                               do_nodes $(comma_list $(all_nodes)) "mount \
+                                       -o bind $SK_OM_PATH \
+                                       /etc/request-key.d/"
+                               do_nodes $(comma_list $(all_nodes)) "rsync \
+                                       -aqv $HOSTNAME:$lgssc_conf_file \
+                                       $lgssc_conf_file >/dev/null 2>&1"
+                       else
+                               do_nodes $(comma_list $(all_nodes)) \
+                                       "echo $lgssc_conf_file: ; \
+                                       cat $lgssc_conf_file"
+                       fi
                fi
 
                # create shared key on all nodes
@@ -988,8 +1071,9 @@ init_gss() {
                done
                # Distribute keys
                if ! local_mode; then
-                       do_nodes $(comma_list $(all_nodes)) "rsync -av \
-                               $HOSTNAME:$SK_PATH/ $SK_PATH >/dev/null 2>&1"
+                       for lnode in $(all_nodes); do
+                               scp -r $SK_PATH ${lnode}:$(dirname $SK_PATH)/
+                       done
                fi
                # Set client keys to client type to generate prime P
                if local_mode; then
@@ -1017,6 +1101,8 @@ init_gss() {
                                -m $SK_PATH/$FSNAME-nmclient.key \
                                 >/dev/null 2>&1"
                fi
+       fi
+       if $GSS_SK; then
                # mount options for servers and clients
                MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS)
                MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS)
@@ -1040,6 +1126,16 @@ cleanup_gss() {
 
 cleanup_sk() {
        if $GSS_SK; then
+               case $LUSTRE in
+               /usr/lib/lustre/* | /usr/lib64/lustre/* | /usr/lib/lustre | \
+               /usr/lib64/lustre )
+                       from_build_tree=false
+                       ;;
+               *)
+                       from_build_tree=true
+                       ;;
+               esac
+
                if $SK_S2S; then
                        do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNM"
                        do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNMCLI"
@@ -1050,12 +1146,18 @@ cleanup_sk() {
                $RPC_MODE || echo "Cleaning up Shared Key.."
                do_nodes $(comma_list $(all_nodes)) "rm -f \
                        $SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key"
-               # Remove the mount and clean up the files we added to SK_PATH
-               do_nodes $(comma_list $(all_nodes)) "umount \
-                       /etc/request-key.d/"
-               do_nodes $(comma_list $(all_nodes)) "rm -f \
-                       $SK_OM_PATH/lgssc.conf"
-               do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
+               do_nodes $(comma_list $(all_nodes)) "keyctl show | \
+                 awk '/lustre/ { print \\\$1 }' | xargs -IX keyctl unlink X"
+               if $from_build_tree; then
+                       # Remove the mount and clean up the files we added to
+                       # SK_PATH
+                       do_nodes $(comma_list $(all_nodes)) "while grep -q \
+                               request-key.d /proc/mounts; do umount \
+                               /etc/request-key.d/; done"
+                       do_nodes $(comma_list $(all_nodes)) "rm -f \
+                               $SK_OM_PATH/lgssc.conf"
+                       do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
+               fi
                SK_NO_KEY=true
        fi
 }
@@ -1232,7 +1334,7 @@ running_in_vm() {
        virt=$(dmidecode -s system-product-name | awk '{print $1}')
 
        case $virt in
-               VMware|KVM|VirtualBox|Parallels)
+               VMware|KVM|VirtualBox|Parallels|Bochs)
                        echo $virt | tr '[A-Z]' '[a-z]' ;;
                *) ;;
        esac
@@ -1448,34 +1550,42 @@ set_debug_size () {
 }
 
 set_default_debug () {
-    local debug=${1:-"$PTLDEBUG"}
-    local subsys=${2:-"$SUBSYSTEM"}
-    local debug_size=${3:-$DEBUG_SIZE}
+       local debug=${1:-"$PTLDEBUG"}
+       local subsys=${2:-"$SUBSYSTEM"}
+       local debug_size=${3:-$DEBUG_SIZE}
 
-    [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
-    [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
+       [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
+       [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
 
-    [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
+       [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
 }
 
 set_default_debug_nodes () {
        local nodes="$1"
+       local debug="${2:-"$PTLDEBUG"}"
+       local subsys="${3:-"$SUBSYSTEM"}"
+       local debug_size="${4:-$DEBUG_SIZE}"
 
        if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
                nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
                set_default_debug
        fi
 
-       do_rpc_nodes "$nodes" set_default_debug \
-               \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
+       [[ -z "$nodes" ]] ||
+               do_rpc_nodes "$nodes" set_default_debug \
+                       \\\"$debug\\\" \\\"$subsys\\\" $debug_size || true
 }
 
 set_default_debug_facet () {
-    local facet=$1
-    local node=$(facet_active_host $facet)
-    [ -z "$node" ] && echo "No host defined for facet $facet" && exit 1
+       local facet=$1
+       local debug="${2:-"$PTLDEBUG"}"
+       local subsys="${3:-"$SUBSYSTEM"}"
+       local debug_size="${4:-$DEBUG_SIZE}"
+       local node=$(facet_active_host $facet)
+
+       [ -n "$node" ] || error "No host defined for facet $facet"
 
-    set_default_debug_nodes $node
+       set_default_debug_nodes $node "$debug" "$subsys" $debug_size
 }
 
 set_hostid () {
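Illustrative call (not in the patch) of the extended debug helpers above, which now accept an optional debug mask, subsystem mask and buffer size per call instead of always using $PTLDEBUG/$SUBSYSTEM/$DEBUG_SIZE:

	# temporarily run mds1 with full debugging and a 100 MB trace buffer,
	# restoring the configured defaults when the subtest exits
	set_default_debug_facet mds1 -1 "$SUBSYSTEM" 100
	stack_trap "set_default_debug_facet mds1" EXIT
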
@@ -1916,11 +2026,6 @@ mount_facet() {
 
        set_default_debug_facet $facet
 
-       if [[ $facet == mds* ]]; then
-               do_facet $facet \
-               lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 2>/dev/null
-       fi
-
        if [[ $opts =~ .*nosvc.* ]]; then
                echo "Start $dm_dev without service"
        else
@@ -1982,12 +2087,6 @@ start() {
        mount_facet ${facet}
        RC=$?
 
-       if [[ $facet == mds* ]]; then
-               do_facet $facet \
-                       lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
-                               2>/dev/null
-       fi
-
        return $RC
 }
 
@@ -2022,46 +2121,6 @@ stop() {
        fi
 }
 
-# save quota version (both administrative and operational quotas)
-# add an additional parameter if mountpoint is ever different from $MOUNT
-#
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-#     it should be removed whenever we drop the interoperability for such
-#     server.
-quota_save_version() {
-    local fsname=${2:-$FSNAME}
-    local spec=$1
-    local ver=$(tr -c -d "123" <<< $spec)
-    local type=$(tr -c -d "ug" <<< $spec)
-
-    [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
-
-    [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
-
-    do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec"
-    local varsvc
-    local osts=$(get_facets OST)
-    for ost in ${osts//,/ }; do
-        varsvc=${ost}_svc
-        do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
-    done
-}
-
-# client could mount several lustre
-#
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-#     it should be removed whenever we drop the interoperability for such
-#     server.
-quota_type() {
-       local fsname=${1:-$FSNAME}
-       local rc=0
-       do_facet $SINGLEMDS lctl get_param mdd.${fsname}-MDT*.quota_type ||
-               rc=$?
-       do_nodes $(comma_list $(osts_nodes)) \
-               lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
-       return $rc
-}
-
 # get mdt quota type
 mdt_quota_type() {
        local varsvc=${SINGLEMDS}_svc
@@ -2080,12 +2139,24 @@ ost_quota_type() {
 # restore old quota type settings
 restore_quota() {
        if [ "$old_MDT_QUOTA_TYPE" ]; then
-               do_facet mgs $LCTL conf_param \
-                       $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
+               if [[ $PERM_CMD == *"set_param -P"* ]]; then
+                       do_facet mgs $PERM_CMD \
+                               osd-*.$FSNAME-MDT*.quota_slave.enable = \
+                               $old_MDT_QUOTA_TYPE
+               else
+                       do_facet mgs $PERM_CMD \
+                               $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
+               fi
        fi
        if [ "$old_OST_QUOTA_TYPE" ]; then
-               do_facet mgs $LCTL conf_param \
-                       $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
+               if [[ $PERM_CMD == *"set_param -P"* ]]; then
+                       do_facet mgs $PERM_CMD \
+                               osd-*.$FSNAME-OST*.quota_slave.enable = \
+                               $old_OST_QUOTA_TYPE
+               else
+                       do_facet mgs $LCTL conf_param \
+                               $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
+               fi
        fi
 }
 
@@ -2138,10 +2209,17 @@ setup_quota(){
        export old_MDT_QUOTA_TYPE=$mdt_qtype
        export old_OST_QUOTA_TYPE=$ost_qtype
 
-       do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$QUOTA_TYPE ||
-               error "set mdt quota type failed"
-       do_facet mgs $LCTL conf_param $FSNAME.quota.ost=$QUOTA_TYPE ||
-               error "set ost quota type failed"
+       if [[ $PERM_CMD == *"set_param -P"* ]]; then
+               do_facet mgs $PERM_CMD \
+                       osd-*.$FSNAME-MDT*.quota_slave.enable=$QUOTA_TYPE
+               do_facet mgs $PERM_CMD \
+                       osd-*.$FSNAME-OST*.quota_slave.enable=$QUOTA_TYPE
+       else
+               do_facet mgs $PERM_CMD $FSNAME.quota.mdt=$QUOTA_TYPE ||
+                       error "set mdt quota type failed"
+               do_facet mgs $PERM_CMD $FSNAME.quota.ost=$QUOTA_TYPE ||
+                       error "set ost quota type failed"
+       fi
 
        local quota_usrs=$QUOTA_USERS
 
@@ -2187,6 +2265,11 @@ zconf_mount() {
                exit 1
        fi
 
+       if $GSS_SK; then
+               # update mount option with skpath
+               opts=$(add_sk_mntflag $opts)
+       fi
+
        echo "Starting client: $client: $flags $opts $device $mnt"
        do_node $client mkdir -p $mnt
        if [ -n "$FILESET" -a -z "$SKIP_FILESET" ];then
@@ -2221,43 +2304,31 @@ zconf_mount() {
 }
 
 zconf_umount() {
-    local client=$1
-    local mnt=$2
-    local force
-    local busy
-    local need_kill
-
-    [ "$3" ] && force=-f
-    local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
-    if [ $running -ne 0 ]; then
-        echo "Stopping client $client $mnt (opts:$force)"
-        do_node $client lsof -t $mnt || need_kill=no
-        if [ "x$force" != "x" -a "x$need_kill" != "xno" ]; then
-            pids=$(do_node $client lsof -t $mnt | sort -u);
-            if [ -n $pids ]; then
-                do_node $client kill -9 $pids || true
-            fi
-        fi
-
-        busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") || true
-        if [ $busy -ne 0 ] ; then
-            echo "$mnt is still busy, wait one second" && sleep 1
-            do_node $client umount $force $mnt
-        fi
-    fi
-}
-
-# Mount the file system on the MGS
-mount_mgs_client() {
-       do_facet mgs "mkdir -p $MOUNT"
-       zconf_mount $mgs_HOST $MOUNT $MOUNT_OPTS ||
-               error "unable to mount $MOUNT on MGS"
-}
+       local client=$1
+       local mnt=$2
+       local force
+       local busy
+       local need_kill
+       local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
+
+       [ "$3" ] && force=-f
+       [ $running -eq 0 ] && return 0
+
+       echo "Stopping client $client $mnt (opts:$force)"
+       do_node $client lsof -t $mnt || need_kill=no
+       if [ "x$force" != "x" ] && [ "x$need_kill" != "xno" ]; then
+               pids=$(do_node $client lsof -t $mnt | sort -u);
+               if [ -n "$pids" ]; then
+                       do_node $client kill -9 $pids || true
+               fi
+       fi
 
-# Unmount the file system on the MGS
-umount_mgs_client() {
-       zconf_umount $mgs_HOST $MOUNT
-       do_facet mgs "rm -rf $MOUNT"
+       busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") ||
+               true
+       if [ $busy -ne 0 ] ; then
+               echo "$mnt is still busy, wait one second" && sleep 1
+               do_node $client umount $force $mnt
+       fi
 }
 
 # nodes is comma list
@@ -2333,7 +2404,8 @@ zconf_mount_clients() {
        fi
 
        echo "Starting client $clients: $flags $opts $device $mnt"
-       if [ -n "$FILESET" -a ! -n "$SKIP_FILESET" ]; then
+       do_nodes $clients mkdir -p $mnt
+       if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
                if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
                        # Mount with own nodemap key
                        local i=0
@@ -2640,6 +2712,18 @@ start_client_load() {
                        LFS=$LFS \
                        LCTL=$LCTL \
                        FSNAME=$FSNAME \
+                       MPIRUN=$MPIRUN \
+                       MPIRUN_OPTIONS=\\\"$MPIRUN_OPTIONS\\\" \
+                       MACHINEFILE_OPTION=\\\"$MACHINEFILE_OPTION\\\" \
+                       num_clients=$(get_node_count ${CLIENTS//,/ }) \
+                       ior_THREADS=$ior_THREADS ior_iteration=$ior_iteration \
+                       ior_blockSize=$ior_blockSize \
+                       ior_blockUnit=$ior_blockUnit \
+                       ior_xferSize=$ior_xferSize ior_type=$ior_type \
+                       ior_DURATION=$ior_DURATION \
+                       ior_stripe_params=\\\"$ior_stripe_params\\\" \
+                       ior_custom_params=\\\"$ior_custom_param\\\" \
+                       mpi_ior_custom_threads=$mpi_ior_custom_threads \
                        run_${load}.sh" &
        local ppid=$!
        log "Started client load: ${load} on $client"
@@ -2651,16 +2735,15 @@ start_client_load() {
 }
 
 start_client_loads () {
-    local -a clients=(${1//,/ })
-    local numloads=${#CLIENT_LOADS[@]}
-    local testnum
+       local -a clients=(${1//,/ })
+       local numloads=${#CLIENT_LOADS[@]}
 
-    for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
-        testnum=$((nodenum % numloads))
-        start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]}
-    done
-    # bug 22169: wait the background threads to start
-    sleep 2
+       for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
+               local load=$((nodenum % numloads))
+               start_client_load ${clients[nodenum]} ${CLIENT_LOADS[load]}
+       done
+	# bug 22169: wait for the background threads to start
+       sleep 2
 }
 
 # only for remote client
@@ -2924,6 +3007,142 @@ wait_zfs_commit() {
        fi
 }
 
+fill_ost() {
+       local filename=$1
+       local ost_idx=$2
+       local lwm=$3  #low watermark
+       local size_mb #how many MB should we write to pass watermark
+       local ost_name=$(ostname_from_index $ost_idx)
+
+       free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
+       size_mb=0
+       if (( $free_kb / 1024 > lwm )); then
+               size_mb=$((free_kb / 1024 - lwm))
+       fi
+	# If writing 10% of the free space crosses the low watermark, use that much
+       if (( $free_kb / 10240 > size_mb )); then
+               size_mb=$((free_kb / 10240))
+       else
+		# We need to write at least 1.1x the difference between
+		# the free space and the low watermark
+               size_mb=$((size_mb + size_mb / 10))
+       fi
+       if (( lwm <= $free_kb / 1024 )) ||
+          [ ! -f $DIR/${filename}.fill_ost$ost_idx ]; then
+               $LFS setstripe -i $ost_idx -c1 $DIR/${filename}.fill_ost$ost_idx
+               dd if=/dev/zero of=$DIR/${filename}.fill_ost$ost_idx bs=1M \
+                       count=$size_mb oflag=append conv=notrunc
+       fi
+
+       sleep_maxage
+
+       free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
+       echo "OST still has $((free_kb / 1024)) MB free"
+}
+
+# This checks only the primary MDS
+ost_watermarks_get() {
+       local ost_idx=$1
+       local ost_name=$(ostname_from_index $ost_idx)
+       local mdtosc_proc=$(get_mdtosc_proc_path $SINGLEMDS $ost_name)
+
+       local hwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
+                       osp.$mdtosc_proc.reserved_mb_high)
+       local lwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
+                       osp.$mdtosc_proc.reserved_mb_low)
+
+       echo "$lwm $hwm"
+}
+
+# Note that we set watermarks on all MDSes (necessary for striped dirs)
+ost_watermarks_set() {
+       local ost_idx=$1
+       local lwm=$2
+       local hwm=$3
+       local ost_name=$(ostname_from_index $ost_idx)
+       local facets=$(get_facets MDS)
+
+       do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
+               osp.*$ost_name*.reserved_mb_low=$lwm \
+               osp.*$ost_name*.reserved_mb_high=$hwm > /dev/null
+
+       # sleep to ensure we see the change
+       sleep_maxage
+}
+
+ost_watermarks_set_low_space() {
+       local ost_idx=$1
+       local wms=$(ost_watermarks_get $ost_idx)
+       local ost_name=$(ostname_from_index $ost_idx)
+
+       local old_lwm=$(echo $wms | awk '{ print $1 }')
+       local old_hwm=$(echo $wms | awk '{ print $2 }')
+
+       local blocks=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
+       # minimal extension size is 64M
+       local new_lwm=50
+       if (( $blocks / 1024 > 50 )); then
+               new_lwm=$((blocks / 1024 - 50))
+       fi
+       local new_hwm=$((new_lwm + 5))
+
+       ost_watermarks_set $ost_idx $new_lwm $new_hwm
+       echo "watermarks: $old_lwm $old_hwm $new_lwm $new_hwm"
+}
+
+# Set watermarks to ~current available space & then write data to fill it
+# Note OST is not *actually* full after this, it just reports ENOSPC in the
+# internal statfs used by the stripe allocator
+#
+# The first parameter is the filename prefix, which must match the t-f cleanup
+# pattern (rm -rf $DIR/[Rdfs][0-9]*), i.e. $tfile works fine
+ost_watermarks_set_enospc() {
+       local filename=$1
+       local ost_idx=$2
+       # on the mdt's osc
+       local ost_name=$(ostname_from_index $ost_idx)
+       local facets=$(get_facets MDS)
+       local wms
+       local MDS
+
+       for MDS in ${facets//,/ }; do
+               local mdtosc_proc=$(get_mdtosc_proc_path $MDS $ost_name)
+
+               do_facet $MDS $LCTL get_param -n \
+                       osp.$mdtosc_proc.reserved_mb_high ||
+                       skip  "remote MDS does not support reserved_mb_high"
+       done
+
+       wms=$(ost_watermarks_set_low_space $ost_idx)
+       local new_lwm=$(echo $wms | awk '{ print $4 }')
+       fill_ost $filename $ost_idx $new_lwm
+	# The first ENOSPC may trigger orphan deletion, so repeat
+       fill_ost $filename $ost_idx $new_lwm
+       echo $wms
+}
+
+ost_watermarks_enospc_delete_files() {
+       local filename=$1
+       local ost_idx=$2
+
+       rm -f $DIR/${filename}.fill_ost$ost_idx
+
+       wait_delete_completed
+       wait_mds_ost_sync
+}
+
+# clean up from "ost_watermarks_set_enospc"
+ost_watermarks_clear_enospc() {
+       local filename=$1
+       local ost_idx=$2
+       local old_lwm=$4
+       local old_hwm=$5
+
+       ost_watermarks_enospc_delete_files $filename $ost_idx
+       ost_watermarks_set $ost_idx $old_lwm $old_hwm
+       echo "set OST$ost_idx lwm back to $old_lwm, hwm back to $old_hwm"
+}
+
 wait_delete_completed_mds() {
        local max_wait=${1:-20}
        local mds2sync=""
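Not part of the patch: a condensed sketch of how a subtest might string the watermark helpers above together; the OST index and $tfile prefix are illustrative. ost_watermarks_set_enospc() echoes the full "watermarks: old_lwm old_hwm new_lwm new_hwm" string, which is handed back verbatim so ost_watermarks_clear_enospc() can restore the original values ($4/$5 in its argument list):

	# drive OST index 0 to ENOSPC in the MDS stripe allocator, then restore
	local wms=$(ost_watermarks_set_enospc $tfile 0)
	stack_trap "ost_watermarks_clear_enospc $tfile 0 $wms" EXIT
	# ... test body that expects ENOSPC when allocating on OST index 0 ...
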
@@ -3300,6 +3519,10 @@ facet_failover() {
                change_active ${affecteds[index]}
 
                wait_for_facet ${affecteds[index]}
+               if $GSS_SK; then
+                       init_gss
+                       init_facets_vars_simple
+               fi
                # start mgs first if it is affected
                if ! combined_mgs_mds &&
                        list_member ${affecteds[index]} mgs; then
@@ -3309,6 +3532,12 @@ facet_failover() {
                affected=$(exclude_items_from_list ${affecteds[index]} mgs)
                echo mount facets: ${affecteds[index]}
                mount_facets ${affecteds[index]}
+               if $GSS_SK; then
+                       do_nodes $(comma_list $(all_nodes)) \
+                               "keyctl show | grep lustre | cut -c1-11 |
+                               sed -e 's/ //g;' |
+                               xargs -IX keyctl setperm X 0x3f3f3f3f"
+               fi
        done
 }
 
@@ -3402,7 +3631,12 @@ fail() {
        local facets=$1
        local clients=${CLIENTS:-$HOSTNAME}
 
+       SK_NO_KEY_save=$SK_NO_KEY
+       if $GSS_SK; then
+               export SK_NO_KEY=false
+       fi
        facet_failover $* || error "failover: $?"
+       export SK_NO_KEY=$SK_NO_KEY_save
        # to initiate all OSC idling connections
        clients_up
        wait_clients_import_state "$clients" "$facets" "\(FULL\|IDLE\)"
@@ -4125,40 +4359,44 @@ unmount_fstype() {
 ## MountConf setup
 
 stopall() {
-    # make sure we are using the primary server, so test-framework will
-    # be able to clean up properly.
-    activemds=`facet_active mds1`
-    if [ $activemds != "mds1" ]; then
-        fail mds1
-    fi
+       # make sure we are using the primary server, so test-framework will
+       # be able to clean up properly.
+       activemds=`facet_active mds1`
+       if [ $activemds != "mds1" ]; then
+               fail mds1
+       fi
 
-    local clients=$CLIENTS
-    [ -z $clients ] && clients=$(hostname)
+       local clients=$CLIENTS
+       [ -z $clients ] && clients=$(hostname)
 
-    zconf_umount_clients $clients $MOUNT "$*" || true
-    [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
+       zconf_umount_clients $clients $MOUNT "$*" || true
+       [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
 
-    [ -n "$CLIENTONLY" ] && return
+       [ -n "$CLIENTONLY" ] && return
 
-    # The add fn does rm ${facet}active file, this would be enough
-    # if we use do_facet <facet> only after the facet added, but
-    # currently we use do_facet mds in local.sh
-    for num in `seq $MDSCOUNT`; do
-        stop mds$num -f
-        rm -f ${TMP}/mds${num}active
-    done
-    combined_mgs_mds && rm -f $TMP/mgsactive
+       # The add fn does rm ${facet}active file, this would be enough
+       # if we use do_facet <facet> only after the facet added, but
+       # currently we use do_facet mds in local.sh
+       for num in `seq $MDSCOUNT`; do
+               stop mds$num -f
+               rm -f ${TMP}/mds${num}active
+       done
+       combined_mgs_mds && rm -f $TMP/mgsactive
 
-    for num in `seq $OSTCOUNT`; do
-        stop ost$num -f
-        rm -f $TMP/ost${num}active
-    done
+       for num in `seq $OSTCOUNT`; do
+               stop ost$num -f
+               rm -f $TMP/ost${num}active
+       done
 
-    if ! combined_mgs_mds ; then
-        stop mgs
-    fi
+       if ! combined_mgs_mds ; then
+               stop mgs
+       fi
 
-    return 0
+       if $SHARED_KEY; then
+               export SK_MOUNTED=false
+       fi
+
+       return 0
 }
 
 cleanup_echo_devs () {
@@ -4197,6 +4435,48 @@ upper() {
        echo -n "$1" | tr '[:lower:]' '[:upper:]'
 }
 
+squash_opt() {
+       local var="$*"
+       local other=""
+       local opt_o=""
+       local opt_e=""
+       local first_e=0
+       local first_o=0
+       local take=""
+
+       var=$(echo "$var" | sed -e 's/,\( \)*/,/g')
+       for i in $(echo "$var"); do
+               if [ "$i" == "-O" ]; then
+                       take="o";
+                       first_o=$(($first_o + 1))
+                       continue;
+               fi
+               if [ "$i" == "-E" ]; then
+                       take="e";
+                       first_e=$(($first_e + 1 ))
+                       continue;
+               fi
+               case $take in
+                       "o")
+                               [ $first_o -gt 1 ] && opt_o+=",";
+                               opt_o+="$i";
+                               ;;
+                       "e")
+                               [ $first_e -gt 1 ] && opt_e+=",";
+                               opt_e+="$i";
+                               ;;
+                       *)
+                               other+=" $i";
+                               ;;
+               esac
+               take=""
+       done
+
+       echo -n "$other"
+       [ -n "$opt_o" ] && echo " -O $opt_o"
+       [ -n "$opt_e" ] && echo " -E $opt_e"
+}
+
 mkfs_opts() {
        local facet=$1
        local dev=$2
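For reference (not part of the patch): squash_opt() above folds repeated -O/-E mke2fs switches into single comma-separated options, as applied to $fs_mkfs_opts below. An illustrative invocation with made-up option values:

	squash_opt "-O dirdata -E lazy_itable_init -O ea_inode"
	# prints (other arguments first, then one line per merged switch):
	#  -O dirdata,ea_inode
	#  -E lazy_itable_init
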
@@ -4237,11 +4517,7 @@ mkfs_opts() {
                opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"}
 
                if [ $fstype == ldiskfs ]; then
-                       # Check for wide striping
-                       if [ $OSTCOUNT -gt 160 ]; then
-                               MDSJOURNALSIZE=${MDSJOURNALSIZE:-4096}
-                               fs_mkfs_opts+="-O ea_inode"
-                       fi
+                       fs_mkfs_opts+="-O ea_inode,large_dir"
 
                        var=${facet}_JRN
                        if [ -n "${!var}" ]; then
@@ -4279,6 +4555,8 @@ mkfs_opts() {
        var=${type}_FS_MKFS_OPTS
        fs_mkfs_opts+=${!var:+" ${!var}"}
 
+       [ $fstype == ldiskfs ] && fs_mkfs_opts=$(squash_opt $fs_mkfs_opts)
+
        if [ -n "${fs_mkfs_opts## }" ]; then
                opts+=" --mkfsoptions=\\\"${fs_mkfs_opts## }\\\""
        fi
@@ -4343,6 +4621,9 @@ __touch_device()
        [[ ! "$device" =~ ^/dev/ ]] || [[ "$device" =~ ^/dev/shm/ ]] ||
                error "$facet: device '$device' does not exist"
 
+       # zpool create doesn't like empty files
+       [[ $(facet_fstype $facet) == zfs ]] && return 0
+
        do_facet $facet "touch \"${device}\""
 }
 
@@ -4403,7 +4684,7 @@ formatall() {
        # (Assumes MDS version is also OSS version)
        if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.8.54) ];
        then
-           do_rpc_nodes "$(comma_list $(remote_nodes_list))" set_hostid
+               do_rpc_nodes "$(comma_list $(all_server_nodes))" set_hostid
        fi
 
        # We need ldiskfs here, may as well load them all
@@ -4648,8 +4929,17 @@ setupall() {
                if $GSS_SK; then
                        set_rule $FSNAME any cli2mdt $SK_FLAVOR
                        set_rule $FSNAME any cli2ost $SK_FLAVOR
-                       wait_flavor cli2mdt $SK_FLAVOR
-                       wait_flavor cli2ost $SK_FLAVOR
+                       if $SK_SKIPFIRST; then
+                               export SK_SKIPFIRST=false
+
+                               sleep 30
+                               do_nodes $CLIENTS \
+                                        "lctl set_param osc.*.idle_connect=1"
+                               return
+                       else
+                               wait_flavor cli2mdt $SK_FLAVOR
+                               wait_flavor cli2ost $SK_FLAVOR
+                       fi
                else
                        set_flavor_all $SEC
                fi
@@ -4755,6 +5045,31 @@ init_facets_vars () {
        fi
 }
 
+init_facets_vars_simple () {
+       local devname
+
+       if ! remote_mds_nodsh; then
+               for num in $(seq $MDSCOUNT); do
+                       devname=$(mdsdevname $num)
+                       eval export mds${num}_dev=${devname}
+                       eval export mds${num}_opt=\"${MDS_MOUNT_OPTS}\"
+               done
+       fi
+
+       if ! combined_mgs_mds ; then
+               eval export mgs_dev=$(mgsdevname)
+               eval export mgs_opt=\"${MGS_MOUNT_OPTS}\"
+       fi
+
+       if ! remote_ost_nodsh; then
+               for num in $(seq $OSTCOUNT); do
+                       devname=$(ostdevname $num)
+                       eval export ost${num}_dev=${devname}
+                       eval export ost${num}_opt=\"${OST_MOUNT_OPTS}\"
+               done
+       fi
+}
+
 osc_ensure_active () {
     local facet=$1
     local timeout=$2
@@ -4793,10 +5108,57 @@ set_conf_param_and_check() {
                error "check $PARAM failed!"
 }
 
+set_persistent_param() {
+       local myfacet=$1
+       local test_param=$2
+       local param=$3
+       local orig=$(do_facet $myfacet "$LCTL get_param -n $test_param")
+
+       if [ $# -gt 3 ]; then
+               local final=$4
+       else
+               local -i final
+               final=$((orig + 5))
+       fi
+
+       if [[ $PERM_CMD == *"set_param -P"* ]]; then
+               echo "Setting $test_param from $orig to $final"
+               do_facet mgs "$PERM_CMD $test_param='$final'" ||
+                       error "$PERM_CMD $test_param failed"
+       else
+               echo "Setting $param from $orig to $final"
+               do_facet mgs "$PERM_CMD $param='$final'" ||
+                       error "$PERM_CMD $param failed"
+       fi
+}
+
+set_persistent_param_and_check() {
+       local myfacet=$1
+       local test_param=$2
+       local param=$3
+       local orig=$(do_facet $myfacet "$LCTL get_param -n $test_param")
+
+       if [ $# -gt 3 ]; then
+               local final=$4
+       else
+               local -i final
+               final=$((orig + 5))
+       fi
+
+       set_persistent_param $myfacet $test_param $param "$final"
+
+       wait_update_facet $myfacet "$LCTL get_param -n $test_param" "$final" ||
+               error "check $param failed!"
+}
+
 init_param_vars () {
        TIMEOUT=$(lctl get_param -n timeout)
        TIMEOUT=${TIMEOUT:-20}
 
+       if [ -n "$arg1" ]; then
+               [ "$arg1" = "server_only" ] && return
+       fi
+
        remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
 
        TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
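An illustrative use (not in the patch) of the new set_persistent_param_and_check() helper defined above; it takes the facet to verify, the "lctl get_param" name, the legacy conf_param name, and an optional value (defaulting to the current value plus 5), and chooses "set_param -P" or "conf_param" based on $PERM_CMD. The parameter below is only an example:

	# persistently raise client readahead and wait until the facet sees it
	set_persistent_param_and_check client \
		"llite.$FSNAME-*.max_read_ahead_mb" \
		"$FSNAME.llite.max_read_ahead_mb" 64
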
@@ -4814,9 +5176,8 @@ init_param_vars () {
                elif [ $current_jobid_var != $JOBID_VAR ]; then
                        echo "setting jobstats to $JOBID_VAR"
 
-                       set_conf_param_and_check client                 \
-                               "$LCTL get_param -n jobid_var"          \
-                               "$FSNAME.sys.jobid_var" $JOBID_VAR
+                       set_persistent_param_and_check client \
+                               "jobid_var" "$FSNAME.sys.jobid_var" $JOBID_VAR
                fi
        else
                echo "jobstats not supported by server"
@@ -4911,11 +5272,11 @@ check_timeout () {
 }
 
 is_mounted () {
-    local mntpt=$1
-    [ -z $mntpt ] && return 1
-    local mounted=$(mounted_lustre_filesystems)
+       local mntpt=$1
+       [ -z $mntpt ] && return 1
+       local mounted=$(mounted_lustre_filesystems)
 
-    echo $mounted' ' | grep -w -q $mntpt' '
+       echo $mounted' ' | grep -w -q $mntpt' '
 }
 
 is_empty_dir() {
@@ -5014,22 +5375,12 @@ check_and_setup_lustre() {
                fi
        fi
 
-       init_gss
        if $GSS_SK; then
                set_flavor_all null
        elif $GSS; then
                set_flavor_all $SEC
        fi
 
-       if [ -z "$CLIENTONLY" ]; then
-               # Enable remote MDT create for testing
-               for num in $(seq $MDSCOUNT); do
-                       do_facet mds$num \
-                               lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
-                                       2>/dev/null
-               done
-       fi
-
        if [ "$ONLY" == "setup" ]; then
                exit 0
        fi
@@ -5459,7 +5810,7 @@ at_max_set() {
 drop_request() {
 # OBD_FAIL_MDS_ALL_REQUEST_NET
     RC=0
-    do_facet $SINGLEMDS lctl set_param fail_loc=0x123
+    do_facet $SINGLEMDS lctl set_param fail_val=0 fail_loc=0x123
     do_facet client "$1" || RC=$?
     do_facet $SINGLEMDS lctl set_param fail_loc=0
     return $RC
@@ -5544,11 +5895,11 @@ drop_bl_callback() {
        return $rc
 }
 
-drop_ldlm_reply() {
-#define OBD_FAIL_LDLM_REPLY              0x30c
+drop_mdt_ldlm_reply() {
+#define OBD_FAIL_MDS_LDLM_REPLY_NET    0x157
     RC=0
-    local list=$(comma_list $(mdts_nodes) $(osts_nodes))
-    do_nodes $list lctl set_param fail_loc=0x30c
+    local list=$(comma_list $(mdts_nodes))
+    do_nodes $list lctl set_param fail_loc=0x157
 
     do_facet client "$@" || RC=$?
 
@@ -5556,11 +5907,11 @@ drop_ldlm_reply() {
     return $RC
 }
 
-drop_ldlm_reply_once() {
-#define OBD_FAIL_LDLM_REPLY              0x30c
+drop_mdt_ldlm_reply_once() {
+#define OBD_FAIL_MDS_LDLM_REPLY_NET    0x157
     RC=0
-    local list=$(comma_list $(mdts_nodes) $(osts_nodes))
-    do_nodes $list lctl set_param fail_loc=0x8000030c
+    local list=$(comma_list $(mdts_nodes))
+    do_nodes $list lctl set_param fail_loc=0x80000157
 
     do_facet client "$@" || RC=$?
 
@@ -5607,8 +5958,10 @@ lru_resize_disable()
 
 flock_is_enabled()
 {
+       local mountpath=${1:-$MOUNT}
        local RC=0
-       [ -z "$(mount | grep "$MOUNT.*flock" | grep -v noflock)" ] && RC=1
+
+       [ -z "$(mount | grep "$mountpath .*flock" | grep -v noflock)" ] && RC=1
        return $RC
 }
 
@@ -5653,19 +6006,19 @@ debug_size_restore() {
 }
 
 start_full_debug_logging() {
-    debugsave
-    debug_size_save
+       debugsave
+       debug_size_save
 
-    local FULLDEBUG=-1
-    local DEBUG_SIZE=150
+       local fulldebug=-1
+       local debug_size=150
+       local nodes=$(comma_list $(nodes_list))
 
-    do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE"
-    do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=$FULLDEBUG;"
+       do_nodes $nodes "$LCTL set_param debug=$fulldebug debug_mb=$debug_size"
 }
 
 stop_full_debug_logging() {
-    debug_size_restore
-    debugrestore
+       debug_size_restore
+       debugrestore
 }
 
 # prints bash call stack
@@ -5722,7 +6075,7 @@ report_error() {
 # usage: stack_trap arg sigspec
 #
 # stack_trap() behaves like bash's built-in trap, except that it "stacks" the
-# command ``arg`` on top of previously defined commands for ``sigspec`` instead
+# command "arg" on top of previously defined commands for "sigspec" instead
 # of overwriting them.
 # stacked traps are executed in reverse order of their registration
 #
@@ -5730,7 +6083,7 @@ report_error() {
 stack_trap()
 {
        local arg="$1"
-       local sigspec="$2"
+       local sigspec="${2:-EXIT}"
 
        # Use "trap -p" to get the quoting right
        local old_trap="$(trap -p "$sigspec")"
@@ -5757,7 +6110,7 @@ exit_status () {
        local status=0
        local log=$TESTSUITELOG
 
-       [ -f "$log" ] && grep -q FAIL $log && status=1
+       [ -f "$log" ] && grep -qw FAIL $log && status=1
        exit $status
 }
 
@@ -5799,6 +6152,12 @@ error_not_in_vm() {
        fi
 }
 
+#
+# Function: skip_env()
+# Purpose:  to skip a test during developer testing because some tool
+#           is missing, but fail the test in release testing because the test
+#           environment is not configured properly.
+#
 skip_env () {
        $FAIL_ON_SKIP_ENV && error false $@ || skip $@
 }
@@ -5816,6 +6175,7 @@ skip_noexit() {
 
        [[ -n "$TESTSUITELOG" ]] &&
                echo "$TESTSUITE: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true
+       unset TESTNAME
 }
 
 skip() {
@@ -5858,7 +6218,7 @@ basetest() {
     if [[ $1 = [a-z]* ]]; then
         echo $1
     else
-        echo ${1%%[a-z]*}
+       echo ${1%%[a-zA-Z]*}
     fi
 }
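The widened pattern above now strips an upper- or lower-case subtest suffix; for example (illustrative):

	basetest 27J    # -> 27   (previously returned "27J" unchanged)
	basetest 103a   # -> 103  (behaviour unchanged for lower-case suffixes)
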
 
@@ -5866,72 +6226,90 @@ basetest() {
 export LAST_SKIPPED=
 export ALWAYS_SKIPPED=
 #
-# Main entry into test-framework. This is called with the name and
-# description of a test. The name is used to find the function to run
+# Main entry into test-framework. This is called with the number and
+# description of a test. The number is used to find the function to run
 # the test using "test_$name".
 #
 # This supports a variety of methods of specifying specific test to
-# run or not run.  These need to be documented...
+# run or not run:
+# - ONLY= env variable with space-separated list of test numbers to run
+# - EXCEPT= env variable with space-separated list of test numbers to exclude
 #
 run_test() {
        assert_DIR
+       local testnum=$1
+       local testmsg=$2
+       export base=$(basetest $testnum)
+       export TESTNAME=test_$testnum
+       LAST_SKIPPED=
+       ALWAYS_SKIPPED=
 
-       export base=$(basetest $1)
+       # Check the EXCEPT, ALWAYS_EXCEPT and SLOW lists to see if we
+       # need to skip the current test. If so, set the ALWAYS_SKIPPED flag.
+       local isexcept=EXCEPT_$testnum
+       local isexcept_base=EXCEPT_$base
+       if [ ${!isexcept}x != x ]; then
+               ALWAYS_SKIPPED="y"
+               skip_message="skipping excluded test $testnum"
+       elif [ ${!isexcept_base}x != x ]; then
+               ALWAYS_SKIPPED="y"
+               skip_message="skipping excluded test $testnum (base $base)"
+       fi
+
+       isexcept=EXCEPT_ALWAYS_$testnum
+       isexcept_base=EXCEPT_ALWAYS_$base
+       if [ ${!isexcept}x != x ]; then
+               ALWAYS_SKIPPED="y"
+               skip_message="skipping ALWAYS excluded test $testnum"
+       elif [ ${!isexcept_base}x != x ]; then
+               ALWAYS_SKIPPED="y"
+               skip_message="skipping ALWAYS excluded test $testnum (base $base)"
+       fi
+
+       isexcept=EXCEPT_SLOW_$testnum
+       isexcept_base=EXCEPT_SLOW_$base
+       if [ ${!isexcept}x != x ]; then
+               ALWAYS_SKIPPED="y"
+               skip_message="skipping SLOW test $testnum"
+       elif [ ${!isexcept_base}x != x ]; then
+               ALWAYS_SKIPPED="y"
+               skip_message="skipping SLOW test $testnum (base $base)"
+       fi
+
+       # If there are tests on the ONLY list, check if the current test
+       # is on that list and, if so, check if the test is to be skipped
+       # and if we are supposed to honor the skip lists.
        if [ -n "$ONLY" ]; then
-               testname=ONLY_$1
-               if [ ${!testname}x != x ]; then
-                       [ -n "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
-                       run_one_logged $1 "$2"
-                       return $?
-               fi
-               testname=ONLY_$base
-               if [ ${!testname}x != x ]; then
-                       [ -n "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
-                       run_one_logged $1 "$2"
-                       return $?
+               local isonly=ONLY_$testnum
+               local isonly_base=ONLY_$base
+               if [[ ${!isonly}x != x || ${!isonly_base}x != x ]]; then
+
+                       if [[ -n "$ALWAYS_SKIPPED" && -n "$HONOR_EXCEPT" ]]; then
+                               LAST_SKIPPED="y"
+                               skip_noexit "$skip_message"
+                               return 0
+                       else
+                               [ -n "$LAST_SKIPPED" ] &&
+                                       echo "" && LAST_SKIPPED=
+                               ALWAYS_SKIPPED=
+                               run_one_logged $testnum "$testmsg"
+                               return $?
+                       fi
+
+               else
+                       LAST_SKIPPED="y"
+                       return 0
                fi
-               LAST_SKIPPED="y"
-               return 0
        fi
 
-       LAST_SKIPPED="y"
-       ALWAYS_SKIPPED="y"
-       testname=EXCEPT_$1
-       if [ ${!testname}x != x ]; then
-               TESTNAME=test_$1 skip_noexit "skipping excluded test $1"
-               return 0
-       fi
-       testname=EXCEPT_$base
-       if [ ${!testname}x != x ]; then
-               TESTNAME=test_$1 skip_noexit "skipping excluded test $1 (base $base)"
-               return 0
-       fi
-       testname=EXCEPT_ALWAYS_$1
-       if [ ${!testname}x != x ]; then
-               TESTNAME=test_$1 skip_noexit "skipping ALWAYS excluded test $1"
-               return 0
-       fi
-       testname=EXCEPT_ALWAYS_$base
-       if [ ${!testname}x != x ]; then
-               TESTNAME=test_$1 skip_noexit "skipping ALWAYS excluded test $1 (base $base)"
-               return 0
-       fi
-       testname=EXCEPT_SLOW_$1
-       if [ ${!testname}x != x ]; then
-               TESTNAME=test_$1 skip_noexit "skipping SLOW test $1"
-               return 0
-       fi
-       testname=EXCEPT_SLOW_$base
-       if [ ${!testname}x != x ]; then
-               TESTNAME=test_$1 skip_noexit "skipping SLOW test $1 (base $base)"
+       if [ -n "$ALWAYS_SKIPPED" ]; then
+               LAST_SKIPPED="y"
+               skip_noexit "$skip_message"
                return 0
+       else
+               run_one_logged $testnum "$testmsg"
+               return $?
        fi
-
-       LAST_SKIPPED=
-       ALWAYS_SKIPPED=
-       run_one_logged $1 "$2"
-
-       return $?
 }
 
 log() {
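The selection variables documented above are passed through the environment when a suite is launched; a typical invocation (test numbers illustrative). Tests named in ONLY normally run even when they also appear on an exclusion list; setting HONOR_EXCEPT makes the exclusion lists win:

	# run only tests 32 and 33a, but honor the EXCEPT list, so 33a is skipped
	ONLY="32 33a" EXCEPT="33a" HONOR_EXCEPT=y bash conf-sanity.sh
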
@@ -6031,10 +6409,7 @@ group descriptors corrupted"
 #
 run_one() {
        local testnum=$1
-       local message=$2
-       export tfile=f${testnum}.${TESTSUITE}
-       export tdir=d${testnum}.${TESTSUITE}
-       export TESTNAME=test_$testnum
+       local testmsg="$2"
        local SAVE_UMASK=`umask`
        umask 0022
 
@@ -6042,7 +6417,7 @@ run_one() {
                $SETUP
        fi
 
-       banner "test $testnum: $message"
+       banner "test $testnum: $testmsg"
        test_${testnum} || error "test_$testnum failed with $?"
        cd $SAVE_PWD
        reset_fail_loc
@@ -6050,12 +6425,9 @@ run_one() {
        check_node_health
        check_dmesg_for_errors || error "Error in dmesg detected"
        if [ "$PARALLEL" != "yes" ]; then
-               ps auxww | grep -v grep | grep -q multiop &&
+               ps auxww | grep -v grep | grep -q "multiop " &&
                                        error "multiop still running"
        fi
-       unset TESTNAME
-       unset tdir
-       unset tfile
        umask $SAVE_UMASK
        $CLEANUP
        return 0
@@ -6068,49 +6440,74 @@ run_one() {
 #  - test result is saved to data file
 #
 run_one_logged() {
-       local BEFORE=$(date +%s)
-       local TEST_ERROR
-       local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log
+       local before=$SECONDS
+       local testnum=$1
+       local testmsg=$2
+       export tfile=f${testnum}.${TESTSUITE}
+       export tdir=d${testnum}.${TESTSUITE}
+       local name=$TESTSUITE.$TESTNAME.test_log.$(hostname -s).log
        local test_log=$LOGDIR/$name
-       local zfs_log_name=${TESTSUITE}.test_${1}.zfs_log
+       local zfs_log_name=$TESTSUITE.$TESTNAME.zfs_log
        local zfs_debug_log=$LOGDIR/$zfs_log_name
-       rm -rf $LOGDIR/err
-       rm -rf $LOGDIR/ignore
-       rm -rf $LOGDIR/skip
        local SAVE_UMASK=$(umask)
+       local rc=0
        umask 0022
 
+       rm -f $LOGDIR/err $LOGDIR/ignore $LOGDIR/skip
        echo
-       log_sub_test_begin test_${1}
-       (run_one $1 "$2") 2>&1 | tee -i $test_log
-       local RC=${PIPESTATUS[0]}
-
-       [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] &&
-               echo "test_$1 returned $RC" | tee $LOGDIR/err
-
-       duration=$(($(date +%s) - $BEFORE))
-       pass "$1" "(${duration}s)"
+       # if ${ONLY_$testnum} set, repeat $ONLY_REPEAT times, otherwise once
+       local isonly=ONLY_$testnum
+       local repeat=${!isonly:+$ONLY_REPEAT}
+
+       for testiter in $(seq ${repeat:-1}); do
+               local before_sub=$SECONDS
+               log_sub_test_begin $TESTNAME
+
+               # remove temp files between repetitions to avoid test failures
+               [ -n "$append" -a -n "$DIR" -a -n "$tdir" -a -n "$tfile" ] &&
+                       rm -rf $DIR/$tdir* $DIR/$tfile*
+               # loop around subshell so stack_trap EXIT triggers each time
+               (run_one $testnum "$testmsg") 2>&1 | tee -i $append $test_log
+               rc=${PIPESTATUS[0]}
+               local append=-a
+               local duration_sub=$((SECONDS - before_sub))
+               local test_error
+
+               [[ $rc != 0 && ! -f $LOGDIR/err ]] &&
+                       echo "$TESTNAME returned $rc" | tee $LOGDIR/err
+
+               if [[ -f $LOGDIR/err ]]; then
+                       test_error=$(cat $LOGDIR/err)
+                       TEST_STATUS="FAIL"
+               elif [[ -f $LOGDIR/ignore ]]; then
+                       test_error=$(cat $LOGDIR/ignore)
+               elif [[ -f $LOGDIR/skip ]]; then
+                       test_error=$(cat $LOGDIR/skip)
+                       TEST_STATUS="SKIP"
+               else
+                       TEST_STATUS="PASS"
+               fi
 
-       if [[ -f $LOGDIR/err ]]; then
-               TEST_ERROR=$(cat $LOGDIR/err)
-       elif [[ -f $LOGDIR/ignore ]]; then
-               TEST_ERROR=$(cat $LOGDIR/ignore)
-       elif [[ -f $LOGDIR/skip ]]; then
-               TEST_ERROR=$(cat $LOGDIR/skip)
-       fi
-       log_sub_test_end $TEST_STATUS $duration "$RC" "$TEST_ERROR"
+               pass "$testnum" "($((SECONDS - before))s)"
+               log_sub_test_end $TEST_STATUS $duration_sub "$rc" "$test_error"
+               [[ $rc != 0 ]] && break
+       done
 
-       if [[ "$TEST_STATUS" != "SKIP" ]] && [[ -f $TF_SKIP ]]; then
+       if [[ "$TEST_STATUS" != "SKIP" && -f $TF_SKIP ]]; then
                rm -f $TF_SKIP
        fi
 
        if [ -f $LOGDIR/err ]; then
                log_zfs_info "$zfs_debug_log"
-               $FAIL_ON_ERROR && exit $RC
+               $FAIL_ON_ERROR && exit $rc
        fi
 
        umask $SAVE_UMASK
 
+       unset TESTNAME
+       unset tdir
+       unset tfile
+
        return 0
 }
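With the repeat loop added above (the subject of this patch), a single subtest can be run back to back, stopping at the first failure; a sketch:

	# run sanity test_39 twenty times in a row, or until it fails
	ONLY=39 ONLY_REPEAT=20 bash sanity.sh
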
 
@@ -6132,9 +6529,9 @@ check_grant() {
        export base=$(basetest $1)
        [ "$CHECK_GRANT" == "no" ] && return 0
 
-       testnamebase=GCHECK_ONLY_${base}
-       testname=GCHECK_ONLY_$1
-       [ ${!testnamebase}x == x -a ${!testname}x == x ] && return 0
+       local isonly_base=GCHECK_ONLY_${base}
+       local isonly=GCHECK_ONLY_$1
+       [ ${!isonly_base}x == x -a ${!isonly}x == x ] && return 0
 
        echo -n "checking grant......"
 
@@ -6370,7 +6767,7 @@ remote_nodes_list () {
 all_mdts_nodes () {
        local host
        local failover_host
-       local nodes="${mds_HOST} ${mdsfailover_HOST}"
+       local nodes
        local nodes_sort
        local i
 
@@ -6380,6 +6777,7 @@ all_mdts_nodes () {
                nodes="$nodes ${!host} ${!failover_host}"
        done
 
+       [ -n "$nodes" ] || nodes="${mds_HOST} ${mdsfailover_HOST}"
        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
        echo -n $nodes_sort
 }
@@ -6388,7 +6786,7 @@ all_mdts_nodes () {
 all_osts_nodes () {
        local host
        local failover_host
-       local nodes="${ost_HOST} ${ostfailover_HOST}"
+       local nodes=
        local nodes_sort
        local i
 
@@ -6398,6 +6796,7 @@ all_osts_nodes () {
                nodes="$nodes ${!host} ${!failover_host}"
        done
 
+       [ -n "$nodes" ] || nodes="${ost_HOST} ${ostfailover_HOST}"
        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
        echo -n $nodes_sort
 }
@@ -6785,23 +7184,25 @@ delayed_recovery_enabled () {
 ########################
 
 convert_facet2label() {
-    local facet=$1
+       local facet=$1
 
-    if [ x$facet = xost ]; then
-       facet=ost1
-    fi
+       if [ x$facet = xost ]; then
+               facet=ost1
+       elif [ x$facet = xmgs ] && combined_mgs_mds ; then
+               facet=mds1
+       fi
 
-    local varsvc=${facet}_svc
+       local varsvc=${facet}_svc
 
-    if [ -n ${!varsvc} ]; then
-        echo ${!varsvc}
-    else
-        error "No lablel for $facet!"
-    fi
+       if [ -n ${!varsvc} ]; then
+               echo ${!varsvc}
+       else
+               error "No label for $facet!"
+       fi
 }
 
 get_clientosc_proc_path() {
-       echo "${1}-osc-ffff*"
+       echo "${1}-osc-[-0-9a-f]*"
 }
 
 # If the 2.0 MDS was mounted on 1.8 device, then the OSC and LOV names
@@ -6945,7 +7346,7 @@ _wait_osc_import_state() {
        local ost_facet=$2
        local expected=$3
        local target=$(get_osc_import_name $facet $ost_facet)
-       local param="osc.${target}.ost_server_uuid"
+       local param="os[cp].${target}.ost_server_uuid"
        local params=$param
        local i=0
 
@@ -7137,6 +7538,7 @@ wait_osp_active() {
        # wait until all MDTs are in the expected state
        for ((num = 1; num <= $MDSCOUNT; num++)); do
                local mdtosp=$(get_mdtosc_proc_path mds${num} ${tgt_name})
+               local wait=0
                local mproc
 
                if [ $facet = "mds" ]; then
@@ -7151,11 +7553,10 @@ wait_osp_active() {
                        sleep 5
                        local result=$(do_facet mds${num} "$LCTL get_param -n $mproc")
                        local max=30
-                       local wait=0
 
                        [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc"
                        if [ $result -eq $expected ]; then
-                               echo -n "target updated after"
+                               echo -n "target updated after "
                                echo "$wait sec (got $result)"
                                break
                        fi
@@ -7257,6 +7658,8 @@ destroy_pool_int() {
        for ost in $OSTS; do
                do_facet mgs lctl pool_remove $1 $ost
        done
+       wait_update_facet $SINGLEMDS "lctl pool_list $1 | wc -l" "1" ||
+               error "MDS: pool_list $1 failed"
        do_facet mgs lctl pool_destroy $1
 }
 
@@ -7580,6 +7983,8 @@ flvr_cnt_cli2ost()
     local clients=${CLIENTS:-$HOSTNAME}
 
     for c in ${clients//,/ }; do
+       # reconnect if idle
+       do_node $c lctl set_param osc.*.idle_connect=1 >/dev/null 2>&1
        local output=$(do_node $c lctl get_param -n \
                 osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null)
        local tmpcnt=$(count_flvr "$output" $flavor)
@@ -7635,12 +8040,12 @@ flvr_cnt_mdt2ost()
         mdtosc=$(get_mdtosc_proc_path mds$num)
         mdtosc=${mdtosc/-MDT*/-MDT\*}
        local output=$(do_facet mds$num lctl get_param -n \
-                osc.$mdtosc.$PROC_CLI 2>/dev/null)
+                      os[cp].$mdtosc.$PROC_CLI 2>/dev/null)
        local tmpcnt=$(count_flvr "$output" $flavor)
        if $GSS_SK && [ $flavor != "null" ]; then
                # tmpcnt=min(contexts,flavors) to ensure SK context is on
                output=$(do_facet mds$num lctl get_param -n \
-                        osc.$mdtosc.$PROC_CON 2>/dev/null)
+                        os[cp].$mdtosc.$PROC_CON 2>/dev/null)
                local outcon=$(count_contexts "$output")
                if [ "$outcon" -lt "$tmpcnt" ]; then
                        tmpcnt=$outcon
@@ -8064,13 +8469,15 @@ get_obd_size() {
 
 #
 # Get the page size (bytes) on a given facet node.
+# The local client page_size is directly available in PAGE_SIZE.
 #
 get_page_size() {
        local facet=$1
-       local size=$(getconf PAGE_SIZE 2>/dev/null)
+       local page_size=$(getconf PAGE_SIZE 2>/dev/null)
 
-       [ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
-       echo -n ${size:-4096}
+       [ -z "$CLIENTONLY" -a "$facet" != "client" ] &&
+               page_size=$(do_facet $facet getconf PAGE_SIZE)
+       echo -n ${page_size:-4096}
 }
 
 #
@@ -8101,7 +8508,7 @@ get_block_size() {
 # ldiskfs xattrs over one block in size.  Allow both the historical
 # Lustre feature name (large_xattr) and the upstream name (ea_inode).
 large_xattr_enabled() {
-       [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 0
+       [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 1
 
        local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
 
@@ -8112,21 +8519,7 @@ large_xattr_enabled() {
 
 # Get the maximum xattr size supported by the filesystem.
 max_xattr_size() {
-    local size
-
-    if large_xattr_enabled; then
-        # include/linux/limits.h: #define XATTR_SIZE_MAX 65536
-        size=65536
-    else
-        local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
-        local block_size=$(get_block_size $SINGLEMDS $mds_dev)
-
-        # maximum xattr size = size of block - size of header -
-        #                      size of 1 entry - 4 null bytes
-        size=$((block_size - 32 - 32 - 4))
-    fi
-
-    echo $size
+       $LCTL get_param -n llite.*.max_easize
 }
 
 # Dump the value of the named xattr from a file.
@@ -8265,14 +8658,14 @@ generate_logname() {
 test_mkdir() {
        local path
        local p_option
-       local stripe_count=2
-       local stripe_index=-1
+       local dirstripe_count=${DIRSTRIPE_COUNT:-"2"}
+       local dirstripe_index=${DIRSTRIPE_INDEX:-$((base % $MDSCOUNT))}
        local OPTIND=1
 
        while getopts "c:i:p" opt; do
                case $opt in
-                       c) stripe_count=$OPTARG;;
-                       i) stripe_index=$OPTARG;;
+                       c) dirstripe_count=$OPTARG;;
+                       i) dirstripe_index=$OPTARG;;
                        p) p_option="-p";;
                        \?) error "only support -i -c -p";;
                esac
@@ -8295,17 +8688,25 @@ test_mkdir() {
        if [ $MDSCOUNT -le 1 ]; then
                mkdir $path || error "mkdir '$path' failed"
        else
-               local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
                local mdt_index
 
-               if [ $stripe_index -eq -1 ]; then
-                       mdt_index=$((test_num % MDSCOUNT))
+               if [ $dirstripe_index -eq -1 ]; then
+                       mdt_index=$((base % MDSCOUNT))
+               else
+                       mdt_index=$dirstripe_index
+               fi
+
+               if (($MDS1_VERSION >= $(version_code 2.8.0))); then
+                       if [ $dirstripe_count -eq -1 ]; then
+                               dirstripe_count=$((RANDOM % MDSCOUNT + 1))
+                       fi
                else
-                       mdt_index=$stripe_index
+                       dirstripe_count=1
                fi
-               echo "striped dir -i$mdt_index -c$stripe_count $path"
-               $LFS mkdir -i$mdt_index -c$stripe_count $path ||
-                       error "mkdir -i $mdt_index -c$stripe_count $path failed"
+
+               echo "striped dir -i$mdt_index -c$dirstripe_count $path"
+               $LFS mkdir -i$mdt_index -c$dirstripe_count $path ||
+                       error "mkdir -i $mdt_index -c$dirstripe_count $path failed"
        fi
 }
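+# Illustrative usage (directory name and stripe options are examples only):
+#   test_mkdir -p -i 0 -c $MDSCOUNT $DIR/$tdir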
 
@@ -8359,7 +8760,7 @@ check_file_in_pool()
        local file=$1
        local pool=$2
        local tlist="$3"
-       local res=$($GETSTRIPE $file | grep 0x | cut -f2)
+       local res=$($LFS getstripe $file | grep 0x | cut -f2)
        for i in $res
        do
                for t in $tlist ; do
@@ -8391,7 +8792,11 @@ pool_add_targets() {
        local last=$3
        local step=${4:-1}
 
-       local list=$(seq $first $step $last)
+       if [ -z "$last" ]; then
+               local list=$first
+       else
+               local list=$(seq $first $step $last)
+       fi
 
        local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done)
        do_facet mgs $LCTL pool_add \
@@ -8427,7 +8832,7 @@ pool_set_dir() {
        local tdir=$2
        echo "Setting pool on directory $tdir"
 
-       $SETSTRIPE -c 2 -p $pool $tdir && return 0
+       $LFS setstripe -c 2 -p $pool $tdir && return 0
 
        error_noexit "Cannot set pool $pool to $tdir"
        return 1
@@ -8438,7 +8843,7 @@ pool_check_dir() {
        local tdir=$2
        echo "Checking pool on directory $tdir"
 
-       local res=$($GETSTRIPE --pool $tdir | sed "s/\s*$//")
+       local res=$($LFS getstripe --pool $tdir | sed "s/\s*$//")
        [ "$res" = "$pool" ] && return 0
 
        error_noexit "Pool on '$tdir' is '$res', not '$pool'"
@@ -8493,7 +8898,7 @@ pool_create_files() {
        for i in $(seq -w 1 $count)
        do
                local file=$tdir/spoo-$i
-               $SETSTRIPE -p $pool $file
+               $LFS setstripe -p $pool $file
                check_file_in_pool $file $pool "$tlist" || \
                        failed=$((failed + 1))
        done
@@ -8527,11 +8932,11 @@ pool_file_rel_path() {
        mkdir -p $tdir ||
                { error_noexit "unable to create $tdir"; return 1 ; }
        local file="/..$tdir/$tfile-1"
-       $SETSTRIPE -p $pool $file ||
+       $LFS setstripe -p $pool $file ||
                { error_noexit "unable to create $file" ; return 2 ; }
 
        cd $tdir
-       $SETSTRIPE -p $pool $tfile-2 || {
+       $LFS setstripe -p $pool $tfile-2 || {
                error_noexit "unable to create $tfile-2 in $tdir"
                return 3
        }
@@ -8590,7 +8995,7 @@ pool_remove_all_targets() {
                return 2
        }
        # setstripe on an empty pool should fail
-       $SETSTRIPE -p $pool $file 2>/dev/null && {
+       $LFS setstripe -p $pool $file 2>/dev/null && {
                error_noexit "expected failure when creating file" \
                                                        "with empty pool"
                return 3
@@ -8613,7 +9018,7 @@ pool_remove() {
                return 1
        }
        # setstripe on an empty pool should fail
-       $SETSTRIPE -p $pool $file 2>/dev/null && {
+       $LFS setstripe -p $pool $file 2>/dev/null && {
                error_noexit "expected failure when creating file" \
                                                        "with missing pool"
                return 2
@@ -8640,15 +9045,15 @@ check_stripe_count() {
        [[ -z "$file" || -z "$expected" ]] &&
                error "check_stripe_count: invalid argument"
 
-       local cmd="$GETSTRIPE -c $file"
+       local cmd="$LFS getstripe -c $file"
        actual=$($cmd) || error "$cmd failed"
        actual=${actual%% *}
 
        if [[ $actual -ne $expected ]]; then
-               [[ $expected -eq -1 ]] ||
-                       error "$cmd wrong: found $actual, expected $expected"
-               [[ $actual -eq $OSTCOUNT ]] ||
-                       error "$cmd wrong: found $actual, expected $OSTCOUNT"
+               [[ $expected -eq -1 ]] || { $LFS getstripe $file;
+                       error "$cmd not expected ($expected): found $actual"; }
+               [[ $actual -eq $OSTCOUNT ]] || { $LFS getstripe $file;
+                       error "$cmd not OST count ($OSTCOUNT): found $actual"; }
        fi
 }
 
@@ -8662,7 +9067,7 @@ check_obdidx() {
        [[ -z "$file" || -z "$expected" ]] &&
                error "check_obdidx: invalid argument!"
 
-       obdidx=$(comma_list $($GETSTRIPE $file | grep -A $OSTCOUNT obdidx |
+       obdidx=$(comma_list $($LFS getstripe $file | grep -A $OSTCOUNT obdidx |
                              grep -v obdidx | awk '{print $1}' | xargs))
 
        [[ $obdidx = $expected ]] ||
@@ -8680,8 +9085,8 @@ check_start_ost_idx() {
        [[ -z "$file" || -z "$expected" ]] &&
                error "check_start_ost_idx: invalid argument!"
 
-       start_ost_idx=$($GETSTRIPE $file | grep -A 1 obdidx | grep -v obdidx |
-                       awk '{print $1}')
+       start_ost_idx=$($LFS getstripe $file | grep -A 1 obdidx |
+                        grep -v obdidx | awk '{print $1}')
 
        [[ $start_ost_idx = $expected ]] ||
                error "OST index of the first stripe on $file is" \
@@ -9312,3 +9717,593 @@ save_layout_restore_at_exit() {
 
        stack_trap "restore_layout $dir $layout" EXIT
 }
+
+verify_yaml_layout() {
+       local src=$1
+       local dst=$2
+       local temp=$3
+       local msg_prefix=$4
+
+       echo "getstripe --yaml $src"
+       $LFS getstripe --yaml $src > $temp || error "getstripe $src failed"
+       echo "setstripe --yaml=$temp $dst"
+       $LFS setstripe --yaml=$temp $dst || error "setstripe $dst failed"
+
+       echo "compare"
+       local layout1=$(get_layout_param $src)
+       local layout2=$(get_layout_param $dst)
+       # compare their layout info
+       [ "$layout1" == "$layout2" ] ||
+               error "$msg_prefix $src/$dst layouts are not equal"
+}
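+# Illustrative usage (paths and message prefix are examples only):
+#   verify_yaml_layout $DIR/$tfile $DIR/$tfile.copy $TMP/layout.yaml "sanity"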
+
+is_project_quota_supported() {
+       $ENABLE_PROJECT_QUOTAS || return 1
+
+       [[ "$(facet_fstype $SINGLEMDS)" == "ldiskfs" &&
+          $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.9.55) ]] &&
+               do_facet mds1 lfs --help |& grep -q project && return 0
+
+       [[ "$(facet_fstype $SINGLEMDS)" == "zfs" &&
+          $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.10.53) ]] &&
+               do_facet mds1 $ZPOOL get all | grep -q project_quota && return 0
+
+       return 1
+}
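+# Illustrative usage in a test:
+#   is_project_quota_supported || skip "project quota is not supported"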
+
+# ZFS project quota enable/disable:
+#   This feature will become active as soon as it is enabled and will never
+#   return to being disabled. Each filesystem will be upgraded automatically
+#   when remounted or when a new file is created under that filesystem. The
+#   upgrade can also be triggered on filesystems via `zfs set version=current
+#   <pool/fs>`. The upgrade process runs in the background and may take a
+#   while to complete for filesystems containing a large number of files.
+enable_project_quota() {
+       is_project_quota_supported || return 0
+       local zkeeper=${KEEP_ZPOOL}
+       stack_trap "KEEP_ZPOOL=$zkeeper" EXIT
+       KEEP_ZPOOL="true"
+       stopall || error "failed to stopall (1)"
+
+       local zfeat_en="feature@project_quota=enabled"
+       for facet in $(seq -f mds%g $MDSCOUNT) $(seq -f ost%g $OSTCOUNT); do
+               local facet_fstype=${facet:0:3}1_FSTYPE
+               local devname
+
+               if [ "${!facet_fstype}" = "zfs" ]; then
+                       devname=$(zpool_name ${facet})
+                       do_facet ${facet} $ZPOOL set "$zfeat_en" $devname ||
+                               error "$ZPOOL set $zfeat_en $devname"
+               else
+                       [ ${facet:0:3} == "mds" ] &&
+                               devname=$(mdsdevname ${facet:3}) ||
+                               devname=$(ostdevname ${facet:3})
+                       do_facet ${facet} $TUNE2FS -O project $devname ||
+                               error "tune2fs $devname failed"
+               fi
+       done
+
+       KEEP_ZPOOL="${zkeeper}"
+       mount
+       setupall
+}
+
+disable_project_quota() {
+       is_project_quota_supported || return 0
+       [ "$mds1_FSTYPE" != "ldiskfs" ] && return 0
+       stopall || error "failed to stopall (1)"
+
+       for num in $(seq $MDSCOUNT); do
+               do_facet mds$num $TUNE2FS -Q ^prj $(mdsdevname $num) ||
+                       error "tune2fs $(mdsdevname $num) failed"
+       done
+
+       for num in $(seq $OSTCOUNT); do
+               do_facet ost$num $TUNE2FS -Q ^prj $(ostdevname $num) ||
+                       error "tune2fs $(ostdevname $num) failed"
+       done
+
+       mount
+       setupall
+}
+
+#
+# In order to test multiple remote HSM agents, a new facet type named "AGT" and
+# the following associated variables are added:
+#
+# AGTCOUNT: number of agents
+# AGTDEV{N}: target HSM mount point (root path of the backend)
+# agt{N}_HOST: hostname of the agent agt{N}
+# SINGLEAGT: facet of the single agent
+#
+# The number of agents is initialized as the number of remote client nodes.
+# By default, only a single copytool is started on a remote client/agent. If
+# there is no remote client, then the copytool is started on the local client.
+#
+init_agt_vars() {
+       local n
+       local agent
+
+       export AGTCOUNT=${AGTCOUNT:-$((CLIENTCOUNT - 1))}
+       [[ $AGTCOUNT -gt 0 ]] || AGTCOUNT=1
+
+       export SHARED_DIRECTORY=${SHARED_DIRECTORY:-$TMP}
+       if [[ $CLIENTCOUNT -gt 1 ]] &&
+               ! check_shared_dir $SHARED_DIRECTORY $CLIENTS; then
+               skip_env "SHARED_DIRECTORY should be accessible"\
+                        "on all client nodes"
+               exit 0
+       fi
+
+       # We used to put the HSM archive in $SHARED_DIRECTORY but that
+       # meant NFS issues could hose sanity-hsm sessions. So now we
+       # use $TMP instead.
+       for n in $(seq $AGTCOUNT); do
+               eval export AGTDEV$n=\$\{AGTDEV$n:-"$TMP/arc$n"\}
+               agent=CLIENT$((n + 1))
+               if [[ -z "${!agent}" ]]; then
+                       [[ $CLIENTCOUNT -eq 1 ]] && agent=CLIENT1 ||
+                               agent=CLIENT2
+               fi
+               eval export agt${n}_HOST=\$\{agt${n}_HOST:-${!agent}\}
+               local var=agt${n}_HOST
+               [[ ! -z "${!var}" ]] || error "agt${n}_HOST is empty!"
+       done
+
+       export SINGLEAGT=${SINGLEAGT:-agt1}
+
+       export HSMTOOL=${HSMTOOL:-"lhsmtool_posix"}
+       export HSMTOOL_VERBOSE=${HSMTOOL_VERBOSE:-""}
+       export HSMTOOL_UPDATE_INTERVAL=${HSMTOOL_UPDATE_INTERVAL:=""}
+       export HSMTOOL_EVENT_FIFO=${HSMTOOL_EVENT_FIFO:=""}
+       export HSMTOOL_TESTDIR
+       export HSMTOOL_BASE=$(basename "$HSMTOOL" | cut -f1 -d" ")
+
+       HSM_ARCHIVE_NUMBER=2
+
+       # The tests only support up to 10 MDTs
+       MDT_PREFIX="mdt.$FSNAME-MDT000"
+       HSM_PARAM="${MDT_PREFIX}0.hsm"
+
+       # archive is purged at copytool setup
+       HSM_ARCHIVE_PURGE=true
+
+       # Don't allow copytool errors upon start/setup
+       HSMTOOL_NOERROR=false
+}
+
+# Get the backend root path for the given agent facet.
+copytool_device() {
+       local facet=$1
+       local dev=AGTDEV$(facet_number $facet)
+
+       echo -n ${!dev}
+}
+
+get_mdt_devices() {
+       local mdtno
+       # get MDT device for each mdc
+       for mdtno in $(seq 1 $MDSCOUNT); do
+               local idx=$(($mdtno - 1))
+               MDT[$idx]=$($LCTL get_param -n \
+                       mdc.$FSNAME-MDT000${idx}-mdc-*.mds_server_uuid |
+                       awk '{gsub(/_UUID/,""); print $1}' | head -n1)
+       done
+}
+
+search_copytools() {
+       local hosts=${1:-$(facet_active_host $SINGLEAGT)}
+       do_nodesv $hosts "pgrep -x $HSMTOOL_BASE"
+}
+
+kill_copytools() {
+       local hosts=${1:-$(facet_active_host $SINGLEAGT)}
+
+       echo "Killing existing copytools on $hosts"
+       do_nodesv $hosts "killall -q $HSMTOOL_BASE" || true
+}
+
+wait_copytools() {
+       local hosts=${1:-$(facet_active_host $SINGLEAGT)}
+       local wait_timeout=200
+       local wait_start=$SECONDS
+       local wait_end=$((wait_start + wait_timeout))
+       local sleep_time=100000 # 0.1 second
+
+       while ((SECONDS < wait_end)); do
+               if ! search_copytools $hosts; then
+                       echo "copytools stopped in $((SECONDS - wait_start))s"
+                       return 0
+               fi
+
+               echo "copytools still running on $hosts"
+               usleep $sleep_time
+       [ $sleep_time -lt 32000000 ] && # cap backoff at 32 seconds
+                       sleep_time=$(bc <<< "$sleep_time * 2")
+       done
+
+       # try to dump Copytool's stack
+       do_nodesv $hosts "echo 1 >/proc/sys/kernel/sysrq ; " \
+                        "echo t >/proc/sysrq-trigger"
+
+       echo "copytools failed to stop in ${wait_timeout}s"
+
+       return 1
+}
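+# Illustrative teardown sequence (host list defaults to the single agent):
+#   kill_copytools
+#   wait_copytools || error "copytools failed to stop"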
+
+copytool_monitor_cleanup() {
+       local facet=${1:-$SINGLEAGT}
+       local agent=$(facet_active_host $facet)
+
+       if [ -n "$HSMTOOL_MONITOR_DIR" ]; then
+               # Should die when the copytool dies, but just in case.
+               local cmd="kill \\\$(cat $HSMTOOL_MONITOR_DIR/monitor_pid)"
+               cmd+=" 2>/dev/null || true"
+               do_node $agent "$cmd"
+               do_node $agent "rm -fr $HSMTOOL_MONITOR_DIR"
+               export HSMTOOL_MONITOR_DIR=
+       fi
+
+       # The pdsh should die on its own when the monitor dies. Just
+       # in case, though, try to clean up to avoid any cruft.
+       if [ -n "$HSMTOOL_MONITOR_PDSH" ]; then
+               kill $HSMTOOL_MONITOR_PDSH 2>/dev/null || true
+               export HSMTOOL_MONITOR_PDSH=
+       fi
+}
+
+copytool_logfile()
+{
+       local host="$(facet_host "$1")"
+       local prefix=$TESTLOG_PREFIX
+       [ -n "$TESTNAME" ] && prefix+=.$TESTNAME
+
+       printf "${prefix}.copytool${archive_id}_log.${host}.log"
+}
+
+__lhsmtool_rebind()
+{
+       do_facet $facet $HSMTOOL -p "$hsm_root" --rebind "$@" "$mountpoint"
+}
+
+__lhsmtool_import()
+{
+       mkdir -p "$(dirname "$2")" ||
+               error "cannot create directory '$(dirname "$2")'"
+       do_facet $facet $HSMTOOL -p "$hsm_root" --import "$@" "$mountpoint"
+}
+
+__lhsmtool_setup()
+{
+       local cmd="$HSMTOOL $HSMTOOL_VERBOSE --daemon --hsm-root \"$hsm_root\""
+       [ -n "$bandwidth" ] && cmd+=" --bandwidth $bandwidth"
+       [ -n "$archive_id" ] && cmd+=" --archive $archive_id"
+       [ ${#misc_options[@]} -gt 0 ] &&
+               cmd+=" $(IFS=" " echo "$@")"
+       cmd+=" \"$mountpoint\""
+
+       echo "Starting copytool $facet on $(facet_host $facet)"
+       stack_trap "do_facet $facet libtool execute pkill -x '$HSMTOOL' || true" EXIT
+       do_facet $facet "$cmd < /dev/null > \"$(copytool_logfile $facet)\" 2>&1"
+}
+
+hsm_root() {
+       local facet="${1:-$SINGLEAGT}"
+
+       printf "$(copytool_device "$facet")/${TESTSUITE}.${TESTNAME}/"
+}
+
+# Main entry point to perform copytool related operations
+#
+# Sub-commands:
+#
+#      setup   setup a copytool to run in the background, that copytool will be
+#              killed on EXIT
+#      import  import a file from an HSM backend
+#      rebind  rebind an archived file to a new fid
+#
+# Although the semantics might suggest otherwise, one does not need to 'setup'
+# a copytool before a call to 'copytool import' or 'copytool rebind'.
+#
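+# Illustrative usage (facet, archive ID and paths are examples only):
+#
+#      copytool setup -f agt2 -a 1 -m "$MOUNT"
+#      copytool import "$tdir/$tfile" "$MOUNT/$tdir/$tfile"
+#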
+copytool()
+{
+       local action=$1
+       shift
+
+       # Use default values
+       local facet=$SINGLEAGT
+       local mountpoint="${MOUNT2:-$MOUNT}"
+       local hsm_root="${hsm_root:-$(hsm_root "$facet")}"
+
+       # Parse arguments
+       local fail_on_error=true
+       local -a misc_options
+       while [ $# -gt 0 ]; do
+               case "$1" in
+               -f|--facet)
+                       shift
+                       facet="$1"
+                       ;;
+               -m|--mountpoint)
+                       shift
+                       mountpoint="$1"
+                       ;;
+               -a|--archive-id)
+                       shift
+                       local archive_id="$1"
+                       ;;
+               -h|--hsm-root)
+                       shift
+                       hsm_root="$1"
+                       ;;
+               -b|--bwlimit)
+                       shift
+                       local bandwidth="$1" # in MB/s
+                       ;;
+               -n|--no-fail)
+                       local fail_on_error=false
+                       ;;
+               *)
+                       # Uncommon (copytool-dependent) option
+                       misc_options+=("$1")
+                       ;;
+               esac
+               shift
+       done
+
+       stack_trap "do_facet $facet rm -rf '$hsm_root'" EXIT
+       do_facet $facet mkdir -p "$hsm_root" ||
+               error "mkdir '$hsm_root' failed"
+
+       case "$HSMTOOL" in
+       lhsmtool_posix)
+               local copytool=lhsmtool
+               ;;
+       esac
+
+       __${copytool}_${action} "${misc_options[@]}"
+       if [ $? -ne 0 ]; then
+               local error_msg
+
+               case $action in
+               setup)
+                       local host="$(facet_host $facet)"
+                       error_msg="Failed to start copytool $facet on '$host'"
+                       ;;
+               import)
+                       local src="${misc_options[0]}"
+                       local dest="${misc_options[1]}"
+                       error_msg="Failed to import '$src' to '$dest'"
+                       ;;
+               rebind)
+                       error_msg="could not rebind file"
+                       ;;
+               esac
+
+               $fail_on_error && error "$error_msg" || echo "$error_msg"
+       fi
+}
+
+needclients() {
+       local client_count=$1
+       if [[ $CLIENTCOUNT -lt $client_count ]]; then
+               skip "Need $client_count or more clients, have $CLIENTCOUNT"
+               return 1
+       fi
+       return 0
+}
+
+path2fid() {
+       $LFS path2fid $1 | tr -d '[]'
+       return ${PIPESTATUS[0]}
+}
+
+get_hsm_flags() {
+       local f=$1
+       local u=$2
+       local st
+
+       if [[ $u == "user" ]]; then
+               st=$($RUNAS $LFS hsm_state $f)
+       else
+               u=root
+               st=$($LFS hsm_state $f)
+       fi
+
+       [[ $? == 0 ]] || error "$LFS hsm_state $f failed (run as $u)"
+
+       st=$(echo $st | cut -f 2 -d" " | tr -d "()," )
+       echo $st
+}
+
+check_hsm_flags() {
+       local f=$1
+       local fl=$2
+
+       local st=$(get_hsm_flags $f)
+       [[ $st == $fl ]] || error "hsm flags on $f are $st != $fl"
+}
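+# Illustrative usage (the expected flag value is only an example):
+#   check_hsm_flags $DIR/$tfile "0x00000009"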
+
+mdts_set_param() {
+       local arg=$1
+       local key=$2
+       local value=$3
+       local mdtno
+       local rc=0
+       if [[ "$value" != "" ]]; then
+               value="=$value"
+       fi
+       for mdtno in $(seq 1 $MDSCOUNT); do
+               local idx=$(($mdtno - 1))
+               local facet=mds${mdtno}
+               # If $arg includes the -P option, run one set_param per MDT on
+               # the MGS; otherwise, run set_param on each MDT.
+               [[ $arg = *"-P"* ]] && facet=mgs
+               do_facet $facet $LCTL set_param $arg mdt.${MDT[$idx]}.$key$value
+               [[ $? != 0 ]] && rc=1
+       done
+       return $rc
+}
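+# Illustrative usage (parameter and value are examples only):
+#   mdts_set_param "-P" hsm_control "enabled"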
+
+wait_result() {
+       local facet=$1
+       shift
+       wait_update --verbose $(facet_active_host $facet) "$@"
+}
+
+mdts_check_param() {
+       local key="$1"
+       local target="$2"
+       local timeout="$3"
+       local mdtno
+       for mdtno in $(seq 1 $MDSCOUNT); do
+               local idx=$(($mdtno - 1))
+               wait_result mds${mdtno} \
+                       "$LCTL get_param -n $MDT_PREFIX${idx}.$key" "$target" \
+                       $timeout ||
+                       error "$key state is not '$target' on mds${mdtno}"
+       done
+}
+
+cdt_set_mount_state() {
+       mdts_set_param "-P" hsm_control "$1"
+       # set_param -P is an asynchronous operation and could race with a plain
+       # set_param. In that case the configs could be retrieved and applied at
+       # the MGC only after set_param -P completes. Sleep here to avoid the
+       # race. We need at least 20 seconds: 10 for mgc_requeue_thread to wake
+       # up (MGC_TIMEOUT_MIN_SECONDS + MGC_TIMEOUT_RAND_CENTISEC = 5 + 5) and
+       # 10 more to retrieve the config from the server.
+       sleep 20
+}
+
+cdt_check_state() {
+       mdts_check_param hsm_control "$1" 20
+}
+
+cdt_set_sanity_policy() {
+       if [[ "$CDT_POLICY_HAD_CHANGED" ]]; then
+               # clear all
+               mdts_set_param "" hsm.policy "+NRA"
+               mdts_set_param "" hsm.policy "-NBR"
+               CDT_POLICY_HAD_CHANGED=
+       fi
+}
+
+set_hsm_param() {
+       local param=$1
+       local value=$2
+       local opt=$3
+       mdts_set_param "$opt -n" "hsm.$param" "$value"
+       return $?
+}
+
+wait_request_state() {
+       local fid=$1
+       local request=$2
+       local state=$3
+       # 4th arg (mdt index) is optional
+       local mdtidx=${4:-0}
+       local mds=mds$(($mdtidx + 1))
+
+       local cmd="$LCTL get_param -n ${MDT_PREFIX}${mdtidx}.hsm.actions"
+       cmd+=" | awk '/'$fid'.*action='$request'/ {print \\\$13}' | cut -f2 -d="
+
+       wait_result $mds "$cmd" "$state" 200 ||
+               error "request on $fid is not $state on $mds"
+}
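+# Illustrative usage (request and state values are examples only):
+#   local fid=$(path2fid $DIR/$tfile)
+#   wait_request_state $fid ARCHIVE SUCCEED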
+
+
+rmultiop_start() {
+       local client=$1
+       local file=$2
+       local cmds=$3
+       local WAIT_MAX=${4:-60}
+       local wait_time=0
+
+       # We need to run do_node in the background, because pdsh does not exit
+       # while a child process of the run script still exists.
+       # I.e. pdsh does not exit when runmultiop_bg_pause has exited, because
+       # multiop_bg_pause launched $MULTIOP_PROG in the background.
+       # For the same reason we need to sleep a bit after do_node starts, to
+       # let runmultiop_bg_pause start multiop and update /tmp/multiop_bg.pid.
+       # The rm of /tmp/multiop_bg.pid guarantees that the /tmp/multiop_bg.pid
+       # file we read has been updated by runmultiop_bg_pause.
+
+       local pid_file=$TMP/multiop_bg.pid.$$
+
+       do_node $client "MULTIOP_PID_FILE=$pid_file LUSTRE= \
+                       runmultiop_bg_pause $file $cmds" &
+       local pid=$!
+       local multiop_pid
+
+       while [[ $wait_time -lt $WAIT_MAX ]]; do
+               sleep 3
+               wait_time=$((wait_time + 3))
+               multiop_pid=$(do_node $client cat $pid_file)
+               if [ -n "$multiop_pid" ]; then
+                       break
+               fi
+       done
+
+       [ -n "$multiop_pid" ] ||
+               error "$client: cannot get multiop_pid from $pid_file"
+
+       eval export $(node_var_name $client)_multiop_pid=$multiop_pid
+       eval export $(node_var_name $client)_do_node_pid=$pid
+       local var=$(node_var_name $client)_multiop_pid
+       echo client $client multiop_bg started multiop_pid=${!var}
+       return $?
+}
+
+rmultiop_stop() {
+       local client=$1
+       local multiop_pid=$(node_var_name $client)_multiop_pid
+       local do_node_pid=$(node_var_name $client)_do_node_pid
+
+       echo "Stopping multiop_pid=${!multiop_pid} (kill ${!multiop_pid} on $client)"
+       do_node $client kill -USR1 ${!multiop_pid}
+
+       wait ${!do_node_pid}
+}
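+# Illustrative usage (client, file and multiop command string are examples):
+#   rmultiop_start $CLIENT2 $DIR/$tfile O_c || error "multiop failed to start"
+#   ... exercise the file while it is held open ...
+#   rmultiop_stop $CLIENT2 || error "multiop failed to stop"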
+
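+# Sleep for twice the lo[vd] qos_maxage setting, so the MDS has had time to
+# refresh OST free-space (statfs) data before space-related checks.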
+sleep_maxage() {
+       local delay=$(do_facet $SINGLEMDS lctl get_param -n lo[vd].*.qos_maxage |
+                     awk '{ print $1 * 2; exit; }')
+       sleep $delay
+}
+
+check_component_count() {
+       local comp_cnt=$($LFS getstripe --component-count $1)
+       [ $comp_cnt -eq $2 ] || error "$1, component count $comp_cnt != $2"
+}
+
+# Verify there are no init components with the "extension" flag
+verify_no_init_extension() {
+       local flg_opts="--component-flags init,extension"
+       local found=$($LFS find $flg_opts $1 | wc -l)
+       [ $found -eq 0 ] || error "$1 has component with initialized extension"
+}
+
+# Verify there is at least one component starting at 0
+verify_comp_at_zero() {
+       local flg_opts="--component-flags init"
+       local found=$($LFS find --component-start 0M $flg_opts $1 | wc -l)
+       [ $found -eq 1 ] ||
+               error "No component starting at zero(!)"
+}
+
+# version after which Self-Extending Layouts are available
+SEL_VER="2.12.55"
+
+sel_layout_sanity() {
+       local file=$1
+       local comp_cnt=$2
+
+       verify_no_init_extension $file
+       verify_comp_at_zero $file
+       check_component_count $file $comp_cnt
+}
+