LU-10626 test: create custom udev rule
[fs/lustre-release.git] / lustre/tests/test-framework.sh
index 935708b..9caa3f7 100755 (executable)
@@ -22,6 +22,7 @@ export SK_UNIQUE_NM=${SK_UNIQUE_NM:-false}
 export SK_S2S=${SK_S2S:-false}
 export SK_S2SNM=${SK_S2SNM:-TestFrameNM}
 export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli}
+export SK_SKIPFIRST=${SK_SKIPFIRST:-true}
 export IDENTITY_UPCALL=default
 export QUOTA_AUTO=1
 export FLAKEY=${FLAKEY:-true}
@@ -156,6 +157,7 @@ init_test_env() {
        export DO_CLEANUP=${DO_CLEANUP:-true}
        export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
        export CLEANUP_DM_DEV=false
+       export PAGE_SIZE=$(get_page_size client)
 
        export MKE2FS=$MKE2FS
        if [ -z "$MKE2FS" ]; then
@@ -274,6 +276,8 @@ init_test_env() {
        SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"}
        GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"}
 
+       export PERM_CMD=${PERM_CMD:-"$LCTL conf_param"}
+
        export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
        if [ ! -f "$L_GETIDENTITY" ]; then
                if `which l_getidentity > /dev/null 2>&1`; then
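Note: PERM_CMD defaults to the old-style "$LCTL conf_param"; the quota and
jobstats hunks below branch on whether a test run has overridden it to the
newer "set_param -P" form. A minimal sketch of selecting the new style,
assuming a file system named testfs (the parameter names shown are the ones
used later in this patch):

    export PERM_CMD="$LCTL set_param -P"
    # with this setting the framework issues, for example,
    #   lctl set_param -P osd-*.testfs-MDT*.quota_slave.enable=ug
    # instead of the conf_param form
    #   lctl conf_param testfs.quota.mdt=ug
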
@@ -606,6 +610,15 @@ load_modules_local() {
                return 0
        fi
 
+       # Create special udev test rules on every node
+       if [ -f $LUSTRE/lustre/conf/99-lustre.rules ]; then {
+               sed -e 's|/usr/sbin/lctl|$LCTL|g' $LUSTRE/lustre/conf/99-lustre.rules > /etc/udev/rules.d/99-lustre-test.rules
+       } else {
+               echo "SUBSYSTEM==\"lustre\", ACTION==\"change\", ENV{PARAM}==\"?*\", RUN+=\"$LCTL set_param '\$env{PARAM}=\$env{SETTING}'\"" > /etc/udev/rules.d/99-lustre-test.rules
+       } fi
+       udevadm control --reload-rules
+       udevadm trigger
+
        echo Loading modules from $LUSTRE
 
        local ncpus
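Note: with $LCTL expanded (assumed here to be /usr/sbin/lctl, purely for
illustration), the fallback rule written by the else branch above looks like
the line below; udev then runs lctl set_param whenever a Lustre device emits
a "change" uevent carrying a non-empty PARAM value:

    # /etc/udev/rules.d/99-lustre-test.rules (illustrative content)
    SUBSYSTEM=="lustre", ACTION=="change", ENV{PARAM}=="?*", RUN+="/usr/sbin/lctl set_param '$env{PARAM}=$env{SETTING}'"
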
@@ -751,12 +764,20 @@ unload_modules() {
 
        $LUSTRE_RMMOD ldiskfs || return 2
 
+       [ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
+               rm /etc/udev/rules.d/99-lustre-test.rules
+       udevadm control --reload-rules
+       udevadm trigger
+
        if $LOAD_MODULES_REMOTE; then
                local list=$(comma_list $(remote_nodes_list))
                if [ -n "$list" ]; then
                        echo "unloading modules on: '$list'"
                        do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
                        do_rpc_nodes "$list" check_mem_leak
+                       do_rpc_nodes "$list" "rm /etc/udev/rules.d/99-lustre-test.rules"
+                       do_rpc_nodes "$list" "udevadm control --reload-rules"
+                       do_rpc_nodes "$list" "udevadm trigger"
                fi
        fi
 
@@ -785,7 +806,7 @@ fs_log_size() {
                          ;;
        esac
 
-       echo -n $size
+       echo -n $((size * MDSCOUNT))
 }
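Note: the per-MDT estimate produced by the case statement is now multiplied
by MDSCOUNT, so callers get the log overhead summed over all MDTs rather
than for a single one. Illustration only:

    # a per-MDT estimate of 4 with two MDTs now yields 8
    size=4; MDSCOUNT=2
    echo -n $((size * MDSCOUNT))
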
 
 fs_inode_ksize() {
@@ -941,7 +962,11 @@ init_gss() {
                # security ctx config for keyring
                SK_NO_KEY=false
                mkdir -p $SK_OM_PATH
-               mount -o bind $SK_OM_PATH /etc/request-key.d/
+               if grep -q request-key /proc/mounts > /dev/null; then
+                       echo "SSK: Request key already mounted."
+               else
+                       mount -o bind $SK_OM_PATH /etc/request-key.d/
+               fi
                local lgssc_conf_line='create lgssc * * '
                lgssc_conf_line+=$(which lgss_keyring)
                lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
@@ -1017,6 +1042,8 @@ init_gss() {
                                -m $SK_PATH/$FSNAME-nmclient.key \
                                 >/dev/null 2>&1"
                fi
+       fi
+       if $GSS_SK; then
                # mount options for servers and clients
                MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS)
                MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS)
@@ -1050,9 +1077,12 @@ cleanup_sk() {
                $RPC_MODE || echo "Cleaning up Shared Key.."
                do_nodes $(comma_list $(all_nodes)) "rm -f \
                        $SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key"
+               do_nodes $(comma_list $(all_nodes)) "keyctl show | \
+                 awk '/lustre/ { print \\\$1 }' | xargs -IX keyctl unlink X"
                # Remove the mount and clean up the files we added to SK_PATH
-               do_nodes $(comma_list $(all_nodes)) "umount \
-                       /etc/request-key.d/"
+               do_nodes $(comma_list $(all_nodes)) "while grep -q \
+                       request-key.d /proc/mounts; do umount \
+                       /etc/request-key.d/; done"
                do_nodes $(comma_list $(all_nodes)) "rm -f \
                        $SK_OM_PATH/lgssc.conf"
                do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
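Note: with the do_nodes escaping removed, each node effectively runs the
following during shared-key cleanup: unlink every key whose keyctl entry
mentions lustre, then unmount /etc/request-key.d repeatedly in case the bind
mount was stacked more than once:

    keyctl show | awk '/lustre/ { print $1 }' | xargs -IX keyctl unlink X
    while grep -q request-key.d /proc/mounts; do
            umount /etc/request-key.d/
    done
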
@@ -2022,46 +2052,6 @@ stop() {
        fi
 }
 
-# save quota version (both administrative and operational quotas)
-# add an additional parameter if mountpoint is ever different from $MOUNT
-#
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-#     it should be removed whenever we drop the interoperability for such
-#     server.
-quota_save_version() {
-    local fsname=${2:-$FSNAME}
-    local spec=$1
-    local ver=$(tr -c -d "123" <<< $spec)
-    local type=$(tr -c -d "ug" <<< $spec)
-
-    [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
-
-    [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
-
-    do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec"
-    local varsvc
-    local osts=$(get_facets OST)
-    for ost in ${osts//,/ }; do
-        varsvc=${ost}_svc
-        do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
-    done
-}
-
-# client could mount several lustre
-#
-# XXX This function is kept for interoperability with old server (< 2.3.50),
-#     it should be removed whenever we drop the interoperability for such
-#     server.
-quota_type() {
-       local fsname=${1:-$FSNAME}
-       local rc=0
-       do_facet $SINGLEMDS lctl get_param mdd.${fsname}-MDT*.quota_type ||
-               rc=$?
-       do_nodes $(comma_list $(osts_nodes)) \
-               lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
-       return $rc
-}
-
 # get mdt quota type
 mdt_quota_type() {
        local varsvc=${SINGLEMDS}_svc
@@ -2080,12 +2070,24 @@ ost_quota_type() {
 # restore old quota type settings
 restore_quota() {
        if [ "$old_MDT_QUOTA_TYPE" ]; then
-               do_facet mgs $LCTL conf_param \
-                       $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
+               if [[ $PERM_CMD = *"set_param -P"* ]]; then
+                       do_facet mgs $PERM_CMD \
+                               osd-*.$FSNAME-MDT*.quota_slave.enable = \
+                               $old_MDT_QUOTA_TYPE
+               else
+                       do_facet mgs $PERM_CMD \
+                               $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
+               fi
        fi
        if [ "$old_OST_QUOTA_TYPE" ]; then
-               do_facet mgs $LCTL conf_param \
-                       $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
+               if [[ $PERM_CMD = *"set_param -P"* ]]; then
+                       do_facet mgs $PERM_CMD \
+                               osd-*.$FSNAME-OST*.quota_slave.enable = \
+                               $old_OST_QUOTA_TYPE
+               else
+                       do_facet mgs $LCTL conf_param \
+                               $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
+               fi
        fi
 }
 
@@ -2138,10 +2140,17 @@ setup_quota(){
        export old_MDT_QUOTA_TYPE=$mdt_qtype
        export old_OST_QUOTA_TYPE=$ost_qtype
 
-       do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$QUOTA_TYPE ||
-               error "set mdt quota type failed"
-       do_facet mgs $LCTL conf_param $FSNAME.quota.ost=$QUOTA_TYPE ||
-               error "set ost quota type failed"
+       if [[ $PERM_CMD = *"set_param -P"* ]]; then
+               do_facet mgs $PERM_CMD \
+                       osd-*.$FSNAME-MDT*.quota_slave.enable=$QUOTA_TYPE
+               do_facet mgs $PERM_CMD \
+                       osd-*.$FSNAME-OST*.quota_slave.enable=$QUOTA_TYPE
+       else
+               do_facet mgs $PERM_CMD $FSNAME.quota.mdt=$QUOTA_TYPE ||
+                       error "set mdt quota type failed"
+               do_facet mgs $PERM_CMD $FSNAME.quota.ost=$QUOTA_TYPE ||
+                       error "set ost quota type failed"
+       fi
 
        local quota_usrs=$QUOTA_USERS
 
@@ -4240,7 +4249,7 @@ mkfs_opts() {
                        # Check for wide striping
                        if [ $OSTCOUNT -gt 160 ]; then
                                MDSJOURNALSIZE=${MDSJOURNALSIZE:-4096}
-                               fs_mkfs_opts+="-O large_xattr"
+                               fs_mkfs_opts+="-O ea_inode"
                        fi
 
                        var=${facet}_JRN
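Note: ea_inode is the upstream ext4/e2fsprogs name for the feature Lustre
historically shipped as large_xattr (extended attributes big enough to need
their own inode), which wide striping over more than 160 OSTs requires for
the file layout. Whether a formatted MDT actually has it can be checked with
dumpe2fs; the device path below is illustrative only:

    dumpe2fs -h /dev/mds1_dev 2>/dev/null | grep -E 'ea_inode|large_xattr'
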
@@ -4648,8 +4657,17 @@ setupall() {
                if $GSS_SK; then
                        set_rule $FSNAME any cli2mdt $SK_FLAVOR
                        set_rule $FSNAME any cli2ost $SK_FLAVOR
-                       wait_flavor cli2mdt $SK_FLAVOR
-                       wait_flavor cli2ost $SK_FLAVOR
+                       if $SK_SKIPFIRST; then
+                               export SK_SKIPFIRST=false
+
+                               sleep 30
+                               do_nodes $CLIENTS \
+                                        "lctl set_param osc.*.idle_connect=1"
+                               return
+                       else
+                               wait_flavor cli2mdt $SK_FLAVOR
+                               wait_flavor cli2ost $SK_FLAVOR
+                       fi
                else
                        set_flavor_all $SEC
                fi
@@ -4793,10 +4811,57 @@ set_conf_param_and_check() {
                error "check $PARAM failed!"
 }
 
+set_persistent_param() {
+       local myfacet=$1
+       local test_param=$2
+       local param=$3
+       local orig=$(do_facet $myfacet "$LCTL get_param -n $test_param")
+
+       if [ $# -gt 3 ]; then
+               local final=$4
+       else
+               local -i final
+               final=$((orig + 5))
+       fi
+
+       if [[ $PERM_CMD = *"set_param -P"* ]]; then
+               echo "Setting $test_param from $orig to $final"
+               do_facet mgs "$PERM_CMD $test_param='$final'" ||
+                       error "$PERM_CMD $test_param failed"
+       else
+               echo "Setting $param from $orig to $final"
+               do_facet mgs "$PERM_CMD $param='$final'" ||
+                       error "$PERM_CMD $param failed"
+       fi
+}
+
+set_persistent_param_and_check() {
+       local myfacet=$1
+       local test_param=$2
+       local param=$3
+       local orig=$(do_facet $myfacet "$LCTL get_param -n $test_param")
+
+       if [ $# -gt 3 ]; then
+               local final=$4
+       else
+               local -i final
+               final=$((orig + 5))
+       fi
+
+       set_persistent_param $myfacet $test_param $param "$final"
+
+       wait_update_facet $myfacet "$LCTL get_param -n $test_param" "$final" ||
+               error "check $param failed!"
+}
+
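Note: set_persistent_param_and_check wraps the new helper with
wait_update_facet so callers can confirm the value actually reached the
target; the jobstats change further down uses exactly this pattern. A
standalone sketch, assuming a file system named testfs and the procname_uid
jobid scheme:

    # set jobid_var persistently (via conf_param or set_param -P,
    # whichever PERM_CMD selects) and wait until the client reports it
    set_persistent_param_and_check client \
            "jobid_var" "testfs.sys.jobid_var" procname_uid
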
 init_param_vars () {
        TIMEOUT=$(lctl get_param -n timeout)
        TIMEOUT=${TIMEOUT:-20}
 
+       if [ -n $arg1 ]; then
+               [ "$arg1" = "server_only" ] && return
+       fi
+
        remote_mds_nodsh && log "Using TIMEOUT=$TIMEOUT" && return 0
 
        TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout")
@@ -4804,6 +4869,7 @@ init_param_vars () {
 
        osc_ensure_active $SINGLEMDS $TIMEOUT
        osc_ensure_active client $TIMEOUT
+       $LCTL set_param osc.*.idle_timeout=debug
 
        if [ -n "$(lctl get_param -n mdc.*.connect_flags|grep jobstats)" ]; then
                local current_jobid_var=$($LCTL get_param -n jobid_var)
@@ -4811,11 +4877,10 @@ init_param_vars () {
                if [ $JOBID_VAR = "existing" ]; then
                        echo "keeping jobstats as $current_jobid_var"
                elif [ $current_jobid_var != $JOBID_VAR ]; then
-                       echo "seting jobstats to $JOBID_VAR"
+                       echo "setting jobstats to $JOBID_VAR"
 
-                       set_conf_param_and_check client                 \
-                               "$LCTL get_param -n jobid_var"          \
-                               "$FSNAME.sys.jobid_var" $JOBID_VAR
+                       set_persistent_param_and_check client \
+                               "jobid_var" "$FSNAME.sys.jobid_var" $JOBID_VAR
                fi
        else
                echo "jobstats not supported by server"
@@ -5013,7 +5078,6 @@ check_and_setup_lustre() {
                fi
        fi
 
-       init_gss
        if $GSS_SK; then
                set_flavor_all null
        elif $GSS; then
@@ -7136,6 +7200,7 @@ wait_osp_active() {
        # wait until all MDTs are in the expected state
        for ((num = 1; num <= $MDSCOUNT; num++)); do
                local mdtosp=$(get_mdtosc_proc_path mds${num} ${tgt_name})
+               local wait=0
                local mproc
 
                if [ $facet = "mds" ]; then
@@ -7150,7 +7215,6 @@ wait_osp_active() {
                        sleep 5
                        local result=$(do_facet mds${num} "$LCTL get_param -n $mproc")
                        local max=30
-                       local wait=0
 
                        [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc"
                        if [ $result -eq $expected ]; then
@@ -7579,6 +7643,8 @@ flvr_cnt_cli2ost()
     local clients=${CLIENTS:-$HOSTNAME}
 
     for c in ${clients//,/ }; do
+       # reconnect if idle
+       do_node $c lctl set_param osc.*.idle_connect=1 >/dev/null 2>&1
        local output=$(do_node $c lctl get_param -n \
                 osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null)
        local tmpcnt=$(count_flvr "$output" $flavor)
@@ -8063,13 +8129,15 @@ get_obd_size() {
 
 #
 # Get the page size (bytes) on a given facet node.
+# The local client page_size is directly available in PAGE_SIZE.
 #
 get_page_size() {
        local facet=$1
-       local size=$(getconf PAGE_SIZE 2>/dev/null)
+       local page_size=$(getconf PAGE_SIZE 2>/dev/null)
 
-       [ -z "$CLIENTONLY" ] && size=$(do_facet $facet getconf PAGE_SIZE)
-       echo -n ${size:-4096}
+       [ -z "$CLIENTONLY" -a "$facet" != "client" ] &&
+               page_size=$(do_facet $facet getconf PAGE_SIZE)
+       echo -n ${page_size:-4096}
 }
 
 #
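Note: asking get_page_size about the client now resolves locally instead of
going through do_facet, which keeps the PAGE_SIZE export added to
init_test_env cheap; server facets are still queried remotely. Illustration
only (the facet name ost1 is assumed):

    # local getconf for the client, do_facet for a server facet
    echo "client: $(get_page_size client), ost1: $(get_page_size ost1)"
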
@@ -8096,7 +8164,9 @@ get_block_size() {
        echo -n ${size:-0}
 }
 
-# Check whether the "large_xattr" feature is enabled or not.
+# Check whether the "ea_inode" feature is enabled or not, to allow
+# ldiskfs xattrs over one block in size.  Allow both the historical
+# Lustre feature name (large_xattr) and the upstream name (ea_inode).
 large_xattr_enabled() {
        [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 0