X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=c507425a361f107fc5cf260e85cc798029a82ea3;hp=f64020037b8a7999903f7fdddf2e1e259e2433d6;hb=608cce73d5149cb4d317ee189db94f0b6ade1ff5;hpb=3f8e14163a57ddf51047efc1c0a1b9c15631e2b4;ds=sidebyside diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index f640200..c507425 100755 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -428,6 +428,8 @@ init_test_env() { # Constants used in more than one test script export LOV_MAX_STRIPE_COUNT=2000 + export DELETE_OLD_POOLS=${DELETE_OLD_POOLS:-false} + export KEEP_POOLS=${KEEP_POOLS:-false} export MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines} . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh} @@ -536,6 +538,17 @@ module_loaded () { /sbin/lsmod | grep -q "^\<$1\>" } +check_lfs_df_ret_val() { + # Ignore only EOPNOTSUPP (which is 95; Operation not supported) error + # returned by 'lfs df' for valid dentry but not a lustrefs. + # + # 'lfs df' historically always returned success(0) instead of + # EOPNOTSUPP. This function for compatibility reason, ignores and + # masquerades EOPNOTSUPP as success. + [[ $1 -eq 95 ]] && return 0 + return $1 +} + PRLFS=false lustre_insmod() { local module=$1 @@ -680,9 +693,9 @@ load_modules_local() { # that obviously has nothing to do with this Lustre run # Disable automatic memory scanning to avoid perf hit. if [ -f /sys/kernel/debug/kmemleak ] ; then - echo scan=off > /sys/kernel/debug/kmemleak - echo scan > /sys/kernel/debug/kmemleak - echo clear > /sys/kernel/debug/kmemleak + echo scan=off > /sys/kernel/debug/kmemleak || true + echo scan > /sys/kernel/debug/kmemleak || true + echo clear > /sys/kernel/debug/kmemleak || true fi echo Loading modules from $LUSTRE @@ -2192,7 +2205,7 @@ restore_quota() { if [ "$old_MDT_QUOTA_TYPE" ]; then if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs $PERM_CMD \ - osd-*.$FSNAME-MDT*.quota_slave.enable = \ + osd-*.$FSNAME-MDT*.quota_slave.enabled = \ $old_MDT_QUOTA_TYPE else do_facet mgs $PERM_CMD \ @@ -2202,7 +2215,7 @@ restore_quota() { if [ "$old_OST_QUOTA_TYPE" ]; then if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs $PERM_CMD \ - osd-*.$FSNAME-OST*.quota_slave.enable = \ + osd-*.$FSNAME-OST*.quota_slave.enabled = \ $old_OST_QUOTA_TYPE else do_facet mgs $LCTL conf_param \ @@ -2216,6 +2229,7 @@ restore_quota() { # This will allow fixing the "lfs df" summary line in the future. lfs_df() { $LFS df $* | sed -e 's/filesystem /filesystem_/' + check_lfs_df_ret_val $? } # Get free inodes on the MDT specified by mdt index, free indoes on @@ -2263,9 +2277,9 @@ setup_quota(){ if [[ $PERM_CMD == *"set_param -P"* ]]; then do_facet mgs $PERM_CMD \ - osd-*.$FSNAME-MDT*.quota_slave.enable=$QUOTA_TYPE + osd-*.$FSNAME-MDT*.quota_slave.enabled=$QUOTA_TYPE do_facet mgs $PERM_CMD \ - osd-*.$FSNAME-OST*.quota_slave.enable=$QUOTA_TYPE + osd-*.$FSNAME-OST*.quota_slave.enabled=$QUOTA_TYPE else do_facet mgs $PERM_CMD $FSNAME.quota.mdt=$QUOTA_TYPE || error "set mdt quota type failed" @@ -2999,15 +3013,21 @@ stop_client_loads() { # If --verbose is passed as the first argument, the result is printed on each # value change, otherwise it is only printed after every 10s interval. # +# If --quiet is passed as the first/second argument, the do_node() command +# will not print the remote command before executing it each time. 
+# # Using wait_update_cond() or related helper function is preferable to adding # a "long enough" wait for some state to change in the background, since # "long enough" may be too short due to tunables, system config, or running in # a VM, and must by necessity wait too long for most cases or risk failure. # -# usage: wait_update_cond [--verbose] node check cond expect [max_wait] +# usage: wait_update_cond [--verbose] [--quiet] node check cond expect [max_wait] wait_update_cond() { - local verbose=false - [[ "$1" == "--verbose" ]] && verbose=true && shift + local verbose + local quiet + + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift local node=$1 local check="$2" @@ -3022,7 +3042,7 @@ wait_update_cond() { local print=10 while (( $waited <= $max_wait )); do - result=$(do_node $node "$check") + result=$(do_node $quiet $node "$check") eval [[ "'$result'" $cond "'$expect'" ]] if [[ $? == 0 ]]; then @@ -3030,7 +3050,7 @@ wait_update_cond() { echo "Updated after ${waited}s: want '$expect' got '$result'" return 0 fi - if $verbose && [[ "$result" != "$prev_result" ]]; then + if [[ -n "$verbose" && "$result" != "$prev_result" ]]; then [[ -n "$prev_result" ]] && echo "Changed after ${waited}s: from '$prev_result' to '$result'" prev_result="$result" @@ -3044,23 +3064,29 @@ wait_update_cond() { return 3 } -# usage: wait_update [--verbose] node check expect [max_wait] +# usage: wait_update [--verbose] [--quiet] node check expect [max_wait] wait_update() { - local verbose= - [ "$1" = "--verbose" ] && verbose="$1" && shift + local verbose + local quiet + + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift local node="$1" local check="$2" local expect="$3" local max_wait=$4 - wait_update_cond $verbose $node "$check" "==" "$expect" $max_wait + wait_update_cond $verbose $quiet $node "$check" "==" "$expect" $max_wait } # usage: wait_update_facet_cond [--verbose] facet check cond expect [max_wait] wait_update_facet_cond() { - local verbose= - [ "$1" = "--verbose" ] && verbose="$1" && shift + local verbose + local quiet + + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift local node=$(facet_active_host $1) local check="$2" @@ -3068,20 +3094,23 @@ wait_update_facet_cond() { local expect="$4" local max_wait=$5 - wait_update_cond $verbose $node "$check" "$cond" "$expect" $max_wait + wait_update_cond $verbose $quiet $node "$check" "$cond" "$expect" $max_wait } # usage: wait_update_facet [--verbose] facet check expect [max_wait] wait_update_facet() { - local verbose= - [ "$1" = "--verbose" ] && verbose="$1" && shift + local verbose + local quiet + + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift local node=$(facet_active_host $1) local check="$2" local expect="$3" local max_wait=$4 - wait_update_cond $verbose $node "$check" "==" "$expect" $max_wait + wait_update_cond $verbose $quiet $node "$check" "==" "$expect" $max_wait } sync_all_data() { @@ -3421,7 +3450,7 @@ wait_destroy_complete () { # MAX value shouldn't be big as this mean server responsiveness # never increase this just to make test pass but investigate # why it takes so long time - local MAX=5 + local MAX=${1:-5} local WAIT=0 local list=$(comma_list $(mdts_nodes)) while [ $WAIT -lt $MAX ]; do @@ -3505,12 +3534,20 @@ wait_remote_prog () { lfs_df_check() { local clients=${1:-$CLIENTS} + 
local rc if [ -z "$clients" ]; then - $LFS df $MOUNT + $LFS df $MOUNT > /dev/null + rc=$? else $PDSH $clients "$LFS df $MOUNT" > /dev/null + rc=$? fi + + check_lfs_df_ret_val $rc + rc=$? + + return $rc } clients_up() { @@ -4008,26 +4045,27 @@ change_active() { } do_node() { - local verbose=false - # do not stripe off hostname if verbose, bug 19215 - if [ x$1 = x--verbose ]; then - shift - verbose=true - fi + local verbose + local quiet - local HOST=$1 - shift - local myPDSH=$PDSH - if [ "$HOST" = "$HOSTNAME" ]; then - myPDSH="no_dsh" - elif [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" ]; then - echo "cannot run remote command on $HOST with $myPDSH" - return 128 - fi - if $VERBOSE; then - echo "CMD: $HOST $@" >&2 - $myPDSH $HOST "$LCTL mark \"$@\"" > /dev/null 2>&1 || : - fi + # do not strip off hostname if verbose, b=19215 + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift + + local HOST=$1 + shift + local myPDSH=$PDSH + + if [ "$HOST" = "$HOSTNAME" ]; then + myPDSH="no_dsh" + elif [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" ]; then + echo "cannot run remote command on $HOST with $myPDSH" + return 128 + fi + if $VERBOSE && [[ -z "$quiet" ]]; then + echo "CMD: $HOST $@" >&2 + $myPDSH $HOST "$LCTL mark \"$@\"" > /dev/null 2>&1 || : + fi if [[ "$myPDSH" == "rsh" ]] || [[ "$myPDSH" == *pdsh* && "$myPDSH" != *-S* ]]; then @@ -4044,7 +4082,7 @@ do_node() { return 0 fi - if $verbose ; then + if [[ -n "$verbose" ]]; then # print HOSTNAME for myPDSH="no_dsh" if [[ $myPDSH = no_dsh ]]; then $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /" @@ -4057,12 +4095,8 @@ do_node() { return ${PIPESTATUS[0]} } -do_nodev() { - do_node --verbose "$@" -} - single_local_node () { - [ "$1" = "$HOSTNAME" ] + [ "$1" = "$HOSTNAME" ] } # Outputs environment variable assignments that should be passed to remote nodes @@ -4108,45 +4142,42 @@ get_env_vars() { } do_nodes() { - local verbose=false - # do not stripe off hostname if verbose, bug 19215 - if [ x$1 = x--verbose ]; then - shift - verbose=true - fi + local verbose + local quiet - local rnodes=$1 - shift + # do not strip off hostname if verbose, b=19215 + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift - if single_local_node $rnodes; then - if $verbose; then - do_nodev $rnodes "$@" - else - do_node $rnodes "$@" - fi - return $? - fi + local rnodes=$1 + shift - # This is part from do_node - local myPDSH=$PDSH + if single_local_node $rnodes; then + do_node $verbose $quiet $rnodes "$@" + return $? + fi - [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" -o "$myPDSH" = "rsh" ] && \ - echo "cannot run remote command on $rnodes with $myPDSH" && return 128 + # This is part from do_node + local myPDSH=$PDSH - export FANOUT=$(get_node_count "${rnodes//,/ }") - if $VERBOSE; then - echo "CMD: $rnodes $@" >&2 - $myPDSH $rnodes "$LCTL mark \"$@\"" > /dev/null 2>&1 || : - fi + [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" -o "$myPDSH" = "rsh" ] && + echo "cannot run remote command on $rnodes with $myPDSH" && + return 128 - # do not replace anything from pdsh output if -N is used - # -N Disable hostname: prefix on lines of output. 
- if $verbose || [[ $myPDSH = *-N* ]]; then - $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" - else - $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" | sed -re "s/^[^:]*: //g" - fi - return ${PIPESTATUS[0]} + export FANOUT=$(get_node_count "${rnodes//,/ }") + if $VERBOSE && [[ -z "$quiet" ]]; then + echo "CMD: $rnodes $@" >&2 + $myPDSH $rnodes "$LCTL mark \"$@\"" > /dev/null 2>&1 || : + fi + + # do not replace anything from pdsh output if -N is used + # -N Disable hostname: prefix on lines of output. + if [[ -n "$verbose" || $myPDSH = *-N* ]]; then + $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" + else + $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" | sed -re "s/^[^:]*: //g" + fi + return ${PIPESTATUS[0]} } ## @@ -4157,11 +4188,18 @@ do_nodes() { # # usage: do_facet $facet command [arg ...] do_facet() { + local verbose + local quiet + + [[ "$1" == "--verbose" ]] && verbose="$1" && shift + [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift + local facet=$1 shift - local HOST=$(facet_active_host $facet) - [ -z $HOST ] && echo "No host defined for facet ${facet}" && exit 1 - do_node $HOST "$@" + local host=$(facet_active_host $facet) + + [ -z "$host" ] && echo "No host defined for facet ${facet}" && exit 1 + do_node $verbose $quiet $host "$@" } # Function: do_facet_random_file $FACET $FILE $SIZE @@ -4184,7 +4222,7 @@ do_facet_create_file() { } do_nodesv() { - do_nodes --verbose "$@" + do_nodes --verbose "$@" } add() { @@ -5268,6 +5306,11 @@ init_param_vars () { TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") log "Using TIMEOUT=$TIMEOUT" + # tune down to speed up testing on (usually) small setups + local mgc_timeout=/sys/module/mgc/parameters/mgc_requeue_timeout_min + do_nodes $(comma_list $(nodes_list)) \ + "[ -f $mgc_timeout ] && echo 1 > $mgc_timeout; exit 0" + osc_ensure_active $SINGLEMDS $TIMEOUT osc_ensure_active client $TIMEOUT $LCTL set_param osc.*.idle_timeout=debug @@ -5387,6 +5430,105 @@ is_mounted () { echo $mounted' ' | grep -w -q $mntpt' ' } +create_pools () { + local pool=$1 + local ostsn=${2:-$OSTCOUNT} + local npools=${FS_NPOOLS:-$((OSTCOUNT / ostsn))} + local n + + echo ostsn=$ostsn npools=$npools + if [[ $ostsn -gt $OSTCOUNT ]]; then + echo "request to use $ostsn OSTs in the pool, \ + using max available OSTCOUNT=$OSTCOUNT" + ostsn=$OSTCOUNT + fi + for (( n=0; n < $npools; n++ )); do + p=${pool}$n + if ! $DELETE_OLD_POOLS; then + log "request to not delete old pools: $FSNAME.$p exist?" + if ! 
check_pool_not_exist $FSNAME.$p; then + echo "Using existing $FSNAME.$p" + $LCTL pool_list $FSNAME.$p + continue + fi + fi + create_pool $FSNAME.$p $KEEP_POOLS || + error "create_pool $FSNAME.$p failed" + + local first=$(( (n * ostsn) % OSTCOUNT )) + local last=$(( (first + ostsn - 1) % OSTCOUNT )) + if [[ $first -le $last ]]; then + pool_add_targets $p $first $last || + error "pool_add_targets $p $first $last failed" + else + pool_add_targets $p $first $(( OSTCOUNT - 1 )) || + error "pool_add_targets $p $first \ + $(( OSTCOUNT - 1 )) failed" + pool_add_targets $p 0 $last || + error "pool_add_targets $p 0 $last failed" + fi + done +} + +set_pools_quota () { + local u + local o + local p + local i + local j + + [[ $ENABLE_QUOTA ]] || error "Required Pool Quotas: \ + $POOLS_QUOTA_USERS_SET, but ENABLE_QUOTA not set!" + + # POOLS_QUOTA_USERS_SET= + # "quota15_1:20M -- for all of the found pools + # quota15_2:1G:gpool0 + # quota15_3 -- for global limit only + # quota15_4:200M:gpool0 + # quota15_4:200M:gpool1" + + declare -a pq_userset=(${POOLS_QUOTA_USERS_SET="mpiuser"}) + declare -a pq_users + declare -A pq_limits + + for ((i=0; i<${#pq_userset[@]}; i++)); do + u=${pq_userset[i]%%:*} + o="" + # user gets no pool limits if + # POOLS_QUOTA_USERS_SET does not specify it + [[ ${pq_userset[i]} =~ : ]] && o=${pq_userset[i]##$u:} + pq_limits[$u]+=" $o" + done + pq_users=(${!pq_limits[@]}) + + declare -a opts + local pool + + for ((i=0; i<${#pq_users[@]}; i++)); do + u=${pq_users[i]} + # set to max limit (_u64) + $LFS setquota -u $u -B $((2**24 - 1))T $DIR + opts=(${pq_limits[$u]}) + for ((j=0; j<${#opts[@]}; j++)); do + p=${opts[j]##*:} + o=${opts[j]%%:*} + # Set limit for all existing pools if + # no pool specified + if [ $p == $o ]; then + p=$(list_pool $FSNAME | sed "s/$FSNAME.//") + echo "No pool specified for $u, + set limit $o for all existing pools" + fi + for pool in $p; do + $LFS setquota -u $u -B $o --pool $pool $DIR || + error "setquota -u $u -B $o \ + --pool $pool failed" + done + done + $LFS quota -uv $u --pool $DIR + done +} + check_and_setup_lustre() { sanitize_parameters nfs_client_mode && return @@ -5472,6 +5614,16 @@ check_and_setup_lustre() { set_flavor_all $SEC fi + if $DELETE_OLD_POOLS; then + destroy_all_pools + fi + if [[ -n "$FS_POOL" ]]; then + create_pools $FS_POOL $FS_POOL_NOSTS + fi + + if [[ -n "$POOLS_QUOTA_USERS_SET" ]]; then + set_pools_quota + fi if [ "$ONLY" == "setup" ]; then exit 0 fi @@ -6416,10 +6568,10 @@ check_mds() { } reset_fail_loc () { - echo -n "Resetting fail_loc on all nodes..." - do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 \ - fail_val=0 2>/dev/null" || true - echo done. + #echo -n "Resetting fail_loc on all nodes..." + do_nodes --quiet $(comma_list $(nodes_list)) \ + "lctl set_param -n fail_loc=0 fail_val=0 2>/dev/null" || true + #echo done. } @@ -6428,7 +6580,8 @@ reset_fail_loc () { # Also appends a timestamp and prepends the testsuite name. 
# -EQUALS="====================================================================================================" +# ======================================================== 15:06:12 (1624050372) +EQUALS="========================================================" banner() { msg="== ${TESTSUITE} $*" last=${msg: -1:1} @@ -6444,7 +6597,7 @@ check_dmesg_for_errors() { ldiskfs_check_descriptors: Checksum for group 0 failed\|\ group descriptors corrupted" - res=$(do_nodes $(comma_list $(nodes_list)) "dmesg" | grep "$errors") + res=$(do_nodes -q $(comma_list $(nodes_list)) "dmesg" | grep "$errors") [ -z "$res" ] && return 0 echo "Kernel error detected: $res" return 1 @@ -7210,15 +7363,18 @@ restore_lustre_params() { check_node_health() { local nodes=${1:-$(comma_list $(nodes_list))} - - for node in ${nodes//,/ }; do - check_network "$node" 5 - if [ $? -eq 0 ]; then - do_node $node "$LCTL get_param catastrophe 2>&1" | - grep -q "catastrophe=1" && - error "$node:LBUG/LASSERT detected" || true - fi - done + local health=$TMP/node_health.$$ + + do_nodes -q $nodes "$LCTL get_param catastrophe 2>&1" | tee $health | + grep "catastrophe=1" && error "LBUG/LASSERT detected" + # Only check/report network health if get_param isn't reported, since + # *clearly* the network is working if get_param returned something. + if (( $(grep -c catastro $health) != $(wc -w <<< ${nodes//,/ }) )); then + for node in ${nodes//,/}; do + check_network $node 5 + done + fi + rm -f $health } mdsrate_cleanup () { @@ -7648,6 +7804,7 @@ check_pool_not_exist() { create_pool() { local fsname=${1%%.*} local poolname=${1##$fsname.} + local keep_pools=${2:-false} stack_trap "destroy_test_pools $fsname" EXIT do_facet mgs lctl pool_new $1 @@ -7666,7 +7823,7 @@ create_pool() { wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ 2>/dev/null || echo foo" "" || error "pool_new failed $1" - add_pool_to_list $1 + $keep_pools || add_pool_to_list $1 return $RC } @@ -7684,10 +7841,18 @@ remove_pool_from_list () { local poolname=${1##$fsname.} local listvar=${fsname}_CREATED_POOLS - local temp=${listvar}=$(exclude_items_from_list ${!listvar} $poolname) + local temp=${listvar}=$(exclude_items_from_list "${!listvar}" $poolname) eval export $temp } +# cleanup all pools exist on $FSNAME +destroy_all_pools () { + local i + for i in $(list_pool $FSNAME); do + destroy_pool $i + done +} + destroy_pool_int() { local ost local OSTS=$(list_pool $1) @@ -7708,8 +7873,7 @@ destroy_pool() { local RC - check_pool_not_exist $fsname.$poolname - [[ $? -eq 0 ]] && return 0 + check_pool_not_exist $fsname.$poolname && return 0 || true destroy_pool_int $fsname.$poolname RC=$? @@ -8699,7 +8863,7 @@ check_mount_and_prep() is_mounted $MOUNT || setupall rm -rf $DIR/[df][0-9]* || error "Fail to cleanup the env!" - mkdir $DIR/$tdir || error "Fail to mkdir $DIR/$tdir." + mkdir_on_mdt0 $DIR/$tdir || error "Fail to mkdir $DIR/$tdir." for idx in $(seq $MDSCOUNT); do local name="MDT$(printf '%04x' $((idx - 1)))" rm -rf $MOUNT/.lustre/lost+found/$name/* @@ -9383,8 +9547,8 @@ changelog_register() { error "$mdt: changelog_mask=+hsm failed: $?" local cl_user - cl_user=$(do_facet $facet \ - $LCTL --device $mdt changelog_register -n) || + cl_user=$(do_facet $facet $LCTL --device $mdt \ + changelog_register -n $@) || error "$mdt: register changelog user failed: $?" 
stack_trap "__changelog_deregister $facet $cl_user" EXIT @@ -9471,6 +9635,7 @@ changelog_clear() { # so reorder to get same order than in changelog_register() local cl_facets=$(echo "${!CL_USERS[@]}" | tr " " "\n" | sort | tr "\n" " ") + local cl_user for facet in $cl_facets; do for cl_user in ${CL_USERS[$facet]}; do @@ -10397,3 +10562,102 @@ function restore_opencache() [[ -z "${saved_OPENCACHE_value}" ]] || $LCTL set_param -n "llite.*.opencache_threshold_count"=${saved_OPENCACHE_value} } + +# LU-13417: XXX lots of tests assume the directory to be created under MDT0, +# created on MDT0, use this function to create directory on specific MDT +# explicitly, and set default LMV to create subdirs on the same MDT too. +mkdir_on_mdt() { + local mdt + local OPTIND=1 + + while getopts "i:" opt $*; do + case $opt in + i) mdt=$OPTARG;; + esac + done + + shift $((OPTIND - 1)) + + $LFS mkdir -i $mdt -c 1 $* + # setting default LMV in non-DNE system will cause sanity-quota 41 fail + ((MDSCOUNT < 2)) || $LFS setdirstripe -D -i $mdt -c 1 $* +} + +mkdir_on_mdt0() { + mkdir_on_mdt -i0 $* +} + +# Wait for nodemap synchronization +wait_nm_sync() { + local nodemap_name=$1 + local key=$2 + local value=$3 + local opt=$4 + local proc_param + local is_active=$(do_facet mgs $LCTL get_param -n nodemap.active) + local max_retries=20 + local is_sync + local out1="" + local out2 + local mgs_ip=$(host_nids_address $mgs_HOST $NETTYPE | cut -d' ' -f1) + local i + + if [ "$nodemap_name" == "active" ]; then + proc_param="active" + elif [ -z "$key" ]; then + proc_param=${nodemap_name} + else + proc_param="${nodemap_name}.${key}" + fi + if [ "$opt" == "inactive" ]; then + # check nm sync even if nodemap is not activated + is_active=1 + opt="" + fi + (( is_active == 0 )) && [ "$proc_param" != "active" ] && return + + if [ -z "$value" ]; then + out1=$(do_facet mgs $LCTL get_param $opt \ + nodemap.${proc_param} 2>/dev/null) + echo "On MGS ${mgs_ip}, ${proc_param} = $out1" + else + out1=$value; + fi + + # if servers run on the same node, it is impossible to tell if they get + # synced with the mgs, so just wait an arbitrary 10 seconds + if [ $(facet_active_host mgs) == $(facet_active_host mds) ] && + [ $(facet_active_host mgs) == $(facet_active_host ost1) ]; then + echo "waiting 10 secs for sync" + sleep 10 + return + fi + + # wait up to 10 seconds for other servers to sync with mgs + for i in $(seq 1 10); do + for node in $(all_server_nodes); do + local node_ip=$(host_nids_address $node $NETTYPE | + cut -d' ' -f1) + + is_sync=true + if [ -z "$value" ]; then + [ $node_ip == $mgs_ip ] && continue + fi + + out2=$(do_node $node_ip $LCTL get_param $opt \ + nodemap.$proc_param 2>/dev/null) + echo "On $node ${node_ip}, ${proc_param} = $out2" + [ "$out1" != "$out2" ] && is_sync=false && break + done + $is_sync && break + sleep 1 + done + if ! $is_sync; then + echo MGS + echo $out1 + echo OTHER - IP: $node_ip + echo $out2 + error "mgs and $nodemap_name ${key} mismatch, $i attempts" + fi + echo "waited $((i - 1)) seconds for sync" +}
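The hunks above introduce several new test-framework knobs: a --quiet/-q option accepted by do_node(), do_nodes(), do_facet() and the wait_update*() helpers, check_lfs_df_ret_val() so that "lfs df" returning EOPNOTSUPP (95) is masked as success, and pool setup/teardown controlled by DELETE_OLD_POOLS, KEEP_POOLS, FS_POOL, FS_POOL_NOSTS and POOLS_QUOTA_USERS_SET. Below is a minimal sketch of how a test run might exercise these knobs; it is illustrative only, and the pool name, OSTs-per-pool value and user name are assumptions, not values taken from the patch.

#!/bin/bash
# Sketch: drive the new test-framework.sh knobs from the environment.
# "testpool", 2 OSTs per pool and user "quser" are hypothetical examples.

export DELETE_OLD_POOLS=true        # destroy_all_pools before setup
export FS_POOL=testpool             # create_pools testpool0, testpool1, ...
export FS_POOL_NOSTS=2              # OSTs per pool (capped at OSTCOUNT)
export KEEP_POOLS=true              # skip add_pool_to_list, so pools survive cleanup

export ENABLE_QUOTA=true            # required by set_pools_quota()
# user[:limit[:pool]] entries; a limit with no pool applies to all existing pools
export POOLS_QUOTA_USERS_SET="quser:200M:testpool0"

# Inside a test script that sources test-framework.sh, the new flag
# suppresses the "CMD: <host> ..." echo from do_node()/do_nodes():
#   do_nodes --quiet $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0"
# and wait_update() forwards it to do_node():
#   wait_update --quiet $HOSTNAME "$LCTL get_param -n fail_loc" "0" 30

bash sanity.sh                      # any suite that sources test-framework.sh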