X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=919d90f7dd14dee3b999f81cea8d58029f5f5522;hb=f78c9f5c45c52b0e5d6046a9dbe2a27ee62d9594;hp=b7cb0a3185450f8418b82a8fda7c2599790346fb;hpb=bde576b277acce2f35ff8e238b277781574eba9e;p=fs%2Flustre-release.git diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index b7cb0a3..919d90f 100644 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -15,20 +15,23 @@ export GSS=false export GSS_KRB5=false export GSS_PIPEFS=false export IDENTITY_UPCALL=default +export QUOTA_AUTO=1 #export PDSH="pdsh -S -Rssh -w" -# eg, assert_env LUSTRE MDSNODES OSTNODES CLIENTS -assert_env() { - local failed="" - for name in $@; do - if [ -z "${!name}" ]; then - echo "$0: $name must be set" - failed=1 - fi - done - [ $failed ] && exit 1 || true -} +# function used by scripts run on remote nodes +LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)} +. $LUSTRE/tests/functions.sh + +LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg} + +EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh} + +if [ -f "$EXCEPT_LIST_FILE" ]; then + echo "Reading test skip list from $EXCEPT_LIST_FILE" + cat $EXCEPT_LIST_FILE + . $EXCEPT_LIST_FILE +fi assert_DIR () { local failed="" @@ -50,6 +53,7 @@ usage() { } print_summary () { + trap 0 [ "$TESTSUITE" == "lfscktest" ] && return 0 [ -n "$ONLY" ] && echo "WARNING: ONLY is set to ${ONLY}." local form="%-13s %-17s %s\n" @@ -71,8 +75,16 @@ print_summary () { done for O in $TESTSUITE_LIST; do - [ "${!O}" = "no" ] && \ - printf "$form" "Skipped" "$O" "" + if [ "${!O}" = "no" ]; then + # FIXME. + # only for those tests suits which are run directly from acc-sm script: + # bonnie, iozone, etc. + if [ -f "$TESTSUITELOG" ] && grep FAIL $TESTSUITELOG | grep -q ' '$O ; then + printf "$form" "UNFINISHED" "$O" "" + else + printf "$form" "Skipped" "$O" "" + fi + fi done for O in $TESTSUITE_LIST; do @@ -85,6 +97,7 @@ init_test_env() { export LUSTRE=`absolute_path $LUSTRE` export TESTSUITE=`basename $0 .sh` export TEST_FAILED=false + export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} export MKE2FS=${MKE2FS:-mke2fs} export DEBUGFS=${DEBUGFS:-debugfs} @@ -106,11 +119,14 @@ init_test_env() { if ! echo $PATH | grep -q $LUSTRE/tests; then export PATH=$PATH:$LUSTRE/tests fi - export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mdsrate"} + export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"} [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null) - if ! echo $PATH | grep -q $LUSTRE/test/racer; then + if ! echo $PATH | grep -q $LUSTRE/tests/racer; then export PATH=$PATH:$LUSTRE/tests/racer fi + if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then + export PATH=$PATH:$LUSTRE/tests/mpi + fi export LCTL=${LCTL:-"$LUSTRE/utils/lctl"} [ ! -f "$LCTL" ] && export LCTL=$(which lctl) export LFS=${LFS:-"$LUSTRE/utils/lfs"} @@ -128,13 +144,15 @@ init_test_env() { export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"} [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre) export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} " + export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod} + [ ! -f "$LUSTRE_RMMOD" ] && export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null) export FSTYPE=${FSTYPE:-"ldiskfs"} export NAME=${NAME:-local} export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"} [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \ export LGSSD=$(which lgssd) export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"} - [ ! 
-f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd) + [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null) export KRB5DIR=${KRB5DIR:-"/usr/kerberos"} export DIR2 export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests} @@ -162,11 +180,16 @@ init_test_env() { IDENTITY_UPCALL=false ;; esac + export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false} # Paths on remote nodes, if different export RLUSTRE=${RLUSTRE:-$LUSTRE} export RPWD=${RPWD:-$PWD} export I_MOUNTED=${I_MOUNTED:-"no"} + if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mds.ko -a \ + ! -f `dirname $0`/../mds/mds.ko ]; then + export CLIENTMODSONLY=yes + fi # command line @@ -185,7 +208,6 @@ init_test_env() { [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true rm -f $TMP/*active - } case `uname -r` in @@ -193,13 +215,20 @@ case `uname -r` in *) EXT=".ko"; USE_QUOTA=yes;; esac + +module_loaded () { + /sbin/lsmod | grep -q $1 +} + load_module() { EXT=".ko" module=$1 shift BASE=`basename $module $EXT` - lsmod | grep -q ${BASE} || \ - if [ -f ${LUSTRE}/${module}${EXT} ]; then + + module_loaded ${BASE} && return + + if [ -f ${LUSTRE}/${module}${EXT} ]; then insmod ${LUSTRE}/${module}${EXT} $@ else # must be testing a "make install" or "rpm" installation @@ -212,7 +241,7 @@ load_module() { fi } -load_modules() { +load_modules_local() { if [ -n "$MODPROBE" ]; then # use modprobe return 0 @@ -225,8 +254,8 @@ load_modules() { echo Loading modules from $LUSTRE load_module ../libcfs/libcfs/libcfs - [ "$PTLDEBUG" ] && lctl set_param debug=$PTLDEBUG - [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug=${SUBSYSTEM# } + [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG" + [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }" local MODPROBECONF= [ -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf [ ! "$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre @@ -250,8 +279,9 @@ load_modules() { load_module osc/osc load_module lov/lov load_module mgc/mgc - if [ -z "$CLIENTONLY" ] && [ -z "$CLIENTMODSONLY" ]; then + if ! client_only; then grep -q crc16 /proc/kallsyms || { modprobe crc16 2>/dev/null || true; } + grep -q jbd /proc/kallsyms || { modprobe jbd 2>/dev/null || true; } [ "$FSTYPE" = "ldiskfs" ] && load_module ../ldiskfs/ldiskfs/ldiskfs load_module mgs/mgs load_module mds/mds @@ -275,44 +305,16 @@ load_modules() { [ -f $LUSTRE/utils/mount.lustre ] && cp $LUSTRE/utils/mount.lustre /sbin/. || true } -RMMOD=rmmod -if [ `uname -r | cut -c 3` -eq 4 ]; then - RMMOD="modprobe -r" -fi - -wait_for_lnet() { - local UNLOADED=0 - local WAIT=0 - local MAX=60 - MODULES=$($LCTL modules | awk '{ print $2 }') - while [ -n "$MODULES" ]; do - sleep 5 - $RMMOD $MODULES > /dev/null 2>&1 || true - MODULES=$($LCTL modules | awk '{ print $2 }') - if [ -z "$MODULES" ]; then - return 0 - else - WAIT=$((WAIT + 5)) - echo "waiting, $((MAX - WAIT)) secs left" - fi - if [ $WAIT -eq $MAX ]; then - echo "LNET modules $MODULES will not unload" - lsmod - return 3 - fi - done -} - -unload_dep_module() { - #lsmod output - #libcfs 107852 17 llite_lloop,lustre,obdfilter,ost,... 
- local MODULE=$1 - local DEPS=$(lsmod | awk '($1 == "'$MODULE'") { print $4 }' | tr ',' ' ') - for SUBMOD in $DEPS; do - unload_dep_module $SUBMOD - done - [ "$MODULE" = "libcfs" ] && $LCTL dk $TMP/debug || true - $RMMOD $MODULE || true +load_modules () { + load_modules_local + # bug 19124 + # load modules on remote nodes optionally + # lustre-tests have to be installed on these nodes + if $LOAD_MODULES_REMOTE ; then + local list=$(comma_list $(remote_nodes_list)) + echo loading modules on $list + do_rpc_nodes $list load_modules + fi } check_mem_leak () { @@ -331,25 +333,15 @@ check_mem_leak () { unload_modules() { wait_exit_ST client # bug 12845 - lsmod | grep libcfs > /dev/null && $LCTL dl - unload_dep_module $FSTYPE - unload_dep_module libcfs - - local MODULES=$($LCTL modules | awk '{ print $2 }') - if [ -n "$MODULES" ]; then - echo "Modules still loaded: " - echo $MODULES - if [ "$(lctl dl)" ]; then - echo "Lustre still loaded" - lctl dl || true - lsmod - return 2 - else - echo "Lustre stopped but LNET is still loaded, waiting..." - wait_for_lnet || return 3 - fi + $LUSTRE_RMMOD $FSTYPE || return 2 + if $LOAD_MODULES_REMOTE ; then + local list=$(comma_list $(remote_nodes_list)) + echo unloading modules on $list + do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE + do_rpc_nodes $list check_mem_leak fi + HAVE_MODULES=false check_mem_leak || return 254 @@ -427,6 +419,10 @@ stop_gss_daemons() { init_gss() { if $GSS; then start_gss_daemons + + if [ -n "$LGSS_KEYRING_DEBUG" ]; then + echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level + fi fi } @@ -455,7 +451,7 @@ ostdevlabel() { mount_facet() { local facet=$1 shift - local dev=${facet}_dev + local dev=$(facet_active $facet)_dev local opt=${facet}_opt echo "Starting ${facet}: ${!opt} $@ ${!dev} ${MOUNT%/*}/${facet}" do_facet ${facet} mount -t lustre ${!opt} $@ ${!dev} ${MOUNT%/*}/${facet} @@ -464,8 +460,8 @@ mount_facet() { echo "mount -t lustre $@ ${!dev} ${MOUNT%/*}/${facet}" echo "Start of ${!dev} on ${facet} failed ${RC}" else - do_facet ${facet} "lctl set_param debug=$PTLDEBUG; \ - lctl set_param subsystem_debug=${SUBSYSTEM# }; \ + do_facet ${facet} "lctl set_param debug=\\\"$PTLDEBUG\\\"; \ + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; \ lctl set_param debug_mb=${DEBUG_SIZE}; \ sync" @@ -485,6 +481,14 @@ start() { shift eval export ${facet}_dev=${device} eval export ${facet}_opt=\"$@\" + + local varname=${facet}failover_dev + if [ -n "${!varname}" ] ; then + eval export ${facet}failover_dev=${!varname} + else + eval export ${facet}failover_dev=$device + fi + do_facet ${facet} mkdir -p ${MOUNT%/*}/${facet} mount_facet ${facet} RC=$? 
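# A minimal usage sketch (assumed typical invocation, not taken from this
# diff): tests bring server facets up and down through the start()/stop()
# wrappers above, with device names and mount options from the cfg file, e.g.
#
#   start ost1 $(ostdevname 1) $OST_MOUNT_OPTS || error "start ost1 failed"
#   ...
#   stop ost1 -f
#
# start() also records ${facet}failover_dev (defaulting to the primary
# device when no failover device is configured) so that mount_facet() can
# resolve the right device after change_active() during a failover.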
@@ -509,6 +513,91 @@ stop() { wait_exit_ST ${facet} } +# save quota version (both administrative and operational quotas) +# add an additional parameter if mountpoint is ever different from $MOUNT +quota_save_version() { + local fsname=${2:-$FSNAME} + local spec=$1 + local ver=$(tr -c -d "123" <<< $spec) + local type=$(tr -c -d "ug" <<< $spec) + + [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier" + + [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; } + + do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec" + local varsvc + local osts=$(get_facets OST) + for ost in ${osts//,/ }; do + varsvc=${ost}_svc + do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec" + done +} + +# client could mount several lustre +quota_type () { + local fsname=${1:-$FSNAME} + local rc=0 + do_facet mgs lctl get_param mdd.${fsname}-MDT*.quota_type || rc=$? + do_nodes $(comma_list $(osts_nodes)) \ + lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$? + return $rc +} + +restore_quota_type () { + local mntpt=${1:-$MOUNT} + local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2) + if [ ! "$old_QUOTA_TYPE" ] || [ "$quota_type" = "$old_QUOTA_TYPE" ]; then + return + fi + quota_save_version $old_QUOTA_TYPE +} + +setup_quota(){ + local mntpt=$1 + + # We need save the original quota_type params, and restore them after testing + + # Suppose that quota type the same on mds and ost + local quota_type=$(quota_type | grep MDT | cut -d "=" -f2) + [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!" + echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]" + if [ "$quota_type" != "$QUOTA_TYPE" ]; then + export old_QUOTA_TYPE=$quota_type + quota_save_version $QUOTA_TYPE + else + qtype=$(tr -c -d "ug" <<< $QUOTA_TYPE) + $LFS quotacheck -$qtype $mntpt || error "quotacheck has failed for $type" + fi + + local quota_usrs=$QUOTA_USERS + + # get_filesystem_size + local disksz=$(lfs df $mntpt | grep "filesystem summary:" | awk '{print $3}') + local blk_soft=$((disksz + 1024)) + local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over + + local Inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | awk '{print $3}') + local i_soft=$Inodes + local i_hard=$((i_soft + i_soft / 20)) + + echo "Total disk size: $disksz block-softlimit: $blk_soft block-hardlimit: + $blk_hard inode-softlimit: $i_soft inode-hardlimit: $i_hard" + + local cmd + for usr in $quota_usrs; do + echo "Setting up quota on $HOSTNAME:$mntpt for $usr..." + for type in u g; do + cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt" + echo "+ $cmd" + eval $cmd || error "$cmd FAILED!" 
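            # e.g., with a hypothetical user, limits and mount point, the
            # expanded command is roughly:
            #   lfs setquota -u quota_usr -b 10485760 -B 11010048 -i 1000000 -I 1050000 /mnt/lustre
            # (hard limits are the soft limits plus the 5% headroom computed above)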
+ done + # display the quota status + echo "Quota settings for $usr : " + $LFS quota -v -u $usr $mntpt || true + done +} + zconf_mount() { local OPTIONS local client=$1 @@ -527,8 +616,8 @@ zconf_mount() { do_node $client mkdir -p $mnt do_node $client mount -t lustre $OPTIONS $device $mnt || return 1 - do_node $client "lctl set_param debug=$PTLDEBUG; - lctl set_param subsystem_debug=${SUBSYSTEM# }; + do_node $client "lctl set_param debug=\\\"$PTLDEBUG\\\"; + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; lctl set_param debug_mb=${DEBUG_SIZE}" return 0 @@ -561,16 +650,73 @@ zconf_umount() { fi } +# nodes is comma list +sanity_mount_check_nodes () { + local nodes=$1 + shift + local mnts="$@" + local mnt + + # FIXME: assume that all cluster nodes run the same os + [ "$(uname)" = Linux ] || return 0 + + local rc=0 + for mnt in $mnts ; do + do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts); +mpts=\\\$(mount | grep -w -c $mnt); +if [ \\\$running -ne \\\$mpts ]; then + echo \\\$(hostname) env are INSANE!; + exit 1; +fi" + [ $? -eq 0 ] || rc=1 + done + return $rc +} + +sanity_mount_check_servers () { + [ "$CLIENTONLY" ] && + { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true + echo Checking servers environments + + # FIXME: modify get_facets to display all facets wo params + local facets="$(get_facets OST),$(get_facets MDS),mgs" + local node + local mnt + local facet + for facet in ${facets//,/ }; do + node=$(facet_host ${facet}) + mnt=${MOUNT%/*}/${facet} + sanity_mount_check_nodes $node $mnt || + { error "server $node environments are insane!"; return 1; } + done +} + +sanity_mount_check_clients () { + local clients=${1:-$CLIENTS} + local mntpt=${2:-$MOUNT} + local mntpt2=${3:-$MOUNT2} + + [ -z $clients ] && clients=$(hostname) + echo Checking clients $clients environments + + sanity_mount_check_nodes $clients $mntpt $mntpt2 || + error "clients environments are insane!" 
+} + +sanity_mount_check () { + sanity_mount_check_servers || return 1 + sanity_mount_check_clients || return 2 +} + # mount clients if not mouted zconf_mount_clients() { - local OPTIONS local clients=$1 local mnt=$2 - + local OPTIONS=${3:-$MOUNTOPT} # Only supply -o to mount if we have options - if [ -n "$MOUNTOPT" ]; then - OPTIONS="-o $MOUNTOPT" + if [ "$OPTIONS" ]; then + OPTIONS="-o $OPTIONS" fi local device=$MGSNID:/$FSNAME if [ -z "$mnt" -o -z "$FSNAME" ]; then @@ -579,13 +725,22 @@ zconf_mount_clients() { fi echo "Starting client $clients: $OPTIONS $device $mnt" - do_nodes $clients "mount | grep $mnt || { mkdir -p $mnt && mount -t lustre $OPTIONS $device $mnt || false; }" + + do_nodes $clients " +running=\\\$(mount | grep -c $mnt' '); +rc=0; +if [ \\\$running -eq 0 ] ; then + mkdir -p $mnt; + mount -t lustre $OPTIONS $device $mnt; + rc=\\\$?; +fi; +exit \\\$rc" || return ${PIPESTATUS[0]} echo "Started clients $clients: " - do_nodes $clients "mount | grep $mnt" + do_nodes $clients "mount | grep -w $mnt" - do_nodes $clients "lctl set_param debug=$PTLDEBUG; - lctl set_param subsystem_debug=${SUBSYSTEM# }; + do_nodes $clients "lctl set_param debug=\\\"$PTLDEBUG\\\"; + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; lctl set_param debug_mb=${DEBUG_SIZE};" return 0 @@ -599,29 +754,54 @@ zconf_umount_clients() { [ "$3" ] && force=-f echo "Stopping clients: $clients $mnt (opts:$force)" - do_nodes $clients "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts) + do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts); if [ \\\$running -ne 0 ] ; then -echo Stopping client \\\$(hostname) client $mnt opts:$force -lsof -t $mnt || need_kill=no +echo Stopping client \\\$(hostname) $mnt opts:$force; +lsof -t $mnt || need_kill=no; if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then pids=\\\$(lsof -t $mnt | sort -u); - if [ -n \\\$pids ]; then - kill -9 \\\$pids + if [ -n \\\"\\\$pids\\\" ]; then + kill -9 \\\$pids; fi -fi -busy=\\\$(umount $force $mnt 2>&1 | grep -c "busy") +fi; +busy=\\\$(umount $force $mnt 2>&1 | grep -c "busy"); if [ \\\$busy -ne 0 ] ; then - echo "$mnt is still busy, wait one second" && sleep 1 - umount $force $mnt + echo "$mnt is still busy, wait one second" && sleep 1; + umount $force $mnt; fi fi" } +shudown_node_hard () { + local host=$1 + local attempts=3 + + for i in $(seq $attempts) ; do + $POWER_DOWN $host + sleep 1 + ping -w 3 -c 1 $host > /dev/null 2>&1 || return 0 + echo "waiting for $host to fail attempts=$attempts" + [ $i -lt $attempts ] || \ + { echo "$host still pingable after power down! attempts=$attempts" && return 1; } + done +} + +shutdown_client() { + local client=$1 + local mnt=${2:-$MOUNT} + local attempts=3 + + if [ "$FAILURE_MODE" = HARD ]; then + shudown_node_hard $client + else + zconf_umount_clients $client $mnt -f + fi +} + shutdown_facet() { local facet=$1 if [ "$FAILURE_MODE" = HARD ]; then - $POWER_DOWN `facet_active_host $facet` - sleep 2 + shudown_node_hard $(facet_active_host $facet) elif [ "$FAILURE_MODE" = SOFT ]; then stop $facet fi @@ -640,6 +820,7 @@ boot_node() { local node=$1 if [ "$FAILURE_MODE" = HARD ]; then $POWER_UP $node + wait_for_host $node fi } @@ -649,65 +830,96 @@ check_progs_installed () { shift local progs=$@ - do_nodes $clients "set -x ; PATH=:$PATH status=true; for prog in $progs; do - which \\\$prog || { echo \\\$prog missing on \\\$(hostname) && status=false; } - done; - eval \\\$status" + do_nodes $clients "PATH=:$PATH; status=true; +for prog in $progs; do + if ! 
[ \\\"\\\$(which \\\$prog)\\\" -o \\\"\\\${!prog}\\\" ]; then + echo \\\$prog missing on \\\$(hostname); + status=false; + fi +done; +eval \\\$status" } -start_client_load() { - local list=(${1//,/ }) - local nodenum=$2 +client_var_name() { + echo __$(echo $1 | tr '-' 'X') +} - local numloads=${#CLIENT_LOADS[@]} - local testnum=$((nodenum % numloads)) +start_client_load() { + local client=$1 + local load=$2 + local var=$(client_var_name $client)_load + eval export ${var}=$load - do_node ${list[nodenum]} "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \ + do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \ BREAK_ON_ERROR=$BREAK_ON_ERROR \ END_RUN_FILE=$END_RUN_FILE \ LOAD_PID_FILE=$LOAD_PID_FILE \ TESTSUITELOG=$TESTSUITELOG \ - run_${CLIENT_LOADS[testnum]}.sh" & + run_${load}.sh" & CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $!" - log "Started client load: ${CLIENT_LOADS[testnum]} on ${list[nodenum]}" + log "Started client load: ${load} on $client" - eval export ${list[nodenum]}_load=${CLIENT_LOADS[testnum]} return 0 } start_client_loads () { - local clients=(${1//,/ }) + local -a clients=(${1//,/ }) + local numloads=${#CLIENT_LOADS[@]} + local testnum - for ((num=0; num < ${#clients[@]}; num++ )); do - start_client_load $1 $num + for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do + testnum=$((nodenum % numloads)) + start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]} done } # only for remote client check_client_load () { local client=$1 - local var=${client}_load - + local var=$(client_var_name $client)_load local TESTLOAD=run_${!var}.sh ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1 - - check_catastrophe $client || return 2 - - # see if the load is still on the client + + # bug 18914: try to connect several times not only when + # check ps, but while check_catastrophe also local tries=3 local RC=254 while [ $RC = 254 -a $tries -gt 0 ]; do let tries=$tries-1 # assume success RC=0 + if ! check_catastrophe $client; then + RC=${PIPESTATUS[0]} + if [ $RC -eq 254 ]; then + # FIXME: not sure how long we shuold sleep here + sleep 10 + continue + fi + echo "check catastrophe failed: RC=$RC " + return $RC + fi + done + # We can continue try to connect if RC=254 + # Just print the warning about this + if [ $RC = 254 ]; then + echo "got a return status of $RC from do_node while checking catastrophe on $client" + fi + + # see if the load is still on the client + tries=3 + RC=254 + while [ $RC = 254 -a $tries -gt 0 ]; do + let tries=$tries-1 + # assume success + RC=0 if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then RC=${PIPESTATUS[0]} sleep 30 fi done if [ $RC = 254 ]; then - echo "got a return status of $RC from do_node while checking (i.e. with 'ps') the client load on the remote system" + echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client" # see if we can diagnose a bit why this is fi @@ -720,13 +932,40 @@ check_client_loads () { for client in $clients; do check_client_load $client - rc=$? + rc=${PIPESTATUS[0]} if [ "$rc" != 0 ]; then log "Client load failed on node $client, rc=$rc" return $rc fi done } + +restart_client_loads () { + local clients=${1//,/ } + local expectedfail=${2:-""} + local client= + local rc=0 + + for client in $clients; do + check_client_load $client + rc=${PIPESTATUS[0]} + if [ "$rc" != 0 -a "$expectedfail" ]; then + local var=$(client_var_name $client)_load + start_client_load $client ${!var} + echo "Restarted client load ${!var}: on $client. 
Checking ..." + check_client_load $client + rc=${PIPESTATUS[0]} + if [ "$rc" != 0 ]; then + log "Client load failed to restart on node $client, rc=$rc" + # failure one client load means test fail + # we do not need to check other + return $rc + fi + else + return $rc + fi + done +} # End recovery-scale functions # verify that lustre actually cleaned up properly @@ -745,7 +984,7 @@ cleanup_check() { [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl && \ echo "$0: lustre didn't clean up..." 1>&2 && return 202 || true - if [ "`/sbin/lsmod 2>&1 | egrep 'lnet|libcfs'`" ]; then + if module_loaded lnet || module_loaded libcfs; then echo "$0: modules still loaded..." 1>&2 /sbin/lsmod 1>&2 return 203 @@ -753,6 +992,35 @@ cleanup_check() { return 0 } +wait_update () { + local node=$1 + local TEST=$2 + local FINAL=$3 + local MAX=${4:-90} + + local RESULT + local WAIT=0 + local sleep=5 + while [ true ]; do + RESULT=$(do_node $node "$TEST") + if [ "$RESULT" == "$FINAL" ]; then + echo "Updated after $WAIT sec: wanted '$FINAL' got '$RESULT'" + return 0 + fi + [ $WAIT -ge $MAX ] && break + echo "Waiting $((MAX - WAIT)) secs for update" + WAIT=$((WAIT + sleep)) + sleep $sleep + done + echo "Update not seen after $MAX sec: wanted '$FINAL' got '$RESULT'" + return 3 +} + +wait_update_facet () { + local facet=$1 + wait_update $(facet_active_host $facet) "$@" +} + wait_delete_completed () { local TOTALPREV=`lctl get_param -n osc.*.kbytesavail | \ awk 'BEGIN{total=0}; {total+=$1}; END{print total}'` @@ -763,41 +1031,100 @@ wait_delete_completed () { sleep 1 TOTAL=`lctl get_param -n osc.*.kbytesavail | \ awk 'BEGIN{total=0}; {total+=$1}; END{print total}'` - [ "$TOTAL" -eq "$TOTALPREV" ] && break + [ "$TOTAL" -eq "$TOTALPREV" ] && return 0 echo "Waiting delete completed ... prev: $TOTALPREV current: $TOTAL " TOTALPREV=$TOTAL WAIT=$(( WAIT + 1)) done - echo "Delete completed." + echo "Delete is not completed in $MAX_WAIT sec" + return 1 } wait_for_host() { - HOST=$1 - check_network "$HOST" 900 - while ! do_node $HOST "ls -d $LUSTRE " > /dev/null; do sleep 5; done + local host=$1 + check_network "$host" 900 + while ! do_node $host hostname > /dev/null; do sleep 5; done } wait_for() { - facet=$1 - HOST=`facet_active_host $facet` - wait_for_host $HOST + local facet=$1 + local host=`facet_active_host $facet` + wait_for_host $host } -wait_mds_recovery_done () { - local timeout=`do_facet $SINGLEMDS lctl get_param -n timeout` -#define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 / 2) -# as we are in process of changing obd_timeout in different ways -# let's set MAX longer than that - MAX=$(( timeout * 4 )) - WAIT=0 +wait_recovery_complete () { + local facet=$1 + + # Use default policy if $2 is not passed by caller. + #define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 / 2) + # as we are in process of changing obd_timeout in different ways + # let's set MAX longer than that + local MAX=${2:-$(( TIMEOUT * 4 ))} + + local var_svc=${facet}_svc + local procfile="*.${!var_svc}.recovery_status" + local WAIT=0 + local STATUS= + while [ $WAIT -lt $MAX ]; do - STATUS=`do_facet $SINGLEMDS "lctl get_param -n mdt.*-MDT0000.recovery_status | grep status"` - echo $STATUS | grep COMPLETE && return 0 + STATUS=$(do_facet $facet lctl get_param -n $procfile | grep status) + [[ $STATUS = "status: COMPLETE" ]] && return 0 sleep 5 WAIT=$((WAIT + 5)) - echo "Waiting $(($MAX - $WAIT)) secs for MDS recovery done" + echo "Waiting $((MAX - WAIT)) secs for $facet recovery done. 
$STATUS" done - echo "MDS recovery not done in $MAX sec" + echo "$facet recovery not done in $MAX sec. $STATUS" + return 1 +} + +wait_mds_ost_sync () { + # just because recovery is done doesn't mean we've finished + # orphan cleanup. Wait for llogs to get synchronized. + echo "Waiting for orphan cleanup..." + # MAX value includes time needed for MDS-OST reconnection + local MAX=$(( TIMEOUT * 2 )) + local WAIT=0 + while [ $WAIT -lt $MAX ]; do + local -a sync=($(do_nodes $(comma_list $(osts_nodes)) \ + "$LCTL get_param -n obdfilter.*.mds_sync")) + local con=1 + for ((i=0; i<${#sync[@]}; i++)); do + [ ${sync[$i]} -eq 0 ] && continue + # there is a not finished MDS-OST synchronization + con=0 + break; + done + sleep 2 # increase waiting time and cover statfs cache + [ ${con} -eq 1 ] && return 0 + echo "Waiting $WAIT secs for $facet mds-ost sync done." + WAIT=$((WAIT + 2)) + done + echo "$facet recovery not done in $MAX sec. $STATUS" + return 1 +} + +wait_destroy_complete () { + echo "Waiting for destroy to be done..." + # MAX value shouldn't be big as this mean server responsiveness + # never increase this just to make test pass but investigate + # why it takes so long time + local MAX=5 + local WAIT=0 + while [ $WAIT -lt $MAX ]; do + local -a RPCs=($($LCTL get_param -n osc.*.destroys_in_flight)) + local con=1 + for ((i=0; i<${#RPCs[@]}; i++)); do + [ ${RPCs[$i]} -eq 0 ] && continue + # there are still some destroy RPCs in flight + con=0 + break; + done + sleep 1 + [ ${con} -eq 1 ] && return 0 # done waiting + echo "Waiting $WAIT secs for destroys to be done." + WAIT=$((WAIT + 1)) + done + echo "Destroys weren't done in $MAX sec." return 1 } @@ -841,6 +1168,7 @@ wait_remote_prog () { local pids=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep | awk '{print $2}') [ -z "$pids" ] && return 0 echo "$PDSH processes still exists after $WAIT seconds. Still running: $pids" + # FIXME: not portable for pid in $pids; do cat /proc/${pid}/status || true cat /proc/${pid}/wchan || true @@ -853,15 +1181,31 @@ wait_remote_prog () { return $rc } -client_df() { +clients_up() { # not every config has many clients + sleep 1 if [ ! -z "$CLIENTS" ]; then - $PDSH $CLIENTS "df $MOUNT" > /dev/null + $PDSH $CLIENTS "stat -f $MOUNT" > /dev/null + else + stat -f $MOUNT > /dev/null + fi +} + +client_up() { + local client=$1 + # usually checked on particular client or locally + sleep 1 + if [ ! -z "$client" ]; then + $PDSH $client "stat -f $MOUNT" > /dev/null else - df $MOUNT > /dev/null + stat -f $MOUNT > /dev/null fi } +client_evicted() { + ! client_up $1 +} + client_reconnect() { uname -n >> $MOUNT/recon if [ -z "$CLIENTS" ]; then @@ -876,17 +1220,14 @@ client_reconnect() { } facet_failover() { - facet=$1 - sleep_time=$2 + local facet=$1 + local sleep_time=$2 echo "Failing $facet on node `facet_active_host $facet`" shutdown_facet $facet [ -n "$sleep_time" ] && sleep $sleep_time reboot_facet $facet - client_df & - DFPID=$! 
- echo "df pid is $DFPID" change_active $facet - TO=`facet_active_host $facet` + local TO=`facet_active_host $facet` echo "Failover $facet to $TO" wait_for $facet mount_facet $facet || error "Restart of $facet failed" @@ -901,8 +1242,8 @@ replay_barrier() { do_facet $facet sync df $MOUNT local svc=${facet}_svc - do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL --device %${!svc} notransno + do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" $LCTL mark "local REPLAY BARRIER on ${!svc}" } @@ -912,8 +1253,8 @@ replay_barrier_nodf() { do_facet $facet sync local svc=${facet}_svc echo Replay barrier on ${!svc} - do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL --device %${!svc} notransno + do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" $LCTL mark "local REPLAY BARRIER on ${!svc}" } @@ -922,8 +1263,8 @@ replay_barrier_nosync() { local facet=$1 echo running=${running} local svc=${facet}_svc echo Replay barrier on ${!svc} - do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL --device %${!svc} notransno + do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" $LCTL mark "local REPLAY BARRIER on ${!svc}" } @@ -940,7 +1281,7 @@ ost_evict_client() { fail() { facet_failover $* || error "failover: $?" - df $MOUNT || error "post-failover df: $?" + clients_up || error "post-failover df: $?" } fail_nodf() { @@ -953,9 +1294,8 @@ fail_abort() { stop $facet change_active $facet mount_facet $facet -o abort_recovery - df $MOUNT || echo "first df failed: $?" - sleep 1 - df $MOUNT || error "post-failover df: $?" + clients_up || echo "first df failed: $?" + clients_up || error "post-failover df: $?" 
} do_lmc() { @@ -1056,22 +1396,29 @@ facet_active_host() { change_active() { local facet=$1 - failover=${facet}failover + local failover=${facet}failover host=`facet_host $failover` [ -z "$host" ] && return - curactive=`facet_active $facet` + local curactive=`facet_active $facet` if [ -z "${curactive}" -o "$curactive" == "$failover" ] ; then eval export ${facet}active=$facet else eval export ${facet}active=$failover fi # save the active host for this facet - activevar=${facet}active + local activevar=${facet}active echo "$activevar=${!activevar}" > $TMP/$activevar } do_node() { - HOST=$1 + local verbose=false + # do not stripe off hostname if verbose, bug 19215 + if [ x$1 = x--verbose ]; then + shift + verbose=true + fi + + local HOST=$1 shift local myPDSH=$PDSH if [ "$HOST" = "$HOSTNAME" ]; then @@ -1095,7 +1442,17 @@ do_node() { [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true return 0 fi - $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //" + + if $verbose ; then + # print HOSTNAME for myPDSH="no_dsh" + if [[ $myPDSH = no_dsh ]]; then + $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /" + else + $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" + fi + else + $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //" + fi return ${PIPESTATUS[0]} } @@ -1104,11 +1461,22 @@ single_local_node () { } do_nodes() { + local verbose=false + # do not stripe off hostname if verbose, bug 19215 + if [ x$1 = x--verbose ]; then + shift + verbose=true + fi + local rnodes=$1 shift if $(single_local_node $rnodes); then - do_node $rnodes $@ + if $verbose; then + do_node --verbose $rnodes $@ + else + do_node $rnodes $@ + fi return $? 
fi @@ -1123,14 +1491,18 @@ do_nodes() { $myPDSH $rnodes $LCTL mark "$@" > /dev/null 2>&1 || : fi - $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g" + if $verbose ; then + $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" + else + $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g" + fi return ${PIPESTATUS[0]} } do_facet() { - facet=$1 + local facet=$1 shift - HOST=`facet_active_host $facet` + local HOST=`facet_active_host $facet` [ -z $HOST ] && echo No host defined for facet ${facet} && exit 1 do_node $HOST "$@" } @@ -1171,14 +1543,11 @@ stopall() { fail mds1 fi - # assume client mount is local - grep " $MOUNT " /proc/mounts && zconf_umount $HOSTNAME $MOUNT $* - grep " $MOUNT2 " /proc/mounts && zconf_umount $HOSTNAME $MOUNT2 $* + local clients=$CLIENTS + [ -z $clients ] && clients=$(hostname) - if [ -n "$CLIENTS" ]; then - zconf_umount_clients $CLIENTS $MOUNT "$*" || true - [ -n "$MOUNT2" ] && zconf_umount_clients $CLIENTS $MOUNT2 "$*" || true - fi + zconf_umount_clients $clients $MOUNT "$*" || true + [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true [ "$CLIENTONLY" ] && return # The add fn does rm ${facet}active file, this would be enough @@ -1198,6 +1567,8 @@ stopall() { } cleanupall() { + nfs_client_mode && return + stopall $* unload_modules cleanup_gss @@ -1227,7 +1598,11 @@ formatall() { # We need ldiskfs here, may as well load them all load_modules [ "$CLIENTONLY" ] && return - echo "Formatting mdts, osts" + echo Formatting mgs, mds, osts + if [[ $MDSDEV1 != $MGSDEV ]] || [[ $mds1_HOST != $mgs_HOST ]]; then + add mgs $mgs_MKFS_OPTS $FSTYPE_OPT --reformat $MGSDEV || exit 10 + fi + for num in `seq $MDSCOUNT`; do echo "Format mds$num: $(mdsdevname $num)" if $VERBOSE; then @@ -1292,16 +1667,6 @@ remount_client() zconf_mount `hostname` $1 || error "mount failed" } -set_obd_timeout() { - local facet=$1 - local timeout=$2 - - do_facet $facet lsmod | grep -q obdclass || \ - do_facet $facet "modprobe obdclass" - - do_facet $facet "lctl set_param timeout=$timeout" -} - writeconf_facet () { local facet=$1 local dev=$2 @@ -1322,15 +1687,23 @@ writeconf_all () { } setupall() { + nfs_client_mode && return + + sanity_mount_check || + error "environments are insane!" + load_modules init_gss if [ -z "$CLIENTONLY" ]; then - echo "Setup mdts, osts" + echo Setup mgs, mdt, osts echo $WRITECONF | grep -q "writeconf" && \ writeconf_all + if [[ $mds1_HOST != $mgs_HOST ]] || [[ $MDSDEV1 != $MGSDEV ]]; then + start mgs $MGSDEV $mgs_MOUNT_OPTS + fi + for num in `seq $MDSCOUNT`; do DEVNAME=$(mdsdevname $num) - set_obd_timeout mds$num $TIMEOUT start mds$num $DEVNAME $MDS_MOUNT_OPTS # We started mds, now we should set failover variables properly. @@ -1346,7 +1719,6 @@ setupall() { done for num in `seq $OSTCOUNT`; do DEVNAME=$(ostdevname $num) - set_obd_timeout ost$num $TIMEOUT start ost$num $DEVNAME $OST_MOUNT_OPTS # We started ost$num, now we should set ost${num}failover variable properly. @@ -1371,7 +1743,7 @@ setupall() { [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2 fi - init_versions_vars + init_param_vars # by remounting mdt before ost, initial connect from mdt to ost might # timeout because ost is not ready yet. 
wait some time to its fully @@ -1390,6 +1762,7 @@ mounted_lustre_filesystems() { } init_facet_vars () { + [ "$CLIENTONLY" ] && return 0 local facet=$1 shift local device=$1 @@ -1409,15 +1782,27 @@ init_facet_vars () { if [ -z "${!varname}" ]; then eval $varname=$(facet_host $facet) fi + + # ${facet}failover_dev is set in cfg file + varname=${facet}failover_dev + if [ -n "${!varname}" ] ; then + eval export ${facet}failover_dev=${!varname} + else + eval export ${facet}failover_dev=$device + fi } init_facets_vars () { local DEVNAME - for num in `seq $MDSCOUNT`; do - DEVNAME=`mdsdevname $num` - init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS - done + if ! remote_mds_nodsh; then + for num in `seq $MDSCOUNT`; do + DEVNAME=`mdsdevname $num` + init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS + done + fi + + remote_ost_nodsh && return for num in `seq $OSTCOUNT`; do DEVNAME=`ostdevname $num` @@ -1425,14 +1810,104 @@ init_facets_vars () { done } -init_versions_vars () { - export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2) - export OSTVER=$(do_facet ost1 "lctl get_param version" | cut -d. -f1,2) - export CLIVER=$(lctl get_param version | cut -d. -f 1,2) +osc_ensure_active () { + local facet=$1 + local type=$2 + local timeout=$3 + local period=0 + + while [ $period -lt $timeout ]; do + count=$(do_facet $facet "lctl dl | grep '${FSNAME}-OST.*-osc-${type}' | grep ' IN ' 2>/dev/null | wc -l") + if [ $count -eq 0 ]; then + break + fi + + echo "There are $count OST are inactive, wait $period seconds, and try again" + sleep 3 + period=$((period+3)) + done + + [ $period -lt $timeout ] || log "$count OST are inactive after $timeout seconds, give up" +} + +som_check() { + SOM_ENABLED=$(do_facet $SINGLEMDS "$LCTL get_param mdt.*.som" | awk -F= ' {print $2}' | head -n 1) + echo $SOM_ENABLED +} + +init_param_vars () { + if ! remote_ost_nodsh && ! remote_mds_nodsh; then + export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2) + export OSTVER=$(do_facet ost1 "lctl get_param version" | cut -d. -f1,2) + export CLIVER=$(lctl get_param version | cut -d. 
-f 1,2) + fi + + remote_mds_nodsh || + TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") + + log "Using TIMEOUT=$TIMEOUT" + + osc_ensure_active $SINGLEMDS M $TIMEOUT + osc_ensure_active client c $TIMEOUT + + if [ x"$(som_check)" = x"enabled" ]; then + ENABLE_QUOTA="" + echo "disable quota temporary when SOM enabled" + fi + if [ $QUOTA_AUTO -ne 0 ]; then + if [ "$ENABLE_QUOTA" ]; then + echo "enable quota as required" + setup_quota $MOUNT || return 2 + else + echo "disable quota as required" + $LFS quotaoff -ug $MOUNT > /dev/null 2>&1 + fi + fi + + return 0 } -check_config () { +nfs_client_mode () { + if [ "$NFSCLIENT" ]; then + echo "NFSCLIENT mode: setup, cleanup, check config skipped" + local clients=$CLIENTS + [ -z $clients ] && clients=$(hostname) + + # FIXME: remove hostname when 19215 fixed + do_nodes $clients "echo \\\$(hostname); grep ' '$MOUNT' ' /proc/mounts" + declare -a nfsexport=(`grep ' '$MOUNT' ' /proc/mounts | awk '{print $1}' | awk -F: '{print $1 " " $2}'`) + do_nodes ${nfsexport[0]} "echo \\\$(hostname); df -T ${nfsexport[1]}" + return + fi + return 1 +} + +check_config_client () { local mntpt=$1 + + local mounted=$(mount | grep " $mntpt ") + if [ "$CLIENTONLY" ]; then + # bug 18021 + # CLIENTONLY should not depend on *_HOST settings + local mgc=$($LCTL device_list | awk '/MGC/ {print $4}') + # in theory someone could create a new, + # client-only config file that assumed lustre was already + # configured and didn't set the MGSNID. If MGSNID is not set, + # then we should use the mgs nid currently being used + # as the default value. bug 18021 + [[ x$MGSNID = x ]] && + MGSNID=${mgc//MGC/} + + if [[ x$mgc != xMGC$MGSNID ]]; then + if [ "$mgs_HOST" ]; then + local mgc_ip=$(ping -q -c1 -w1 $mgs_HOST | grep PING | awk '{print $3}' | sed -e "s/(//g" -e "s/)//g") +# [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] || +# error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc" + fi + fi + return 0 + fi + local myMGS_host=$mgs_HOST if [ "$NETTYPE" = "ptl" ]; then myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//) @@ -1442,31 +1917,120 @@ check_config () { local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}') mgshost=$(echo $mgshost | awk -F: '{print $1}') - if [ "$mgshost" != "$myMGS_host" ]; then - FAIL_ON_ERROR=true \ - error "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE - Please use correct config or set mds_HOST correctly!" +# if [ "$mgshost" != "$myMGS_host" ]; then +# log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE +# Please use correct config or set mds_HOST correctly!" +# fi + +} + +check_config_clients () { + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 + + nfs_client_mode && return + + do_rpc_nodes $clients check_config_client $mntpt + + sanity_mount_check || + error "environments are insane!" +} + +check_timeout () { + local mdstimeout=$(do_facet $SINGLEMDS "lctl get_param -n timeout") + local cltimeout=$(lctl get_param -n timeout) + if [ $mdstimeout -ne $TIMEOUT ] || [ $mdstimeout -ne $cltimeout ]; then + error "timeouts are wrong! mds: $mdstimeout, client: $cltimeout, TIMEOUT=$TIMEOUT" + return 1 fi } +is_mounted () { + local mntpt=$1 + local mounted=$(mounted_lustre_filesystems) + + echo $mounted' ' | grep -w -q $mntpt' ' +} + check_and_setup_lustre() { + nfs_client_mode && return + local MOUNTED=$(mounted_lustre_filesystems) - if [ -z "$MOUNTED" ] || ! $(echo $MOUNTED | grep -w -q $MOUNT); then + + local do_check=true + # 1. 
+ # both MOUNT and MOUNT2 are not mounted + if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then [ "$REFORMAT" ] && formatall + # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set) setupall - MOUNTED=$(mounted_lustre_filesystems | head -1) - [ -z "$MOUNTED" ] && error "NAME=$NAME not mounted" + is_mounted $MOUNT || error "NAME=$NAME not mounted" export I_MOUNTED=yes - else - check_config $MOUNT + do_check=false + # 2. + # MOUNT2 is mounted + elif is_mounted $MOUNT2; then + # 3. + # MOUNT2 is mounted, while MOUNT_2 is not set + if ! [ "$MOUNT_2" ]; then + cleanup_mount $MOUNT2 + export I_UMOUNTED2=yes + + # 4. + # MOUNT2 is mounted, MOUNT_2 is set + else + # FIXME: what to do if check_config failed? + # i.e. if: + # 1) remote client has mounted other Lustre fs ? + # 2) it has insane env ? + # let's try umount MOUNT2 on all clients and mount it again: + if ! check_config_clients $MOUNT2; then + cleanup_mount $MOUNT2 + restore_mount $MOUNT2 + export I_MOUNTED2=yes + fi + fi + + # 5. + # MOUNT is mounted MOUNT2 is not mounted + elif [ "$MOUNT_2" ]; then + restore_mount $MOUNT2 + export I_MOUNTED2=yes + fi + + if $do_check; then + # FIXME: what to do if check_config failed? + # i.e. if: + # 1) remote client has mounted other Lustre fs? + # 2) lustre is mounted on remote_clients atall ? + check_config_clients $MOUNT init_facets_vars - init_versions_vars + init_param_vars + + do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=\\\"$PTLDEBUG\\\"; + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; + lctl set_param debug_mb=${DEBUG_SIZE}; + sync" fi if [ "$ONLY" == "setup" ]; then exit 0 fi } +restore_mount () { + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 + + zconf_mount_clients $clients $mntpt +} + +cleanup_mount () { + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 + + zconf_umount_clients $clients $mntpt +} + cleanup_and_setup_lustre() { if [ "$ONLY" == "cleanup" -o "`mount | grep $MOUNT`" ]; then lctl set_param debug=0 || true @@ -1479,13 +2043,23 @@ cleanup_and_setup_lustre() { } check_and_cleanup_lustre() { - if [ "`mount | grep $MOUNT`" ]; then + if is_mounted $MOUNT; then [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* + [ "$ENABLE_QUOTA" ] && restore_quota_type || true + fi + + if [ "$I_UMOUNTED2" = "yes" ]; then + restore_mount $MOUNT2 || error "restore $MOUNT2 failed" + fi + + if [ "$I_MOUNTED2" = "yes" ]; then + cleanup_mount $MOUNT2 fi + if [ "$I_MOUNTED" = "yes" ]; then cleanupall -f || error "cleanup failed" + unset I_MOUNTED fi - unset I_MOUNTED } ####### @@ -1496,8 +2070,7 @@ check_network() { local WAIT=0 local MAX=$2 while [ $NETWORK -eq 0 ]; do - ping -c 1 -w 3 $1 > /dev/null - if [ $? 
-eq 0 ]; then + if ping -c 1 -w 3 $1 > /dev/null; then NETWORK=1 else WAIT=$((WAIT + 5)) @@ -1527,36 +2100,71 @@ comma_list() { echo "$*" | tr -s " " "\n" | sort -b -u | tr "\n" " " | sed 's/ \([^$]\)/,\1/g' } -# list is comma separated list -exclude_item_from_list () { +# list, excluded are the comma separated lists +exclude_items_from_list () { local list=$1 local excluded=$2 + local item list=${list//,/ } - list=$(echo " $list " | sed -re "s/\s+$excluded\s+/ /g") + for item in ${excluded//,/ }; do + list=$(echo " $list " | sed -re "s/\s+$item\s+/ /g") + done echo $(comma_list $list) } +# list, expand are the comma separated lists +expand_list () { + local list=${1//,/ } + local expand=${2//,/ } + local expanded= + + expanded=$(for i in $list $expand; do echo $i; done | sort -u) + echo $(comma_list $expanded) +} + +testslist_filter () { + local script=$LUSTRE/tests/${TESTSUITE}.sh + + [ -f $script ] || return 0 + + local start_at=$START_AT + local stop_at=$STOP_AT + + local var=${TESTSUITE//-/_}_START_AT + [ x"${!var}" != x ] && start_at=${!var} + var=${TESTSUITE//-/_}_STOP_AT + [ x"${!var}" != x ] && stop_at=${!var} + + sed -n 's/^test_\([^ (]*\).*/\1/p' $script | \ + awk ' BEGIN { if ("'${start_at:-0}'" != 0) flag = 1 } + /^'${start_at}'$/ {flag = 0} + {if (flag == 1) print $0} + /^'${stop_at}'$/ { flag = 1 }' +} + absolute_path() { (cd `dirname $1`; echo $PWD/`basename $1`) } -################################## -# Adaptive Timeouts funcs +get_facets () { + local name=$(echo $1 | tr "[:upper:]" "[:lower:]") + local type=$(echo $1 | tr "[:lower:]" "[:upper:]") -at_is_valid() { - if [ -z "$AT_MAX_PATH" ]; then - AT_MAX_PATH=$(do_facet $SINGLEMDS "find /sys/ -name at_max") - [ -z "$AT_MAX_PATH" ] && echo "missing /sys/.../at_max " && return 1 - fi - return 0 + local list="" + local count=${type}COUNT + for ((i=1; i<=${!count}; i++)) do + list="$list ${name}$i" + done + echo $(comma_list $list) } -at_is_enabled() { - at_is_valid || error "invalid call" +################################## +# Adaptive Timeouts funcs +at_is_enabled() { # only check mds, we assume at_max is the same on all nodes - local at_max=$(do_facet $SINGLEMDS "cat $AT_MAX_PATH") + local at_max=$(do_facet $SINGLEMDS "lctl get_param -n at_max") if [ $at_max -eq 0 ]; then return 1 else @@ -1567,13 +2175,11 @@ at_is_enabled() { at_max_get() { local facet=$1 - at_is_valid || error "invalid call" - # suppose that all ost-s has the same at_max set if [ $facet == "ost" ]; then - do_facet ost1 "cat $AT_MAX_PATH" + do_facet ost1 "lctl get_param -n at_max" else - do_facet $facet "cat $AT_MAX_PATH" + do_facet $facet "lctl get_param -n at_max" fi } @@ -1581,20 +2187,19 @@ at_max_set() { local at_max=$1 shift - at_is_valid || error "invalid call" - local facet for facet in $@; do if [ $facet == "ost" ]; then for i in `seq $OSTCOUNT`; do - do_facet ost$i "echo $at_max > $AT_MAX_PATH" + do_facet ost$i "lctl set_param at_max=$at_max" + done elif [ $facet == "mds" ]; then for i in `seq $MDSCOUNT`; do - do_facet mds$i "echo $at_max > $AT_MAX_PATH" + do_facet mds$i "lctl set_param at_max=$at_max" done else - do_facet $facet "echo $at_max > $AT_MAX_PATH" + do_facet $facet "lctl set_param at_max=$at_max" fi done } @@ -1675,12 +2280,7 @@ clear_failloc() { } set_nodes_failloc () { - local nodes=$1 - local node - - for node in $nodes ; do - do_node $node lctl set_param fail_loc=$2 - done + do_nodes $(comma_list $1) lctl set_param fail_loc=$2 } cancel_lru_locks() { @@ -1718,6 +2318,10 @@ pgcache_empty() { return 1 fi done + if [[ $MDSDEV1 != 
$MGSDEV ]]; then + stop mgs + fi + return 0 } @@ -1726,10 +2330,37 @@ debugsave() { } debugrestore() { - [ -n "$DEBUGSAVE" ] && lctl set_param debug="${DEBUGSAVE}" + [ -n "$DEBUGSAVE" ] && \ + do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=\\\"${DEBUGSAVE}\\\";" DEBUGSAVE="" } +debug_size_save() { + DEBUG_SIZE_SAVED="$(lctl get_param -n debug_mb)" +} + +debug_size_restore() { + [ -n "$DEBUG_SIZE_SAVED" ] && \ + do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE_SAVED" + DEBUG_SIZE_SAVED="" +} + +start_full_debug_logging() { + debugsave + debug_size_save + + local FULLDEBUG=-1 + local DEBUG_SIZE=150 + + do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE" + do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=$FULLDEBUG;" +} + +stop_full_debug_logging() { + debug_size_restore + debugrestore +} + ################################## # Test interface ################################## @@ -1737,15 +2368,22 @@ debugrestore() { error_noexit() { local TYPE=${TYPE:-"FAIL"} local ERRLOG - lctl set_param fail_loc=0 2>/dev/null || true + + local dump=true + # do not dump logs if $1=false + if [ "x$1" = "xfalse" ]; then + shift + dump=false + fi + log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ " - ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s) - echo "Dumping lctl log to $ERRLOG" - # We need to dump the logs on all nodes - local NODES=$(nodes_list) - for NODE in $NODES; do - do_node $NODE $LCTL dk $ERRLOG - done + + if $dump; then + ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s) + echo "Dumping lctl log to $ERRLOG" + # We need to dump the logs on all nodes + do_nodes $(comma_list $(nodes_list)) $NODE $LCTL dk $ERRLOG + fi debugrestore [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG TEST_FAILED=true @@ -1753,7 +2391,10 @@ error_noexit() { error() { error_noexit "$@" - $FAIL_ON_ERROR && exit 1 || true + if $FAIL_ON_ERROR; then + reset_fail_loc + exit 1 + fi } error_exit() { @@ -1770,13 +2411,20 @@ error_ignore() { error_noexit "$@" } +skip_env () { + $FAIL_ON_SKIP_ENV && error false $@ || skip $@ +} + skip () { - log " SKIP: ${TESTSUITE} ${TESTNAME} $@" - [ "$TESTSUITELOG" ] && \ - echo "${TESTSUITE}: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true + echo + log " SKIP: ${TESTSUITE} ${TESTNAME} $@" + [ "$TESTSUITELOG" ] && \ + echo "${TESTSUITE}: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true } build_test_filter() { + EXCEPT="$EXCEPT $(testslist_filter)" + [ "$ONLY" ] && log "only running test `echo $ONLY`" for O in $ONLY; do eval ONLY_${O}=true @@ -1796,12 +2444,12 @@ build_test_filter() { done } -_basetest() { - echo $* -} - basetest() { - IFS=abcdefghijklmnopqrstuvwxyz _basetest $1 + if [[ $1 = [a-z]* ]]; then + echo $1 + else + echo ${1%%[a-z]*} + fi } # print a newline if the last test was skipped @@ -1869,10 +2517,10 @@ equals_msg() { log() { echo "$*" - lsmod | grep lnet > /dev/null || load_modules + module_loaded lnet || load_modules local MSG="$*" - # Get rif of ' + # Get rid of ' MSG=${MSG//\'/\\\'} MSG=${MSG//\(/\\\(} MSG=${MSG//\)/\\\)} @@ -1881,10 +2529,7 @@ log() { MSG=${MSG//\>/\\\>} MSG=${MSG//\ /dev/null || true - done + do_nodes $(comma_list $(nodes_list)) $LCTL mark "$MSG" 2> /dev/null || true } trace() { @@ -1907,12 +2552,9 @@ check_mds() { } reset_fail_loc () { - local myNODES=$(nodes_list) - local NODE - - for NODE in $myNODES; do - do_node $NODE "lctl set_param fail_loc=0 2>/dev/null || true" - done + echo -n "Resetting fail_loc on all nodes..." 
+ do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 2>/dev/null || true" + echo done. } run_one() { @@ -1924,8 +2566,9 @@ run_one() { local SAVE_UMASK=`umask` umask 0022 - BEFORE=`date +%s` - log "== test $testnum: $message ============ `date +%H:%M:%S` ($BEFORE)" + local BEFORE=`date +%s` + echo + log "== test $testnum: $message == `date +%H:%M:%S` ($BEFORE)" #check_mds export TESTNAME=test_$testnum TEST_FAILED=false @@ -2025,6 +2668,7 @@ remote_mds () remote_mds_nodsh() { + [ "$CLIENTONLY" ] && return 0 || true remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ] } @@ -2039,9 +2683,17 @@ remote_ost () remote_ost_nodsh() { + [ "$CLIENTONLY" ] && return 0 || true remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ] } +remote_mgs_nodsh() +{ + local MGS + MGS=$(facet_host mgs) + remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ] +} + mdts_nodes () { local MDSNODES local NODES_sort @@ -2121,13 +2773,17 @@ get_random_entry () { rnodes=${rnodes//,/ } - local nodes=($rnodes) + local -a nodes=($rnodes) local num=${#nodes[@]} - local i=$((RANDOM * num / 65536)) + local i=$((RANDOM * num * 2 / 65536)) echo ${nodes[i]} } +client_only () { + [ "$CLIENTONLY" ] || [ "$CLIENTMODSONLY" = yes ] +} + is_patchless () { lctl get_param version | grep -q patchless @@ -2202,6 +2858,35 @@ check_runas_id() { add user $myRUNAS_UID:$myRUNAS_GID on these nodes." } +# obtain the UID/GID for MPI_USER +get_mpiuser_id() { + local mpi_user=$1 + + MPI_USER_UID=$(do_facet client "getent passwd $mpi_user | cut -d: -f3; +exit \\\${PIPESTATUS[0]}") || error_exit "failed to get the UID for $mpi_user" + + MPI_USER_GID=$(do_facet client "getent passwd $mpi_user | cut -d: -f4; +exit \\\${PIPESTATUS[0]}") || error_exit "failed to get the GID for $mpi_user" +} + +# obtain and cache Kerberos ticket-granting ticket +refresh_krb5_tgt() { + local myRUNAS_UID=$1 + local myRUNAS_GID=$2 + shift 2 + local myRUNAS=$@ + if [ -z "$myRUNAS" ]; then + error_exit "myRUNAS command must be specified for refresh_krb5_tgt" + fi + + CLIENTS=${CLIENTS:-$HOSTNAME} + do_nodes $CLIENTS "set -x +if ! $myRUNAS krb5_login.sh; then + echo "Failed to refresh Krb5 TGT for UID/GID $myRUNAS_UID/$myRUNAS_GID." + exit 1 +fi" +} + # Run multiop in the background, but wait for it to print # "PAUSING" to its stdout before returning from this function. 
multiop_bg_pause() { @@ -2231,6 +2916,19 @@ multiop_bg_pause() { return 0 } +do_and_time () { + local cmd=$1 + local rc + + SECONDS=0 + eval '$cmd' + + [ ${PIPESTATUS[0]} -eq 0 ] || rc=1 + + echo $SECONDS + return $rc +} + inodes_available () { local IFree=$($LFS df -i $MOUNT | grep ^$FSNAME | awk '{print $4}' | sort -un | head -1) || return 1 echo $IFree @@ -2265,6 +2963,7 @@ calc_sum () { } calc_osc_kbytes () { + df $MOUNT > /dev/null $LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum } @@ -2272,7 +2971,7 @@ calc_osc_kbytes () { # generate a stream of formatted strings ( =) save_lustre_params() { local s - do_node $1 "lctl get_param $2" | while read s; do echo "$1 $s"; done + do_nodes --verbose $1 "lctl get_param $2 | while read s; do echo \\\$s; done" } # restore lustre parameters from input stream, produces by save_lustre_params @@ -2281,16 +2980,19 @@ restore_lustre_params() { local name local val while IFS=" =" read node name val; do - do_node $node "lctl set_param -n $name $val" + do_node ${node//:/} "lctl set_param -n $name $val" done } -check_catastrophe () { +check_catastrophe() { local rnodes=${1:-$(comma_list $(remote_nodes_list))} + local C=$CATASTROPHE + [ -f $C ] && [ $(cat $C) -ne 0 ] && return 1 - [ -f $CATASTROPHE ] && [ $(cat $CATASTROPHE) -ne 0 ] && return 1 if [ $rnodes ]; then - do_nodes $rnodes "set -x; [ -f $CATASTROPHE ] && { [ \`cat $CATASTROPHE\` -eq 0 ] || false; } || true" + do_nodes $rnodes "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0); +if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi +exit \\\$rc;" fi } @@ -2309,51 +3011,376 @@ get_stripe_info() { stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file` stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file` - stripe_index=`awk '/obdidx/ {start = 1; getline; print $1; exit}' $tmp_file` + stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file` rm -f $tmp_file } # CMD: determine mds index where directory inode presents get_mds_dir () { local dir=$1 - local file=$dir/$tfile + local file=$dir/f0.get_mds_dir_tmpfile + mkdir -p $dir rm -f $file + sleep 1 local iused=$(lfs df -i $dir | grep MDT | awk '{print $3}') - local oldused=($iused) + local -a oldused=($iused) - touch $file + openfile -f O_CREAT:O_LOV_DELAY_CREATE -m 0644 $file > /dev/null sleep 1 iused=$(lfs df -i $dir | grep MDT | awk '{print $3}') - local newused=($iused) + local -a newused=($iused) local num=0 for ((i=0; i<${#newused[@]}; i++)); do if [ ${oldused[$i]} -lt ${newused[$i]} ]; then echo $(( i + 1 )) - rm -f $dir/$tfile + rm -f $file return 0 fi done error "mdt-s : inodes count OLD ${oldused[@]} NEW ${newused[@]}" } -mpi_run () { - local mpirun="$MPIRUN $MPIRUN_OPTIONS" - local command="$mpirun $@" +mdsrate_cleanup () { + if [ -d $4 ]; then + mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6 + rmdir $4 + fi +} + +delayed_recovery_enabled () { + local var=${SINGLEMDS}_svc + do_facet $SINGLEMDS lctl get_param -n mdd.${!var}.stale_export_age > /dev/null 2>&1 +} + +######################## + +convert_facet2label() { + local facet=$1 + + if [ x$facet = xost ]; then + facet=ost1 + fi + + local varsvc=${facet}_svc - if [ "$MPI_USER" != root -a $mpirun ]; then - echo "+ chmod 0777 $MOUNT" - chmod 0777 $MOUNT - command="su $MPI_USER sh -c \"$command \"" + if [ -n ${!varsvc} ]; then + echo ${!varsvc} + else + error "No lablel for $facet!" 
fi +} - ls -ald $MOUNT - echo "+ $command" - eval $command +get_clientosc_proc_path() { + local ost=$1 + + echo "{$1}-osc-*" } -mdsrate_cleanup () { - mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 +get_lustre_version () { + local node=${1:-"mds"} + do_facet $node $LCTL get_param -n version | awk '/^lustre:/ {print $2}' +} + +get_mds_version_major () { + local version=$(get_lustre_version mds) + echo $version | awk -F. '{print $1}' +} + +get_mds_version_minor () { + local version=$(get_lustre_version mds) + echo $version | awk -F. '{print $2}' +} + +get_mdtosc_proc_path() { + local ost=$1 + local major=$(get_mds_version_major) + local minor=$(get_mds_version_minor) + if [ $major -le 1 -a $minor -le 8 ] ; then + echo "${ost}-osc" + else + echo "${ost}-osc-MDT0000" + fi +} + +get_osc_import_name() { + local facet=$1 + local ost=$2 + local label=$(convert_facet2label $ost) + + if [ "$facet" == "mds" ]; then + get_mdtosc_proc_path $label + return 0 + fi + + get_clientosc_proc_path $label + return 0 +} + +wait_import_state () { + local expected=$1 + local CONN_PROC=$2 + local CONN_STATE + local i=0 + + CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2) + while [ "${CONN_STATE}" != "${expected}" ]; do + if [ "${expected}" == "DISCONN" ]; then + # for disconn we can check after proc entry is removed + [ "x${CONN_STATE}" == "x" ] && return 0 + # with AT we can have connect request timeout ~ reconnect timeout + # and test can't see real disconnect + [ "${CONN_STATE}" == "CONNECTING" ] && return 0 + fi + # disconnect rpc should be wait not more obd_timeout + [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \ + error "can't put import for $CONN_PROC into ${expected} state" && return 1 + sleep 1 + CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2) + i=$(($i + 1)) + done + + log "$CONN_PROC now in ${CONN_STATE} state" + return 0 +} + +wait_osc_import_state() { + local facet=$1 + local ost_facet=$2 + local expected=$3 + local ost=$(get_osc_import_name $facet $ost_facet) + local CONN_PROC + local CONN_STATE + local i=0 + + CONN_PROC="osc.${ost}.ost_server_uuid" + CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2) + while [ "${CONN_STATE}" != "${expected}" ]; do + if [ "${expected}" == "DISCONN" ]; then + # for disconn we can check after proc entry is removed + [ "x${CONN_STATE}" == "x" ] && return 0 + # with AT we can have connect request timeout ~ reconnect timeout + # and test can't see real disconnect + [ "${CONN_STATE}" == "CONNECTING" ] && return 0 + fi + # disconnect rpc should be wait not more obd_timeout + [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \ + error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1 + sleep 1 + CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2) + i=$(($i + 1)) + done + + log "${ost_facet} now in ${CONN_STATE} state" + return 0 +} + +get_clientmdc_proc_path() { + echo "${1}-mdc-*" +} + +do_rpc_nodes () { + local list=$1 + shift + + do_nodes --verbose $list "PATH=$LUSTRE/tests/:$PATH sh rpc.sh $@ " +} + +wait_clients_import_state () { + local list=$1 + local facet=$2 + local expected=$3 + shift + + local label=$(convert_facet2label $facet) + local proc_path + case $facet in + ost* ) proc_path="osc.$(get_clientosc_proc_path $label).ost_server_uuid" ;; + mds* ) proc_path="mdc.$(get_clientmdc_proc_path $label).mds_server_uuid" ;; + *) error "unknown facet!" ;; + esac + + if ! 
do_rpc_nodes $list wait_import_state $expected $proc_path; then + error "import is not in ${expected} state" + return 1 + fi +} + +oos_full() { + local -a AVAILA + local -a GRANTA + local OSCFULL=1 + AVAILA=($(do_nodes $(comma_list $(osts_nodes)) \ + $LCTL get_param obdfilter.*.kbytesavail)) + GRANTA=($(do_nodes $(comma_list $(osts_nodes)) \ + $LCTL get_param -n obdfilter.*.tot_granted)) + for ((i=0; i<${#AVAILA[@]}; i++)); do + local -a AVAIL1=(${AVAILA[$i]//=/ }) + GRANT=$((${GRANTA[$i]}/1024)) + echo -n $(echo ${AVAIL1[0]} | cut -d"." -f2) avl=${AVAIL1[1]} grnt=$GRANT diff=$((AVAIL1[1] - GRANT)) + [ $((AVAIL1[1] - GRANT)) -lt 400 ] && OSCFULL=0 && echo " FULL" || echo + done + return $OSCFULL +} + +pool_list () { + do_facet mgs lctl pool_list $1 +} + +create_pool() { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + do_facet mgs lctl pool_new $1 + local RC=$? + # get param should return err unless pool is created + [[ $RC -ne 0 ]] && return $RC + + wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ + 2>/dev/null || echo foo" "" || RC=1 + if [[ $RC -eq 0 ]]; then + add_pool_to_list $1 + else + error "pool_new failed $1" + fi + return $RC +} + +add_pool_to_list () { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + local listvar=${fsname}_CREATED_POOLS + eval export ${listvar}=$(expand_list ${!listvar} $poolname) +} + +remove_pool_from_list () { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + local listvar=${fsname}_CREATED_POOLS + eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname) +} + +destroy_pool_int() { + local ost + local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \ + awk '$1 !~ /^Pool:/ {print $1}') + for ost in $OSTS; do + do_facet mgs lctl pool_remove $1 $ost + done + do_facet mgs lctl pool_destroy $1 +} + +# . or +destroy_pool() { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + [[ x$fsname = x$poolname ]] && fsname=$FSNAME + + local RC + + pool_list $fsname.$poolname || return $? + + destroy_pool_int $fsname.$poolname + RC=$? + [[ $RC -ne 0 ]] && return $RC + + wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ + 2>/dev/null || echo foo" "foo" || RC=1 + + if [[ $RC -eq 0 ]]; then + remove_pool_from_list $fsname.$poolname + else + error "destroy pool failed $1" + fi + return $RC +} + +destroy_pools () { + local fsname=${1:-$FSNAME} + local poolname + local listvar=${fsname}_CREATED_POOLS + + pool_list $fsname + + [ x${!listvar} = x ] && return 0 + + echo destroy the created pools: ${!listvar} + for poolname in ${!listvar//,/ }; do + destroy_pool $fsname.$poolname + done +} + +cleanup_pools () { + local fsname=${1:-$FSNAME} + trap 0 + destroy_pools $fsname +} + +gather_logs () { + local list=$1 + + local ts=$(date +%s) + + # bug 20237, comment 11 + # It would also be useful to provide the option + # of writing the file to an NFS directory so it doesn't need to be copied. + local tmp=$TMP + local docp=true + [ -d "$SHARED_DIR_LOGS" ] && tmp=$SHARED_DIR_LOGS && docp=false + + # dump lustre logs, dmesg + do_nodes $list "log=$tmp/\\\$(hostname)-debug-$ts.log ; +lctl dk \\\$log >/dev/null; +log=$tmp/\\\$(hostname)-dmesg-$ts.log; +dmesg > \\\$log; " + + # FIXME: does it make sense to collect the logs for $ts only, but all + # TESTSUITE logs? 
+ # rsync $TMP/*${TESTSUITE}* to gather the logs dumped by error fn + local logs=$TMP/'*'${TESTSUITE}'*' + if $docp; then + logs=$logs' '$tmp/'*'$ts'*' + fi + for node in ${list//,/ }; do + rsync -az $node:"$logs" $TMP + done + + local archive=$TMP/${TESTSUITE}-$ts.tar.bz2 + tar -jcf $archive $tmp/*$ts* $TMP/*${TESTSUITE}* + + echo $archive +} + +cleanup_logs () { + local list=${1:-$(comma_list $(nodes_list))} + + [ -n ${TESTSUITE} ] && do_nodes $list "rm -f $TMP/*${TESTSUITE}*" || true +} + +do_ls () { + local mntpt_root=$1 + local num_mntpts=$2 + local dir=$3 + local i + local cmd + local pids + local rc=0 + + for i in $(seq 0 $num_mntpts); do + cmd="ls -laf ${mntpt_root}$i/$dir" + echo + $cmd; + $cmd > /dev/null & + pids="$pids $!" + done + echo pids=$pids + for pid in $pids; do + wait $pid || rc=$? + done + + return $rc }
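# A minimal sketch (assumed, not part of this diff) of how a test script
# typically drives this framework; run_test and the cfg/$NAME.sh config file
# are the usual wrapper and configuration assumed to exist alongside the
# functions above:
#
#   LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
#   . $LUSTRE/tests/test-framework.sh
#   init_test_env $@
#   . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
#   build_test_filter
#   check_and_setup_lustre
#
#   test_1() {
#       touch $DIR/$tfile || error "touch $tfile failed"
#   }
#   run_test 1 "create a file in $DIR"
#
#   check_and_cleanup_lustre
#   print_summary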