X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=a1b9f03f0f8dcf9089e9d5bf59f708ebbe9f8dc2;hp=e081f8d22ac5392688daadfa2fad52247d34ec1d;hb=e2cd3061d9419393bc55b19df2d9d51d0cbe8932;hpb=852f34ac50727d3a012b9b325f9614b2b4fa7db7 diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index e081f8d..a1b9f03 100644 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -18,17 +18,9 @@ export IDENTITY_UPCALL=default #export PDSH="pdsh -S -Rssh -w" -# eg, assert_env LUSTRE MDSNODES OSTNODES CLIENTS -assert_env() { - local failed="" - for name in $@; do - if [ -z "${!name}" ]; then - echo "$0: $name must be set" - failed=1 - fi - done - [ $failed ] && exit 1 || true -} +# function used by scripts run on remote nodes +LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)} +. $LUSTRE/tests/functions.sh assert_DIR () { local failed="" @@ -106,11 +98,14 @@ init_test_env() { if ! echo $PATH | grep -q $LUSTRE/tests; then export PATH=$PATH:$LUSTRE/tests fi - export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mdsrate"} + export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"} [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null) if ! echo $PATH | grep -q $LUSTRE/tests/racer; then export PATH=$PATH:$LUSTRE/tests/racer fi + if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then + export PATH=$PATH:$LUSTRE/tests/mpi + fi export LCTL=${LCTL:-"$LUSTRE/utils/lctl"} [ ! -f "$LCTL" ] && export LCTL=$(which lctl) export LFS=${LFS:-"$LUSTRE/utils/lfs"} @@ -134,7 +129,7 @@ init_test_env() { [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \ export LGSSD=$(which lgssd) export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"} - [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd) + [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null) export KRB5DIR=${KRB5DIR:-"/usr/kerberos"} export DIR2 export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests} @@ -225,8 +220,8 @@ load_modules() { echo Loading modules from $LUSTRE load_module ../libcfs/libcfs/libcfs - [ "$PTLDEBUG" ] && lctl set_param debug=$PTLDEBUG - [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug=${SUBSYSTEM# } + [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG" + [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }" local MODPROBECONF= [ -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf [ ! "$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre @@ -460,7 +455,7 @@ ostdevlabel() { mount_facet() { local facet=$1 shift - local dev=${facet}_dev + local dev=$(facet_active $facet)_dev local opt=${facet}_opt echo "Starting ${facet}: ${!opt} $@ ${!dev} ${MOUNT%/*}/${facet}" do_facet ${facet} mount -t lustre ${!opt} $@ ${!dev} ${MOUNT%/*}/${facet} @@ -469,8 +464,8 @@ mount_facet() { echo "mount -t lustre $@ ${!dev} ${MOUNT%/*}/${facet}" echo "Start of ${!dev} on ${facet} failed ${RC}" else - do_facet ${facet} "lctl set_param debug=$PTLDEBUG; \ - lctl set_param subsystem_debug=${SUBSYSTEM# }; \ + do_facet ${facet} "lctl set_param debug=\\\"$PTLDEBUG\\\"; \ + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; \ lctl set_param debug_mb=${DEBUG_SIZE}; \ sync" @@ -490,6 +485,14 @@ start() { shift eval export ${facet}_dev=${device} eval export ${facet}_opt=\"$@\" + + local varname=${facet}failover_dev + if [ -n "${!varname}" ] ; then + eval export ${facet}failover_dev=${!varname} + else + eval export ${facet}failover_dev=$device + fi + do_facet ${facet} mkdir -p ${MOUNT%/*}/${facet} mount_facet ${facet} RC=$? 
@@ -514,6 +517,91 @@ stop() { wait_exit_ST ${facet} } +# save quota version (both administrative and operational quotas) +# add an additional parameter if mountpoint is ever different from $MOUNT +quota_save_version() { + local fsname=${2:-$FSNAME} + local spec=$1 + local ver=$(tr -c -d "123" <<< $spec) + local type=$(tr -c -d "ug" <<< $spec) + + [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier" + + $LFS quotaoff -ug $MOUNT # just in case + [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; } + + do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec" + local varsvc + local osts=$(get_facets OST) + for ost in ${osts//,/ }; do + varsvc=${ost}_svc + do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec" + done +} + +# client could mount several lustre +quota_type () { + local fsname=${1:-$FSNAME} + local rc=0 + do_facet mgs lctl get_param mdd.${fsname}-MDT*.quota_type || rc=$? + do_nodes $(comma_list $(osts_nodes)) \ + lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$? + return $rc +} + +restore_quota_type () { + local mntpt=${1:-$MOUNT} + local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2) + if [ ! "$old_QUOTA_TYPE" ] || [ "$quota_type" = "$old_QUOTA_TYPE" ]; then + return + fi + quota_save_version $old_QUOTA_TYPE +} + +setup_quota(){ + local mntpt=$1 + + # We need: + # 1. run quotacheck only if quota is off + # 2. save the original quota_type params, restore them after testing + + # Suppose that quota type the same on mds and ost + local quota_type=$(quota_type | grep MDT | cut -d "=" -f2) + [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!" + echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]" + if [ "$quota_type" != "$QUOTA_TYPE" ]; then + export old_QUOTA_TYPE=$quota_type + quota_save_version $QUOTA_TYPE + fi + + local quota_usrs=$QUOTA_USERS + + # get_filesystem_size + local disksz=$(lfs df $mntpt | grep "filesystem summary:" | awk '{print $3}') + local blk_soft=$((disksz + 1024)) + local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over + + local Inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | awk '{print $3}') + local i_soft=$Inodes + local i_hard=$((i_soft + i_soft / 20)) + + echo "Total disk size: $disksz block-softlimit: $blk_soft block-hardlimit: + $blk_hard inode-softlimit: $i_soft inode-hardlimit: $i_hard" + + local cmd + for usr in $quota_usrs; do + echo "Setting up quota on $HOSTNAME:$mntpt for $usr..." + for type in u g; do + cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt" + echo "+ $cmd" + eval $cmd || error "$cmd FAILED!" 
+ done + # display the quota status + echo "Quota settings for $usr : " + $LFS quota -v -u $usr $mntpt || true + done +} + zconf_mount() { local OPTIONS local client=$1 @@ -532,8 +620,8 @@ zconf_mount() { do_node $client mkdir -p $mnt do_node $client mount -t lustre $OPTIONS $device $mnt || return 1 - do_node $client "lctl set_param debug=$PTLDEBUG; - lctl set_param subsystem_debug=${SUBSYSTEM# }; + do_node $client "lctl set_param debug=\\\"$PTLDEBUG\\\"; + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; lctl set_param debug_mb=${DEBUG_SIZE}" return 0 @@ -566,6 +654,64 @@ zconf_umount() { fi } +# nodes is comma list +sanity_mount_check_nodes () { + local nodes=$1 + shift + local mnts="$@" + local mnt + + # FIXME: assume that all cluster nodes run the same os + [ "$(uname)" = Linux ] || return 0 + + local rc=0 + for mnt in $mnts ; do + do_nodes $nodes "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts); +mpts=\\\$(mount | grep -w -c $mnt); +if [ \\\$running -ne \\\$mpts ]; then + echo \\\$(hostname) env are INSANE!; + exit 1; +fi" + [ $? -eq 0 ] || rc=1 + done + return $rc +} + +sanity_mount_check_servers () { + [ "$CLIENTONLY" ] && + { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true + echo Checking servers environments + + # FIXME: modify get_facets to display all facets wo params + local facets="$(get_facets OST),$(get_facets MDS)" + local node + local mnt + local facet + for facet in ${facets//,/ }; do + node=$(facet_host ${facet}) + mnt=${MOUNT%/*}/${facet} + sanity_mount_check_nodes $node $mnt || + { error "server $node environments are insane!"; return 1; } + done +} + +sanity_mount_check_clients () { + local clients=${1:-$CLIENTS} + local mntpt=${2:-$MOUNT} + local mntpt2=${3:-$MOUNT2} + + [ -z $clients ] && clients=$(hostname) + echo Checking clients $clients environments + + sanity_mount_check_nodes $clients $mntpt $mntpt2 || + error "clients environments are insane!" 
+} + +sanity_mount_check () { + sanity_mount_check_servers || return 1 + sanity_mount_check_clients || return 2 +} + # mount clients if not mouted zconf_mount_clients() { local OPTIONS @@ -584,13 +730,22 @@ zconf_mount_clients() { fi echo "Starting client $clients: $OPTIONS $device $mnt" - do_nodes $clients "mount | grep $mnt || { mkdir -p $mnt && mount -t lustre $OPTIONS $device $mnt || false; }" + + do_nodes $clients "set -x; +running=\\\$(mount | grep -c $mnt' '); +rc=0; +if [ \\\$running -eq 0 ] ; then + mkdir -p $mnt; + mount -t lustre $OPTIONS $device $mnt; + rc=$?; +fi; +exit $rc" echo "Started clients $clients: " - do_nodes $clients "mount | grep $mnt" + do_nodes $clients "mount | grep -w $mnt" - do_nodes $clients "lctl set_param debug=$PTLDEBUG; - lctl set_param subsystem_debug=${SUBSYSTEM# }; + do_nodes $clients "lctl set_param debug=\\\"$PTLDEBUG\\\"; + lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; lctl set_param debug_mb=${DEBUG_SIZE};" return 0 @@ -604,20 +759,20 @@ zconf_umount_clients() { [ "$3" ] && force=-f echo "Stopping clients: $clients $mnt (opts:$force)" - do_nodes $clients "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts) + do_nodes $clients "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts); if [ \\\$running -ne 0 ] ; then -echo Stopping client \\\$(hostname) client $mnt opts:$force -lsof -t $mnt || need_kill=no +echo Stopping client \\\$(hostname) client $mnt opts:$force; +lsof -t $mnt || need_kill=no; if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then pids=\\\$(lsof -t $mnt | sort -u); - if [ -n \\\$pids ]; then - kill -9 \\\$pids + if [ -n \\\"\\\$pids\\\" ]; then + kill -9 \\\$pids; fi -fi -busy=\\\$(umount $force $mnt 2>&1 | grep -c "busy") +fi; +busy=\\\$(umount $force $mnt 2>&1 | grep -c "busy"); if [ \\\$busy -ne 0 ] ; then - echo "$mnt is still busy, wait one second" && sleep 1 - umount $force $mnt + echo "$mnt is still busy, wait one second" && sleep 1; + umount $force $mnt; fi fi" } @@ -679,65 +834,96 @@ check_progs_installed () { shift local progs=$@ - do_nodes $clients "set -x ; PATH=:$PATH status=true; for prog in $progs; do - which \\\$prog || { echo \\\$prog missing on \\\$(hostname) && status=false; } - done; - eval \\\$status" + do_nodes $clients "set -x ; PATH=:$PATH; status=true; +for prog in $progs; do + if ! [ \\\"\\\$(which \\\$prog)\\\" -o \\\"\\\${!prog}\\\" ]; then + echo \\\$prog missing on \\\$(hostname); + status=false; + fi +done; +eval \\\$status" +} + +client_var_name() { + echo __$(echo $1 | tr '-' 'X') } start_client_load() { local client=$1 - local var=${client}_load + local load=$2 + local var=$(client_var_name $client)_load + eval export ${var}=$load do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \ BREAK_ON_ERROR=$BREAK_ON_ERROR \ END_RUN_FILE=$END_RUN_FILE \ LOAD_PID_FILE=$LOAD_PID_FILE \ TESTSUITELOG=$TESTSUITELOG \ - run_${!var}.sh" & + run_${load}.sh" & CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $!" 
- log "Started client load: ${!var} on $client" + log "Started client load: ${load} on $client" return 0 } start_client_loads () { - local clients=(${1//,/ }) + local -a clients=(${1//,/ }) local numloads=${#CLIENT_LOADS[@]} local testnum for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do testnum=$((nodenum % numloads)) - eval export ${clients[nodenum]}_load=${CLIENT_LOADS[testnum]} - start_client_load ${clients[nodenum]} + start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]} done } # only for remote client check_client_load () { local client=$1 - local var=${client}_load - + local var=$(client_var_name $client)_load local TESTLOAD=run_${!var}.sh ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1 - - check_catastrophe $client || return 2 - - # see if the load is still on the client + + # bug 18914: try to connect several times not only when + # check ps, but while check_catastrophe also local tries=3 local RC=254 while [ $RC = 254 -a $tries -gt 0 ]; do let tries=$tries-1 # assume success RC=0 + if ! check_catastrophe $client; then + RC=${PIPESTATUS[0]} + if [ $RC -eq 254 ]; then + # FIXME: not sure how long we shuold sleep here + sleep 10 + continue + fi + echo "check catastrophe failed: RC=$RC " + return $RC + fi + done + # We can continue try to connect if RC=254 + # Just print the warning about this + if [ $RC = 254 ]; then + echo "got a return status of $RC from do_node while checking catastrophe on $client" + fi + + # see if the load is still on the client + tries=3 + RC=254 + while [ $RC = 254 -a $tries -gt 0 ]; do + let tries=$tries-1 + # assume success + RC=0 if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then RC=${PIPESTATUS[0]} sleep 30 fi done if [ $RC = 254 ]; then - echo "got a return status of $RC from do_node while checking (i.e. with 'ps') the client load on the remote system" + echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client" # see if we can diagnose a bit why this is fi @@ -767,7 +953,7 @@ restart_client_loads () { for client in $clients; do check_client_load $client rc=${PIPESTATUS[0]} - if [ "$rc" != 0 -a "$expectedfail"]; then + if [ "$rc" != 0 -a "$expectedfail" ]; then start_client_load $client echo "Restarted client load: on $client. Checking ..." check_client_load $client @@ -818,12 +1004,13 @@ wait_update () { local RESULT local WAIT=0 local sleep=5 - while [ $WAIT -lt $MAX ]; do + while [ true ]; do RESULT=$(do_node $node "$TEST") if [ "$RESULT" == "$FINAL" ]; then echo "Updated after $WAIT sec: wanted '$FINAL' got '$RESULT'" return 0 fi + [ $WAIT -ge $MAX ] && break echo "Waiting $((MAX - WAIT)) secs for update" WAIT=$((WAIT + sleep)) sleep $sleep @@ -834,7 +1021,7 @@ wait_update () { wait_update_facet () { local facet=$1 - wait_update $(facet_host $facet) $@ + wait_update $(facet_active_host $facet) "$@" } wait_delete_completed () { @@ -932,6 +1119,7 @@ wait_remote_prog () { local pids=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep | awk '{print $2}') [ -z "$pids" ] && return 0 echo "$PDSH processes still exists after $WAIT seconds. 
Still running: $pids" + # FIXME: not portable for pid in $pids; do cat /proc/${pid}/status || true cat /proc/${pid}/wchan || true @@ -992,8 +1180,8 @@ replay_barrier() { do_facet $facet sync df $MOUNT local svc=${facet}_svc - do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL --device %${!svc} notransno + do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" $LCTL mark "local REPLAY BARRIER on ${!svc}" } @@ -1003,8 +1191,8 @@ replay_barrier_nodf() { do_facet $facet sync local svc=${facet}_svc echo Replay barrier on ${!svc} - do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL --device %${!svc} notransno + do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" $LCTL mark "local REPLAY BARRIER on ${!svc}" } @@ -1013,8 +1201,8 @@ replay_barrier_nosync() { local facet=$1 echo running=${running} local svc=${facet}_svc echo Replay barrier on ${!svc} - do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL --device %${!svc} notransno + do_facet $facet $LCTL --device %${!svc} readonly do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" $LCTL mark "local REPLAY BARRIER on ${!svc}" } @@ -1147,17 +1335,17 @@ facet_active_host() { change_active() { local facet=$1 - failover=${facet}failover + local failover=${facet}failover host=`facet_host $failover` [ -z "$host" ] && return - curactive=`facet_active $facet` + local curactive=`facet_active $facet` if [ -z "${curactive}" -o "$curactive" == "$failover" ] ; then eval export ${facet}active=$facet else eval export ${facet}active=$failover fi # save the active host for this facet - activevar=${facet}active + local activevar=${facet}active echo "$activevar=${!activevar}" > $TMP/$activevar } @@ -1219,9 +1407,9 @@ do_nodes() { } do_facet() { - facet=$1 + local facet=$1 shift - HOST=`facet_active_host $facet` + local HOST=`facet_active_host $facet` [ -z $HOST ] && echo No host defined for facet ${facet} && exit 1 do_node $HOST "$@" } @@ -1262,14 +1450,11 @@ stopall() { fail mds1 fi - # assume client mount is local - grep " $MOUNT " /proc/mounts && zconf_umount $HOSTNAME $MOUNT $* - grep " $MOUNT2 " /proc/mounts && zconf_umount $HOSTNAME $MOUNT2 $* + local clients=$CLIENTS + [ -z $clients ] && clients=$(hostname) - if [ -n "$CLIENTS" ]; then - zconf_umount_clients $CLIENTS $MOUNT "$*" || true - [ -n "$MOUNT2" ] && zconf_umount_clients $CLIENTS $MOUNT2 "$*" || true - fi + zconf_umount_clients $clients $MOUNT "$*" || true + [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true [ "$CLIENTONLY" ] && return # The add fn does rm ${facet}active file, this would be enough @@ -1403,6 +1588,9 @@ writeconf_all () { } setupall() { + sanity_mount_check || + error "environments are insane!" + load_modules init_gss if [ -z "$CLIENTONLY" ]; then @@ -1469,6 +1657,7 @@ mounted_lustre_filesystems() { } init_facet_vars () { + [ "$CLIENTONLY" ] && return 0 local facet=$1 shift local device=$1 @@ -1488,15 +1677,27 @@ init_facet_vars () { if [ -z "${!varname}" ]; then eval $varname=$(facet_host $facet) fi + + # ${facet}failover_dev is set in cfg file + varname=${facet}failover_dev + if [ -n "${!varname}" ] ; then + eval export ${facet}failover_dev=${!varname} + else + eval export ${facet}failover_dev=$device + fi } init_facets_vars () { local DEVNAME - for num in `seq $MDSCOUNT`; do - DEVNAME=`mdsdevname $num` - init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS - done + if ! 
remote_mds_nodsh; then + for num in `seq $MDSCOUNT`; do + DEVNAME=`mdsdevname $num` + init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS + done + fi + + remote_ost_nodsh && return for num in `seq $OSTCOUNT`; do DEVNAME=`ostdevname $num` @@ -1504,17 +1705,77 @@ init_facets_vars () { done } +mds_sanity_check () { + local timeout=$1 + local period=0 + + while [ $period -lt $timeout ]; do + count=$(do_facet $SINGLEMDS "lctl dl | grep 'osc.*mdtlov_UUID' | grep ' IN ' 2>/dev/null | wc -l") + if [ $count -eq 0 ]; then + break + fi + + echo "There are $count OST are inactive, wait $period seconds, and try again" + sleep 3 + period=$((period+3)) + done + + [ $period -lt $timeout ] || log "$count OST are inactive after $timeout seconds, give up" +} + +som_check() { + SOM_ENABLED=$(do_facet $SINGLEMDS "$LCTL get_param mdt.*.som" | awk -F= ' {print $2}' | head -n 1) + echo $SOM_ENABLED +} + init_param_vars () { - export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2) - export OSTVER=$(do_facet ost1 "lctl get_param version" | cut -d. -f1,2) - export CLIVER=$(lctl get_param version | cut -d. -f 1,2) + if ! remote_ost_nodsh && ! remote_mds_nodsh; then + export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2) + export OSTVER=$(do_facet ost1 "lctl get_param version" | cut -d. -f1,2) + export CLIVER=$(lctl get_param version | cut -d. -f 1,2) + fi + + remote_mds_nodsh || + TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") - TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") log "Using TIMEOUT=$TIMEOUT" + + mds_sanity_check $TIMEOUT + + if [ x"$(som_check)" = x"enabled" ]; then + ENABLE_QUOTA="" + fi + if [ "$ENABLE_QUOTA" ]; then + setup_quota $MOUNT || return 2 + fi } check_config () { local mntpt=$1 + + local mounted=$(mount | grep " $mntpt ") + if [ "$CLIENTONLY" ]; then + # bug 18021 + # CLIENTONLY should not depend on *_HOST settings + local mgc=$($LCTL device_list | awk '/MGC/ {print $4}') + # in theory someone could create a new, + # client-only config file that assumed lustre was already + # configured and didn't set the MGSNID. If MGSNID is not set, + # then we should use the mgs nid currently being used + # as the default value. bug 18021 + [[ x$MGSNID = x ]] && + MGSNID=${mgc//MGC/} + + if [[ x$mgc != xMGC$MGSNID ]]; then + if [ "$mgs_HOST" ]; then + local mgc_ip=$(ping -q -c1 -w1 $mgs_HOST | grep PING | awk '{print $3}' | sed -e "s/(//g" -e "s/)//g") + [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] || + error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc" + fi + fi + return 0 + fi + local myMGS_host=$mgs_HOST if [ "$NETTYPE" = "ptl" ]; then myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//) @@ -1525,10 +1786,12 @@ check_config () { mgshost=$(echo $mgshost | awk -F: '{print $1}') if [ "$mgshost" != "$myMGS_host" ]; then - FAIL_ON_ERROR=true \ - error "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE + error_exit "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE Please use correct config or set mds_HOST correctly!" fi + + sanity_mount_check || + error "environments are insane!" 
} check_timeout () { @@ -1572,6 +1835,7 @@ cleanup_and_setup_lustre() { check_and_cleanup_lustre() { if [ "`mount | grep $MOUNT`" ]; then [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* + [ "$ENABLE_QUOTA" ] && restore_quota_type || true fi if [ "$I_MOUNTED" = "yes" ]; then cleanupall -f || error "cleanup failed" @@ -1631,6 +1895,16 @@ exclude_items_from_list () { echo $(comma_list $list) } +# list, expand are the comma separated lists +expand_list () { + local list=${1//,/ } + local expand=${2//,/ } + local expanded= + + expanded=$(for i in $list $expand; do echo $i; done | sort -u) + echo $(comma_list $expanded) +} + absolute_path() { (cd `dirname $1`; echo $PWD/`basename $1`) } @@ -1650,19 +1924,9 @@ get_facets () { ################################## # Adaptive Timeouts funcs -at_is_valid() { - if [ -z "$AT_MAX_PATH" ]; then - AT_MAX_PATH=$(do_facet $SINGLEMDS "find /sys/ -name at_max") - [ -z "$AT_MAX_PATH" ] && echo "missing /sys/.../at_max " && return 1 - fi - return 0 -} - at_is_enabled() { - at_is_valid || error "invalid call" - # only check mds, we assume at_max is the same on all nodes - local at_max=$(do_facet $SINGLEMDS "cat $AT_MAX_PATH") + local at_max=$(do_facet $SINGLEMDS "lctl get_param -n at_max") if [ $at_max -eq 0 ]; then return 1 else @@ -1673,13 +1937,11 @@ at_is_enabled() { at_max_get() { local facet=$1 - at_is_valid || error "invalid call" - # suppose that all ost-s has the same at_max set if [ $facet == "ost" ]; then - do_facet ost1 "cat $AT_MAX_PATH" + do_facet ost1 "lctl get_param -n at_max" else - do_facet $facet "cat $AT_MAX_PATH" + do_facet $facet "lctl get_param -n at_max" fi } @@ -1687,20 +1949,19 @@ at_max_set() { local at_max=$1 shift - at_is_valid || error "invalid call" - local facet for facet in $@; do if [ $facet == "ost" ]; then for i in `seq $OSTCOUNT`; do - do_facet ost$i "echo $at_max > $AT_MAX_PATH" + do_facet ost$i "lctl set_param at_max=$at_max" + done elif [ $facet == "mds" ]; then for i in `seq $MDSCOUNT`; do - do_facet mds$i "echo $at_max > $AT_MAX_PATH" + do_facet mds$i "lctl set_param at_max=$at_max" done else - do_facet $facet "echo $at_max > $AT_MAX_PATH" + do_facet $facet "lctl set_param at_max=$at_max" fi done } @@ -1781,12 +2042,7 @@ clear_failloc() { } set_nodes_failloc () { - local nodes=$1 - local node - - for node in $nodes ; do - do_node $node lctl set_param fail_loc=$2 - done + do_nodes $(comma_list $1) lctl set_param fail_loc=$2 } cancel_lru_locks() { @@ -1848,10 +2104,7 @@ error_noexit() { ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s) echo "Dumping lctl log to $ERRLOG" # We need to dump the logs on all nodes - local NODES=$(nodes_list) - for NODE in $NODES; do - do_node $NODE $LCTL dk $ERRLOG - done + do_nodes $(comma_list $(nodes_list)) $NODE $LCTL dk $ERRLOG debugrestore [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG TEST_FAILED=true @@ -1877,9 +2130,10 @@ error_ignore() { } skip () { - log " SKIP: ${TESTSUITE} ${TESTNAME} $@" - [ "$TESTSUITELOG" ] && \ - echo "${TESTSUITE}: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true + echo + log " SKIP: ${TESTSUITE} ${TESTNAME} $@" + [ "$TESTSUITELOG" ] && \ + echo "${TESTSUITE}: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true } build_test_filter() { @@ -1978,7 +2232,7 @@ log() { lsmod | grep lnet > /dev/null || load_modules local MSG="$*" - # Get rif of ' + # Get rid of ' MSG=${MSG//\'/\\\'} MSG=${MSG//\(/\\\(} MSG=${MSG//\)/\\\)} @@ -1987,10 +2241,7 @@ log() { MSG=${MSG//\>/\\\>} MSG=${MSG//\ /dev/null || true - done + do_nodes $(comma_list $(nodes_list)) $LCTL 
mark "$MSG" 2> /dev/null || true } trace() { @@ -2013,12 +2264,9 @@ check_mds() { } reset_fail_loc () { - local myNODES=$(nodes_list) - local NODE - - for NODE in $myNODES; do - do_node $NODE "lctl set_param fail_loc=0 2>/dev/null || true" - done + echo -n "Resetting fail_loc on all nodes..." + do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 2>/dev/null || true" + echo done. } run_one() { @@ -2031,7 +2279,8 @@ run_one() { umask 0022 local BEFORE=`date +%s` - log "== test $testnum: $message ============ `date +%H:%M:%S` ($BEFORE)" + echo + log "== test $testnum: $message == `date +%H:%M:%S` ($BEFORE)" #check_mds export TESTNAME=test_$testnum TEST_FAILED=false @@ -2131,6 +2380,7 @@ remote_mds () remote_mds_nodsh() { + [ "$CLIENTONLY" ] && return 0 || true remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ] } @@ -2145,6 +2395,7 @@ remote_ost () remote_ost_nodsh() { + [ "$CLIENTONLY" ] && return 0 || true remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ] } @@ -2234,9 +2485,9 @@ get_random_entry () { rnodes=${rnodes//,/ } - local nodes=($rnodes) + local -a nodes=($rnodes) local num=${#nodes[@]} - local i=$((RANDOM * num / 65536)) + local i=$((RANDOM * num * 2 / 65536)) echo ${nodes[i]} } @@ -2344,6 +2595,19 @@ multiop_bg_pause() { return 0 } +do_and_time () { + local cmd=$1 + local rc + + SECONDS=0 + eval '$cmd' + + [ ${PIPESTATUS[0]} -eq 0 ] || rc=1 + + echo $SECONDS + return $rc +} + inodes_available () { local IFree=$($LFS df -i $MOUNT | grep ^$FSNAME | awk '{print $4}' | sort -un | head -1) || return 1 echo $IFree @@ -2378,6 +2642,7 @@ calc_sum () { } calc_osc_kbytes () { + df $MOUNT > /dev/null $LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum } @@ -2398,12 +2663,15 @@ restore_lustre_params() { done } -check_catastrophe () { +check_catastrophe() { local rnodes=${1:-$(comma_list $(remote_nodes_list))} + local C=$CATASTROPHE + [ -f $C ] && [ $(cat $C) -ne 0 ] && return 1 - [ -f $CATASTROPHE ] && [ $(cat $CATASTROPHE) -ne 0 ] && return 1 if [ $rnodes ]; then - do_nodes $rnodes "set -x; [ -f $CATASTROPHE ] && { [ \`cat $CATASTROPHE\` -eq 0 ] || false; } || true" + do_nodes $rnodes "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0); +if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi +exit \\\$rc;" fi } @@ -2432,13 +2700,14 @@ get_mds_dir () { local file=$dir/f0.get_mds_dir_tmpfile rm -f $file + sleep 1 local iused=$(lfs df -i $dir | grep MDT | awk '{print $3}') - local oldused=($iused) + local -a oldused=($iused) touch $file sleep 1 iused=$(lfs df -i $dir | grep MDT | awk '{print $3}') - local newused=($iused) + local -a newused=($iused) local num=0 for ((i=0; i<${#newused[@]}; i++)); do @@ -2451,22 +2720,96 @@ get_mds_dir () { error "mdt-s : inodes count OLD ${oldused[@]} NEW ${newused[@]}" } -mpi_run () { - local mpirun="$MPIRUN $MPIRUN_OPTIONS" - local command="$mpirun $@" +mdsrate_cleanup () { + mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6 +} - if [ "$MPI_USER" != root -a $mpirun ]; then - echo "+ chmod 0777 $MOUNT" - chmod 0777 $MOUNT - command="su $MPI_USER sh -c \"$command \"" - fi +delayed_recovery_enabled () { + local var=${SINGLEMDS}_svc + do_facet $SINGLEMDS lctl get_param -n mdd.${!var}.stale_export_age > /dev/null 2>&1 +} - ls -ald $MOUNT - echo "+ $command" - eval $command +######################## +convert_facet2name() { + case "$1" in + "ost" ) echo "OST0000" ;; + "ost1") echo "OST0000" ;; + "ost2") echo "OST0001" ;; + "ost3") echo "OST0002" ;; + "ost4") echo 
"OST0003" ;; + "ost5") echo "OST0004" ;; + *) error "unknown facet!" ;; + esac } -mdsrate_cleanup () { - mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 +get_clientosc_proc_path() { + local ost=$1 + + echo "{$1}-osc-*" +} + +get_lustre_version () { + local node=${1:-"mds"} + do_facet $node $LCTL get_param -n version | awk '/^lustre:/ {print $2}' } +get_mds_version_major () { + local version=$(get_lustre_version mds) + echo $version | awk -F. '{print $1}' +} + +get_mds_version_minor () { + local version=$(get_lustre_version mds) + echo $version | awk -F. '{print $2}' +} + +get_mdtosc_proc_path() { + local ost=$1 + local major=$(get_mds_version_major) + local minor=$(get_mds_version_minor) + if [ $major -le 1 -a $minor -le 8 ] ; then + echo "${ost}-osc" + else + echo "${ost}-osc-MDT0000" + fi +} + +get_osc_import_name() { + local node=$1 + local ost=$2 + local name=$(convert_facet2name $ost) + + if [ "$node" == "mds" ]; then + get_mdtosc_proc_path $name + return 0 + fi + + get_clientosc_proc_path $name + return 0 +} + +wait_osc_import_state() { + local node=$1 + local ost_facet=$2 + local expected=$3 + local ost=$(get_osc_import_name $node $ost_facet) + local CONN_PROC + local CONN_STATE + local i=0 + + CONN_PROC="osc.${FSNAME}-${ost}.ost_server_uuid" + CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2) + while [ "${CONN_STATE}" != "${expected}" ]; do + # for disconn we can check after proc entry is removed + [ "x${CONN_STATE}" == "x" -a "${expected}" == "DISCONN" ] && return 0 + # disconnect rpc should be wait not more obd_timeout + [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \ + error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1 + sleep 1 + CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2) + i=$(($i + 1)) + done + + log "${ost_facet} now in ${CONN_STATE} state" + return 0 +}