X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=cab077632fe8c8822d4bb492e6f5aebb80497fa6;hb=0a714ba01bbcdb43fa2d07e88652be2b8fb1c52f;hp=06d70cc99e193966cf953d6850aa3e24e0b9d175;hpb=54721a3902c853dea73bde97a2c93a9618a67018;p=fs%2Flustre-release.git

diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh
index 06d70cc..cab0776 100644
--- a/lustre/tests/test-framework.sh
+++ b/lustre/tests/test-framework.sh
@@ -42,6 +42,7 @@ usage() {
 }
 
 print_summary () {
+    trap 0
     [ "$TESTSUITE" == "lfscktest" ] && return 0
     [ -n "$ONLY" ] && echo "WARNING: ONLY is set to ${ONLY}."
     local form="%-13s %-17s %s\n"
@@ -63,8 +64,16 @@ print_summary () {
     done
 
     for O in $TESTSUITE_LIST; do
-        [ "${!O}" = "no" ] && \
-            printf "$form" "Skipped" "$O" ""
+        if [ "${!O}" = "no" ]; then
+            # FIXME.
+            # only for those tests suits which are run directly from acc-sm script:
+            # bonnie, iozone, etc.
+            if [ -f "$TESTSUITELOG" ] && grep FAIL $TESTSUITELOG | grep -q ' '$O ; then
+                printf "$form" "UNFINISHED" "$O" ""
+            else
+                printf "$form" "Skipped" "$O" ""
+            fi
+        fi
     done
 
     for O in $TESTSUITE_LIST; do
@@ -77,6 +86,7 @@ init_test_env() {
     export LUSTRE=`absolute_path $LUSTRE`
     export TESTSUITE=`basename $0 .sh`
     export TEST_FAILED=false
+    export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
 
     export MKE2FS=${MKE2FS:-mke2fs}
     export DEBUGFS=${DEBUGFS:-debugfs}
@@ -123,6 +133,8 @@ init_test_env() {
     export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
     [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre)
     export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
+    export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod}
+    [ ! -f "$LUSTRE_RMMOD" ] && export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null)
     export FSTYPE=${FSTYPE:-"ldiskfs"}
     export NAME=${NAME:-local}
     export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
@@ -157,11 +169,16 @@ init_test_env() {
             IDENTITY_UPCALL=false
             ;;
     esac
+    export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
 
     # Paths on remote nodes, if different
     export RLUSTRE=${RLUSTRE:-$LUSTRE}
     export RPWD=${RPWD:-$PWD}
     export I_MOUNTED=${I_MOUNTED:-"no"}
+    if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mds.ko -a \
+        ! -f `dirname $0`/../mds/mds.ko ]; then
+        export CLIENTMODSONLY=yes
+    fi
 
     # command line
 
@@ -180,7 +197,6 @@ init_test_env() {
     [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
 
     rm -f $TMP/*active
-
 }
 
 case `uname -r` in
@@ -207,7 +223,7 @@ load_module() {
     fi
 }
 
-load_modules() {
+load_modules_local() {
     if [ -n "$MODPROBE" ]; then
         # use modprobe
         return 0
@@ -245,7 +261,7 @@
     load_module osc/osc
     load_module lov/lov
     load_module mgc/mgc
-    if [ -z "$CLIENTONLY" ] && [ -z "$CLIENTMODSONLY" ]; then
+    if ! client_only; then
        grep -q crc16 /proc/kallsyms || { modprobe crc16 2>/dev/null || true; }
        grep -q jbd /proc/kallsyms || { modprobe jbd 2>/dev/null || true; }
        [ "$FSTYPE" = "ldiskfs" ] && load_module ../ldiskfs/ldiskfs/ldiskfs
@@ -271,44 +287,16 @@
     [ -f $LUSTRE/utils/mount.lustre ] && cp $LUSTRE/utils/mount.lustre /sbin/. || true
 }
 
-RMMOD=rmmod
-if [ `uname -r | cut -c 3` -eq 4 ]; then
-    RMMOD="modprobe -r"
-fi
-
-wait_for_lnet() {
-    local UNLOADED=0
-    local WAIT=0
-    local MAX=60
-    MODULES=$($LCTL modules | awk '{ print $2 }')
-    while [ -n "$MODULES" ]; do
-        sleep 5
-        $RMMOD $MODULES > /dev/null 2>&1 || true
-        MODULES=$($LCTL modules | awk '{ print $2 }')
-        if [ -z "$MODULES" ]; then
-            return 0
-        else
-            WAIT=$((WAIT + 5))
-            echo "waiting, $((MAX - WAIT)) secs left"
-        fi
-        if [ $WAIT -eq $MAX ]; then
-            echo "LNET modules $MODULES will not unload"
-            lsmod
-            return 3
-        fi
-    done
-}
-
-unload_dep_module() {
-    #lsmod output
-    #libcfs                107852  17 llite_lloop,lustre,obdfilter,ost,...
-    local MODULE=$1
-    local DEPS=$(lsmod | awk '($1 == "'$MODULE'") { print $4 }' | tr ',' ' ')
-    for SUBMOD in $DEPS; do
-        unload_dep_module $SUBMOD
-    done
-    [ "$MODULE" = "libcfs" ] && $LCTL dk $TMP/debug || true
-    $RMMOD $MODULE || true
+load_modules () {
+    load_modules_local
+    # bug 19124
+    # load modules on remote nodes optionally
+    # lustre-tests have to be installed on these nodes
+    if $LOAD_MODULES_REMOTE ; then
+        local list=$(comma_list $(remote_nodes_list))
+        echo loading modules on $list
+        do_rpc_nodes $list load_modules
+    fi
 }
 
 check_mem_leak () {
@@ -327,25 +315,15 @@ check_mem_leak () {
 
 unload_modules() {
     wait_exit_ST client # bug 12845
 
-    lsmod | grep libcfs > /dev/null && $LCTL dl
-    unload_dep_module $FSTYPE
-    unload_dep_module libcfs
-
-    local MODULES=$($LCTL modules | awk '{ print $2 }')
-    if [ -n "$MODULES" ]; then
-        echo "Modules still loaded: "
-        echo $MODULES
-        if [ "$(lctl dl)" ]; then
-            echo "Lustre still loaded"
-            lctl dl || true
-            lsmod
-            return 2
-        else
-            echo "Lustre stopped but LNET is still loaded, waiting..."
-            wait_for_lnet || return 3
-        fi
+    $LUSTRE_RMMOD $FSTYPE || return 2
+    if $LOAD_MODULES_REMOTE ; then
+        local list=$(comma_list $(remote_nodes_list))
+        echo unloading modules on $list
+        do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE
+        do_rpc_nodes $list check_mem_leak
     fi
+    HAVE_MODULES=false
 
     check_mem_leak || return 254
 
@@ -521,16 +499,20 @@ stop() {
 # add an additional parameter if mountpoint is ever different from $MOUNT
 quota_save_version() {
     local fsname=${2:-$FSNAME}
+    local spec=$1
+    local ver=$(tr -c -d "123" <<< $spec)
+    local type=$(tr -c -d "ug" <<< $spec)
+
+    [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
 
-    $LFS quotaoff -ug $MOUNT # just in case
-    [ -n "$1" ] && { $LFS quotacheck -$1 $MOUNT || error "quotacheck has failed"; }
+    [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
 
-    do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$1"
+    do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec"
 
     local varsvc
     local osts=$(get_facets OST)
     for ost in ${osts//,/ }; do
         varsvc=${ost}_svc
-        do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$1"
+        do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
     done
 }
 
@@ -563,6 +545,7 @@ setup_quota(){
     # Suppose that quota type the same on mds and ost
     local quota_type=$(quota_type | grep MDT | cut -d "=" -f2)
     [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!"
+    echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]"
     if [ "$quota_type" != "$QUOTA_TYPE" ]; then
         export old_QUOTA_TYPE=$quota_type
         quota_save_version $QUOTA_TYPE
@@ -584,7 +567,7 @@ setup_quota(){
     local cmd
     for usr in $quota_usrs; do
-        echo "Setting up quota on $client:$mntpt for $usr..."
+        echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
         for type in u g; do
             cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt"
             echo "+ $cmd"
@@ -660,7 +643,7 @@ sanity_mount_check_nodes () {
     local rc=0
 
     for mnt in $mnts ; do
-        do_nodes $nodes "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts);
+        do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts);
mpts=\\\$(mount | grep -w -c $mnt);
if [ \\\$running -ne \\\$mpts ]; then
    echo \\\$(hostname) env are INSANE!;
@@ -672,10 +655,12 @@ fi"
 }
 
 sanity_mount_check_servers () {
+    [ "$CLIENTONLY" ] &&
+        { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
     echo Checking servers environments
 
     # FIXME: modify get_facets to display all facets wo params
-    local facets="$(get_facets OST),$(get_facets MDS)"
+    local facets="$(get_facets OST),$(get_facets MDS),mgs"
     local node
     local mnt
     local facet
@@ -723,15 +708,15 @@ zconf_mount_clients() {
 
     echo "Starting client $clients: $OPTIONS $device $mnt"
 
-    do_nodes $clients "set -x;
+    do_nodes $clients "
running=\\\$(mount | grep -c $mnt' ');
rc=0;
if [ \\\$running -eq 0 ] ; then
    mkdir -p $mnt;
    mount -t lustre $OPTIONS $device $mnt;
-    rc=$?;
+    rc=\\\$?;
fi;
-exit $rc"
+exit \\\$rc" || return ${PIPESTATUS[0]}
 
     echo "Started clients $clients: "
     do_nodes $clients "mount | grep -w $mnt"
@@ -751,7 +736,7 @@ zconf_umount_clients() {
     [ "$3" ] && force=-f
 
     echo "Stopping clients: $clients $mnt (opts:$force)"
-    do_nodes $clients "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts);
+    do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts);
if [ \\\$running -ne 0 ] ; then
    echo Stopping client \\\$(hostname) client $mnt opts:$force;
    lsof -t $mnt || need_kill=no;
@@ -817,6 +802,7 @@ boot_node() {
     local node=$1
     if [ "$FAILURE_MODE" = HARD ]; then
        $POWER_UP $node
+       wait_for_host $node
     fi
 }
 
@@ -826,10 +812,14 @@ check_progs_installed () {
     shift
     local progs=$@
 
-    do_nodes $clients "set -x ; PATH=:$PATH status=true; for prog in $progs; do
-        which \\\$prog || { echo \\\$prog missing on \\\$(hostname) && status=false; }
-        done;
-        eval \\\$status"
+    do_nodes $clients "PATH=:$PATH; status=true;
+for prog in $progs; do
+    if ! [ \\\"\\\$(which \\\$prog)\\\" -o \\\"\\\${!prog}\\\" ]; then
+       echo \\\$prog missing on \\\$(hostname);
+       status=false;
+    fi
+done;
+eval \\\$status"
 }
 
 client_var_name() {
@@ -872,23 +862,46 @@ check_client_load () {
     local TESTLOAD=run_${!var}.sh
 
     ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1
-
-    check_catastrophe $client || return 2
-
-    # see if the load is still on the client
+
+    # bug 18914: try to connect several times not only when
+    # check ps, but while check_catastrophe also
     local tries=3
     local RC=254
     while [ $RC = 254 -a $tries -gt 0 ]; do
         let tries=$tries-1
         # assume success
         RC=0
+        if ! check_catastrophe $client; then
+            RC=${PIPESTATUS[0]}
+            if [ $RC -eq 254 ]; then
+                # FIXME: not sure how long we shuold sleep here
+                sleep 10
+                continue
+            fi
+            echo "check catastrophe failed: RC=$RC "
+            return $RC
+        fi
+    done
+    # We can continue try to connect if RC=254
+    # Just print the warning about this
+    if [ $RC = 254 ]; then
+        echo "got a return status of $RC from do_node while checking catastrophe on $client"
+    fi
+
+    # see if the load is still on the client
+    tries=3
+    RC=254
+    while [ $RC = 254 -a $tries -gt 0 ]; do
+        let tries=$tries-1
+        # assume success
+        RC=0
         if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then
             RC=${PIPESTATUS[0]}
             sleep 30
         fi
     done
     if [ $RC = 254 ]; then
-        echo "got a return status of $RC from do_node while checking (i.e. with 'ps') the client load on the remote system"
+        echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client"
         # see if we can diagnose a bit why this is
     fi
 
@@ -919,9 +932,10 @@ restart_client_loads () {
         check_client_load $client
         rc=${PIPESTATUS[0]}
         if [ "$rc" != 0 -a "$expectedfail" ]; then
-            start_client_load $client
-            echo "Restarted client load: on $client. Checking ..."
-            check_client_load $client
+            local var=$(client_var_name $client)_load
+            start_client_load $client ${!var}
+            echo "Restarted client load ${!var}: on $client. Checking ..."
+            check_client_load $client
             rc=${PIPESTATUS[0]}
             if [ "$rc" != 0 ]; then
                 log "Client load failed to restart on node $client, rc=$rc"
@@ -1010,7 +1024,7 @@ wait_delete_completed () {
 wait_for_host() {
     local host=$1
     check_network "$host" 900
-    while ! do_node $host "ls -d $LUSTRE " > /dev/null; do sleep 5; done
+    while ! do_node $host hostname > /dev/null; do sleep 5; done
 }
 
 wait_for() {
@@ -1126,9 +1140,6 @@ facet_failover() {
     shutdown_facet $facet
     [ -n "$sleep_time" ] && sleep $sleep_time
     reboot_facet $facet
-    client_df &
-    DFPID=$!
-    echo "df pid is $DFPID"
     change_active $facet
     local TO=`facet_active_host $facet`
     echo "Failover $facet to $TO"
@@ -1184,7 +1195,7 @@ ost_evict_client() {
 
 fail() {
     facet_failover $* || error "failover: $?"
-    df $MOUNT || error "post-failover df: $?"
+    client_df || error "post-failover df: $?"
 }
 
 fail_nodf() {
@@ -1197,9 +1208,9 @@ fail_abort() {
     stop $facet
     change_active $facet
     mount_facet $facet -o abort_recovery
-    df $MOUNT || echo "first df failed: $?"
+    client_df || echo "first df failed: $?"
     sleep 1
-    df $MOUNT || error "post-failover df: $?"
+    client_df || error "post-failover df: $?"
 }
 
 do_lmc() {
@@ -1315,7 +1326,14 @@ change_active() {
 }
 
 do_node() {
-    HOST=$1
+    local verbose=false
+    # do not stripe off hostname if verbose, bug 19215
+    if [ x$1 = x--verbose ]; then
+        shift
+        verbose=true
+    fi
+
+    local HOST=$1
     shift
     local myPDSH=$PDSH
     if [ "$HOST" = "$HOSTNAME" ]; then
@@ -1339,7 +1357,17 @@ do_node() {
         [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
         return 0
     fi
-    $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //"
+
+    if $verbose ; then
+        # print HOSTNAME for myPDSH="no_dsh"
+        if [[ $myPDSH = no_dsh ]]; then
+            $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /"
+        else
+            $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
+        fi
+    else
+        $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //"
+    fi
     return ${PIPESTATUS[0]}
 }
 
@@ -1348,11 +1376,22 @@ single_local_node () {
 }
 
 do_nodes() {
+    local verbose=false
+    # do not stripe off hostname if verbose, bug 19215
+    if [ x$1 = x--verbose ]; then
+        shift
+        verbose=true
+    fi
+
     local rnodes=$1
     shift
 
     if $(single_local_node $rnodes); then
-        do_node $rnodes $@
+        if $verbose; then
+            do_node --verbose $rnodes $@
+        else
+            do_node $rnodes $@
+        fi
         return $?
     fi
 
@@ -1367,7 +1406,11 @@ do_nodes() {
         $myPDSH $rnodes $LCTL mark "$@" > /dev/null 2>&1 || :
     fi
 
-    $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g"
+    if $verbose ; then
+        $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
+    else
+        $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g"
+    fi
     return ${PIPESTATUS[0]}
 }
 
@@ -1439,6 +1482,8 @@ stopall() {
 }
 
 cleanupall() {
+    nfs_client_mode && return
+
     stopall $*
     unload_modules
     cleanup_gss
@@ -1468,7 +1513,11 @@ formatall() {
     # We need ldiskfs here, may as well load them all
     load_modules
     [ "$CLIENTONLY" ] && return
-    echo "Formatting mdts, osts"
+    echo Formatting mgs, mds, osts
+    if [[ $MDSDEV1 != $MGSDEV ]] || [[ $mds1_HOST != $mgs_HOST ]]; then
+        add mgs $mgs_MKFS_OPTS $FSTYPE_OPT --reformat $MGSDEV || exit 10
+    fi
+
     for num in `seq $MDSCOUNT`; do
         echo "Format mds$num: $(mdsdevname $num)"
         if $VERBOSE; then
@@ -1553,15 +1602,21 @@ writeconf_all () {
 }
 
 setupall() {
+    nfs_client_mode && return
+
     sanity_mount_check ||
         error "environments are insane!"
 
     load_modules
     init_gss
     if [ -z "$CLIENTONLY" ]; then
-        echo "Setup mdts, osts"
+        echo Setup mgs, mdt, osts
         echo $WRITECONF | grep -q "writeconf" && \
             writeconf_all
+        if [[ $mds1_HOST != $mgs_HOST ]] || [[ $MDSDEV1 != $MGSDEV ]]; then
+            start mgs $MGSDEV $mgs_MOUNT_OPTS
+        fi
+
         for num in `seq $MDSCOUNT`; do
             DEVNAME=$(mdsdevname $num)
             start mds$num $DEVNAME $MDS_MOUNT_OPTS
@@ -1622,6 +1677,7 @@ mounted_lustre_filesystems() {
 }
 
 init_facet_vars () {
+    [ "$CLIENTONLY" ] && return 0
     local facet=$1
     shift
     local device=$1
@@ -1669,12 +1725,14 @@ init_facets_vars () {
     done
 }
 
-mds_sanity_check () {
-    local timeout=$1
+osc_ensure_active () {
+    local facet=$1
+    local type=$2
+    local timeout=$3
     local period=0
 
     while [ $period -lt $timeout ]; do
-        count=$(do_facet $SINGLEMDS "lctl dl | grep 'osc.*mdtlov_UUID' | grep ' IN ' 2>/dev/null | wc -l")
+        count=$(do_facet $facet "lctl dl | grep '${FSNAME}-OST.*-osc-${type}' | grep ' IN ' 2>/dev/null | wc -l")
         if [ $count -eq 0 ]; then
             break
         fi
@@ -1687,6 +1745,11 @@ mds_sanity_check () {
     [ $period -lt $timeout ] || log "$count OST are inactive after $timeout seconds, give up"
 }
 
+som_check() {
+    SOM_ENABLED=$(do_facet $SINGLEMDS "$LCTL get_param mdt.*.som" | awk -F= ' {print $2}' | head -n 1)
+    echo $SOM_ENABLED
+}
+
 init_param_vars () {
     if ! remote_ost_nodsh && ! remote_mds_nodsh; then
         export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2)
@@ -1699,15 +1762,60 @@ init_param_vars () {
 
     log "Using TIMEOUT=$TIMEOUT"
 
-    mds_sanity_check $TIMEOUT
+    osc_ensure_active $SINGLEMDS M $TIMEOUT
+    osc_ensure_active client c $TIMEOUT
 
+    if [ x"$(som_check)" = x"enabled" ]; then
+        ENABLE_QUOTA=""
+    fi
     if [ "$ENABLE_QUOTA" ]; then
         setup_quota $MOUNT || return 2
     fi
 }
 
+nfs_client_mode () {
+    if [ "$NFSCLIENT" ]; then
+        echo "NFSCLIENT mode: setup, cleanup, check config skipped"
+        local clients=$CLIENTS
+        [ -z $clients ] && clients=$(hostname)
+
+        # FIXME: remove hostname when 19215 fixed
+        do_nodes $clients "echo \\\$(hostname); grep ' '$MOUNT' ' /proc/mounts"
+        declare -a nfsexport=(`grep ' '$MOUNT' ' /proc/mounts | awk '{print $1}' | awk -F: '{print $1 " " $2}'`)
+        do_nodes ${nfsexport[0]} "echo \\\$(hostname); df -T ${nfsexport[1]}"
+        return
+    fi
+    return 1
+}
+
 check_config () {
+    nfs_client_mode && return
+
+    local mntpt=$1
+
+    local mounted=$(mount | grep " $mntpt ")
+    if [ "$CLIENTONLY" ]; then
+        # bug 18021
+        # CLIENTONLY should not depend on *_HOST settings
+        local mgc=$($LCTL device_list | awk '/MGC/ {print $4}')
+        # in theory someone could create a new,
+        # client-only config file that assumed lustre was already
+        # configured and didn't set the MGSNID. If MGSNID is not set,
+        # then we should use the mgs nid currently being used
+        # as the default value. bug 18021
+        [[ x$MGSNID = x ]] &&
+            MGSNID=${mgc//MGC/}
+
+        if [[ x$mgc != xMGC$MGSNID ]]; then
+            if [ "$mgs_HOST" ]; then
+                local mgc_ip=$(ping -q -c1 -w1 $mgs_HOST | grep PING | awk '{print $3}' | sed -e "s/(//g" -e "s/)//g")
+                [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] ||
+                    error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc"
+            fi
+        fi
+        return 0
+    fi
+
     local myMGS_host=$mgs_HOST
     if [ "$NETTYPE" = "ptl" ]; then
         myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
@@ -1717,10 +1825,10 @@ check_config () {
     local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}')
     mgshost=$(echo $mgshost | awk -F: '{print $1}')
 
-    if [ "$mgshost" != "$myMGS_host" ]; then
-        log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
-                   Please use correct config or set mds_HOST correctly!"
-    fi
+#    if [ "$mgshost" != "$myMGS_host" ]; then
+#        error_exit "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
+#                   Please use correct config or set mds_HOST correctly!"
+#    fi
 
     sanity_mount_check ||
         error "environments are insane!"
@@ -1736,6 +1844,8 @@ check_timeout () {
 }
 
 check_and_setup_lustre() {
+    nfs_client_mode && return
+
     local MOUNTED=$(mounted_lustre_filesystems)
     if [ -z "$MOUNTED" ] || ! $(echo $MOUNTED | grep -w -q $MOUNT); then
         [ "$REFORMAT" ] && formatall
@@ -1747,6 +1857,11 @@
         check_config $MOUNT
         init_facets_vars
         init_param_vars
+
+        do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=\\\"$PTLDEBUG\\\";
+            lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
+            lctl set_param debug_mb=${DEBUG_SIZE};
+            sync"
     fi
     if [ "$ONLY" == "setup" ]; then
         exit 0
@@ -1783,8 +1898,7 @@ check_network() {
     local WAIT=0
     local MAX=$2
     while [ $NETWORK -eq 0 ]; do
-        ping -c 1 -w 3 $1 > /dev/null
-        if [ $? -eq 0 ]; then
+        if ping -c 1 -w 3 $1 > /dev/null; then
             NETWORK=1
         else
             WAIT=$((WAIT + 5))
@@ -1837,6 +1951,26 @@ expand_list () {
     echo $(comma_list $expanded)
 }
 
+testslist_filter () {
+    local script=$LUSTRE/tests/${TESTSUITE}.sh
+
+    [ -f $script ] || return 0
+
+    local start_at=$START_AT
+    local stop_at=$STOP_AT
+
+    local var=${TESTSUITE}_START_AT
+    [ x"${!var}" != x ] && start_at=${!var}
+    var=${TESTSUITE}_STOP_AT
+    [ x"${!var}" != x ] && stop_at=${!var}
+
+    sed -n 's/^test_\([^ (]*\).*/\1/p' $script | \
+        awk ' BEGIN { if ("'${start_at:-0}'" != 0) flag = 1 }
+             /^'${start_at}'$/ {flag = 0}
+             {if (flag == 1) print $0}
+             /^'${stop_at}'$/ { flag = 1 }'
+}
+
 absolute_path() {
     (cd `dirname $1`; echo $PWD/`basename $1`)
 }
@@ -1974,12 +2108,7 @@ clear_failloc() {
 }
 
 set_nodes_failloc () {
-    local nodes=$1
-    local node
-
-    for node in $nodes ; do
-        do_node $node lctl set_param fail_loc=$2
-    done
+    do_nodes $(comma_list $1) lctl set_param fail_loc=$2
 }
 
 cancel_lru_locks() {
@@ -2017,6 +2146,10 @@ pgcache_empty() {
             return 1
         fi
     done
+    if [[ $MDSDEV1 != $MGSDEV ]]; then
+        stop mgs
+    fi
+
     return 0
 }
 
@@ -2025,10 +2158,37 @@ debugsave() {
 }
 
 debugrestore() {
-    [ -n "$DEBUGSAVE" ] && lctl set_param debug="${DEBUGSAVE}"
+    [ -n "$DEBUGSAVE" ] && \
+        do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=\\\"${DEBUGSAVE}\\\";"
    DEBUGSAVE=""
 }
 
+debug_size_save() {
+    DEBUG_SIZE_SAVED="$(lctl get_param -n debug_mb)"
+}
+
+debug_size_restore() {
+    [ -n "$DEBUG_SIZE_SAVED" ] && \
+        do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE_SAVED"
+    DEBUG_SIZE_SAVED=""
+}
+
+start_full_debug_logging() {
+    debugsave
+    debug_size_save
+
+    local FULLDEBUG=-1
+    local DEBUG_SIZE=150
+
+    do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug_mb=$DEBUG_SIZE"
+    do_nodes $(comma_list $(nodes_list)) "$LCTL set_param debug=$FULLDEBUG;"
+}
+
+stop_full_debug_logging() {
+    debug_size_restore
+    debugrestore
+}
+
 ##################################
 # Test interface
 ##################################
@@ -2037,14 +2197,22 @@ error_noexit() {
     local TYPE=${TYPE:-"FAIL"}
     local ERRLOG
     lctl set_param fail_loc=0 2>/dev/null || true
+
+    local dump=true
+    # do not dump logs if $1=false
+    if [ "x$1" = "xfalse" ]; then
+        shift
+        dump=false
+    fi
+
     log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
-    ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s)
-    echo "Dumping lctl log to $ERRLOG"
-    # We need to dump the logs on all nodes
-    local NODES=${NODES:-$(nodes_list)}
-    for NODE in $NODES; do
-        do_node $NODE $LCTL dk $ERRLOG
-    done
+
+    if $dump; then
+        ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s)
+        echo "Dumping lctl log to $ERRLOG"
+        # We need to dump the logs on all nodes
+        do_nodes $(comma_list $(nodes_list)) $NODE $LCTL dk $ERRLOG
+    fi
     debugrestore
     [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG
     TEST_FAILED=true
@@ -2069,6 +2237,10 @@ error_ignore() {
     error_noexit "$@"
 }
 
+skip_env () {
+    $FAIL_ON_SKIP_ENV && error false $@ || skip $@
+}
+
 skip () {
     echo
     log " SKIP: ${TESTSUITE} ${TESTNAME} $@"
@@ -2077,6 +2249,8 @@ skip () {
 }
 
 build_test_filter() {
+    EXCEPT="$EXCEPT $(testslist_filter)"
+
     [ "$ONLY" ] && log "only running test `echo $ONLY`"
     for O in $ONLY; do
         eval ONLY_${O}=true
@@ -2096,12 +2270,8 @@ build_test_filter() {
     done
 }
 
-_basetest() {
-    echo $*
-}
-
 basetest() {
-    IFS=abcdefghijklmnopqrstuvwxyz _basetest $1
+    echo ${1%%[a-z]*}
 }
 
 # print a newline if the last test was skipped
@@ -2181,10 +2351,7 @@ log() {
     MSG=${MSG//\>/\\\>}
     MSG=${MSG//\</\\\<}
-    local NODES=$(nodes_list)
-    for NODE in $NODES; do
-        do_node $NODE $LCTL mark "$MSG" 2> /dev/null || true
-    done
+    do_nodes $(comma_list $(nodes_list)) $LCTL mark "$MSG" 2> /dev/null || true
 }
 
 trace() {
@@ -2207,13 +2374,8 @@ check_mds() {
 }
 
 reset_fail_loc () {
-    local myNODES=$(nodes_list)
-    local NODE
-
     echo -n "Resetting fail_loc on all nodes..."
-    for NODE in $myNODES; do
-        do_node $NODE "lctl set_param -n fail_loc=0 2>/dev/null || true"
-    done
+    do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 2>/dev/null || true"
     echo done.
 }
 
@@ -2328,6 +2490,7 @@ remote_mds ()
 
 remote_mds_nodsh() {
+    [ "$CLIENTONLY" ] && return 0 || true
     remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
 }
 
@@ -2342,6 +2505,7 @@ remote_ost ()
 
 remote_ost_nodsh() {
+    [ "$CLIENTONLY" ] && return 0 || true
     remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
 }
 
@@ -2438,6 +2602,10 @@ get_random_entry () {
     echo ${nodes[i]}
 }
 
+client_only () {
+    [ "$CLIENTONLY" ] || [ "$CLIENTMODSONLY" = yes ]
+}
+
 is_patchless () {
     lctl get_param version | grep -q patchless
 
@@ -2512,6 +2680,35 @@ check_runas_id() {
         add user $myRUNAS_UID:$myRUNAS_GID on these nodes."
 }
 
+# obtain the UID/GID for MPI_USER
+get_mpiuser_id() {
+    local mpi_user=$1
+
+    MPI_USER_UID=$(do_facet client "getent passwd $mpi_user | cut -d: -f3;
+exit \\\${PIPESTATUS[0]}") || error_exit "failed to get the UID for $mpi_user"
+
+    MPI_USER_GID=$(do_facet client "getent passwd $mpi_user | cut -d: -f4;
+exit \\\${PIPESTATUS[0]}") || error_exit "failed to get the GID for $mpi_user"
+}
+
+# obtain and cache Kerberos ticket-granting ticket
+refresh_krb5_tgt() {
+    local myRUNAS_UID=$1
+    local myRUNAS_GID=$2
+    shift 2
+    local myRUNAS=$@
+    if [ -z "$myRUNAS" ]; then
+        error_exit "myRUNAS command must be specified for refresh_krb5_tgt"
+    fi
+
+    CLIENTS=${CLIENTS:-$HOSTNAME}
+    do_nodes $CLIENTS "set -x
+if ! $myRUNAS krb5_login.sh; then
+    echo "Failed to refresh Krb5 TGT for UID/GID $myRUNAS_UID/$myRUNAS_GID."
+    exit 1
+fi"
+}
+
 # Run multiop in the background, but wait for it to print
 # "PAUSING" to its stdout before returning from this function.
 multiop_bg_pause() {
@@ -2596,7 +2793,7 @@ calc_osc_kbytes () {
 # generate a stream of formatted strings (<node> <param name>=<param value>)
 save_lustre_params() {
         local s
-        do_node $1 "lctl get_param $2" | while read s; do echo "$1 $s"; done
+        do_nodes --verbose $1 "lctl get_param $2 | while read s; do echo \\\$s; done"
 }
 
 # restore lustre parameters from input stream, produces by save_lustre_params
@@ -2605,7 +2802,7 @@ restore_lustre_params() {
         local name
         local val
         while IFS=" =" read node name val; do
-                do_node $node "lctl set_param -n $name $val"
+                do_node ${node//:/} "lctl set_param -n $name $val"
         done
 }
 
@@ -2636,7 +2833,7 @@ get_stripe_info() {
     stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file`
     stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file`
-    stripe_index=`awk '/obdidx/ {start = 1; getline; print $1; exit}' $tmp_file`
+    stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file`
 
     rm -f $tmp_file
 }
@@ -2648,12 +2845,12 @@ get_mds_dir () {
     rm -f $file
     sleep 1
     local iused=$(lfs df -i $dir | grep MDT | awk '{print $3}')
-    local oldused=($iused)
+    local -a oldused=($iused)
 
     touch $file
     sleep 1
     iused=$(lfs df -i $dir | grep MDT | awk '{print $3}')
-    local newused=($iused)
+    local -a newused=($iused)
 
     local num=0
     for ((i=0; i<${#newused[@]}; i++)); do
@@ -2666,30 +2863,6 @@ get_mds_dir () {
     error "mdt-s : inodes count OLD ${oldused[@]} NEW ${newused[@]}"
 }
 
-mpi_run () {
-    local mpirun="$MPIRUN $MPIRUN_OPTIONS"
-    local command="$mpirun $@"
-    local mpilog=$TMP/mpi.log
-    local rc
-
-    if [ "$MPI_USER" != root -a $mpirun ]; then
-        echo "+ chmod 0777 $MOUNT"
-        chmod 0777 $MOUNT
-        command="su $MPI_USER sh -c \"$command \""
-    fi
-
-    ls -ald $MOUNT
-    echo "+ $command"
-    eval $command 2>&1 > $mpilog || true
-
-    rc=${PIPESTATUS[0]}
-    if [ $rc -eq 0 ] && grep -q "p4_error: : [^0]" $mpilog ; then
-       rc=1
-    fi
-    cat $mpilog
-    return $rc
-}
-
 mdsrate_cleanup () {
     mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6
 }
@@ -2700,16 +2873,21 @@ delayed_recovery_enabled () {
 }
 
 ########################
-convert_facet2name() {
-    case "$1" in
-        "ost" ) echo "OST0000" ;;
-        "ost1") echo "OST0000" ;;
-        "ost2") echo "OST0001" ;;
-        "ost3") echo "OST0002" ;;
-        "ost4") echo "OST0003" ;;
-        "ost5") echo "OST0004" ;;
-          *) error "unknown facet!" ;;
-    esac
+
+convert_facet2label() {
+    local facet=$1
+
+    if [ x$facet = xost ]; then
+        facet=ost1
+    fi
+
+    local varsvc=${facet}_svc
+
+    if [ -n ${!varsvc} ]; then
+        echo ${!varsvc}
+    else
+        error "No lablel for $facet!"
+    fi
 }
 
 get_clientosc_proc_path() {
@@ -2745,41 +2923,164 @@ get_mdtosc_proc_path() {
 }
 
 get_osc_import_name() {
-    local node=$1
+    local facet=$1
     local ost=$2
-    local name=$(convert_facet2name $ost)
+    local label=$(convert_facet2label $ost)
 
-    if [ "$node" == "mds" ]; then
-        get_mdtosc_proc_path $name
+    if [ "$facet" == "mds" ]; then
+        get_mdtosc_proc_path $label
         return 0
     fi
 
-    get_clientosc_proc_path $name
+    get_clientosc_proc_path $label
+    return 0
+}
+
+wait_import_state () {
+    local expected=$1
+    local CONN_PROC=$2
+    local CONN_STATE
+    local i=0
+
+    CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+    while [ "${CONN_STATE}" != "${expected}" ]; do
+        if [ "${expected}" == "DISCONN" ]; then
+            # for disconn we can check after proc entry is removed
+            [ "x${CONN_STATE}" == "x" ] && return 0
+            # with AT we can have connect request timeout ~ reconnect timeout
+            # and test can't see real disconnect
+            [ "${CONN_STATE}" == "CONNECTING" ] && return 0
+        fi
+        # disconnect rpc should be wait not more obd_timeout
+        [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \
+            error "can't put import for $CONN_PROC into ${expected} state" && return 1
+        sleep 1
+        CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+        i=$(($i + 1))
+    done
+
+    log "$CONN_PROC now in ${CONN_STATE} state"
     return 0
 }
 
 wait_osc_import_state() {
-    local node=$1
+    local facet=$1
     local ost_facet=$2
     local expected=$3
-    local ost=$(get_osc_import_name $node $ost_facet)
+    local ost=$(get_osc_import_name $facet $ost_facet)
     local CONN_PROC
     local CONN_STATE
     local i=0
 
-    CONN_PROC="osc.${FSNAME}-${ost}.ost_server_uuid"
-    CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC | cut -f2)
+    CONN_PROC="osc.${ost}.ost_server_uuid"
+    CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
     while [ "${CONN_STATE}" != "${expected}" ]; do
-        # for disconn we can check after proc entry is removed
-        [ "x${CONN_STATE}" == "x" -a "${expected}" == "DISCONN" ] && return 0
+        if [ "${expected}" == "DISCONN" ]; then
+            # for disconn we can check after proc entry is removed
+            [ "x${CONN_STATE}" == "x" ] && return 0
+            # with AT we can have connect request timeout ~ reconnect timeout
+            # and test can't see real disconnect
+            [ "${CONN_STATE}" == "CONNECTING" ] && return 0
+        fi
         # disconnect rpc should be wait not more obd_timeout
         [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \
             error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1
         sleep 1
-        CONN_STATE=$(do_facet $node lctl get_param -n $CONN_PROC | cut -f2)
+        CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
         i=$(($i + 1))
     done
 
     log "${ost_facet} now in ${CONN_STATE} state"
     return 0
 }
+
+get_clientmdc_proc_path() {
+    echo "${1}-mdc-*"
+}
+
+do_rpc_nodes () {
+    local list=$1
+    shift
+
+    do_nodes --verbose $list "PATH=$LUSTRE/tests/:$PATH sh rpc.sh $@ "
+}
+
+wait_clients_import_state () {
+    local list=$1
+    local facet=$2
+    local expected=$3
+    shift
+
+    local label=$(convert_facet2label $facet)
+    local proc_path
+    case $facet in
+        ost* ) proc_path="osc.$(get_clientosc_proc_path $label).ost_server_uuid" ;;
+        mds* ) proc_path="mdc.$(get_clientmdc_proc_path $label).mds_server_uuid" ;;
+        *) error "unknown facet!" ;;
+    esac
+
+
+    if ! do_rpc_nodes $list wait_import_state $expected $proc_path; then
+        error "import is not in ${expected} state"
+        return 1
+    fi
+}
+
+oos_full() {
+        local -a AVAILA
+        local -a GRANTA
+        local OSCFULL=1
+        AVAILA=($(do_nodes $(comma_list $(osts_nodes)) \
+                  $LCTL get_param obdfilter.*.kbytesavail))
+        GRANTA=($(do_nodes $(comma_list $(osts_nodes)) \
+                  $LCTL get_param -n obdfilter.*.tot_granted))
+        for ((i=0; i<${#AVAILA[@]}; i++)); do
+                local -a AVAIL1=(${AVAILA[$i]//=/ })
+                GRANT=$((${GRANTA[$i]}/1024))
+                echo -n $(echo ${AVAIL1[0]} | cut -d"." -f2) avl=${AVAIL1[1]} grnt=$GRANT diff=$((AVAIL1[1] - GRANT))
+                [ $((AVAIL1[1] - GRANT)) -lt 400 ] && OSCFULL=0 && echo " FULL" || echo
+        done
+        return $OSCFULL
+}
+
+gather_logs () {
+    local list=$1
+
+    local ts=$(date +%s)
+
+    # bug 20237, comment 11
+    # It would also be useful to provide the option
+    # of writing the file to an NFS directory so it doesn't need to be copied.
+    local tmp=$TMP
+    local docp=true
+    [ -d "$SHARED_DIR_LOGS" ] && tmp=$SHARED_DIR_LOGS && docp=false
+
+    # dump lustre logs, dmesg
+    do_nodes $list "log=$tmp/\\\$(hostname)-debug-$ts.log ;
+lctl dk \\\$log >/dev/null;
+log=$tmp/\\\$(hostname)-dmesg-$ts.log;
+dmesg > \\\$log; "
+
+    # FIXME: does it make sense to collect the logs for $ts only, but all
+    # TESTSUITE logs?
+    # rsync $TMP/*${TESTSUITE}* to gather the logs dumped by error fn
+    local logs=$TMP/'*'${TESTSUITE}'*'
+    if $docp; then
+        logs=$logs' '$tmp/'*'$ts'*'
+    fi
+    for node in ${list//,/ }; do
+        rsync -az $node:"$logs" $TMP
+    done
+
+    local archive=$TMP/${TESTSUITE}-$ts.tar.bz2
+    tar -jcf $archive $tmp/*$ts* $TMP/*${TESTSUITE}*
+
+    echo $archive
+}
+
+cleanup_logs () {
+    local list=${1:-$(comma_list $(nodes_list))}
+
+    [ -n ${TESTSUITE} ] && do_nodes $list "rm -f $TMP/*${TESTSUITE}*" || true
+}
+