X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=0165fee28aed461a7d871aec1e5294be6b2812c8;hp=61e909ecfa26d2bc7a928e95e8fe9f372d927340;hb=8f6d85eebc312b64d8e8a35b0be3ae137a50a45c;hpb=82a1b0453c8dccf63d23f3c96f1b2355524b952c diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index 61e909e..0165fee 100644 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -15,12 +15,36 @@ export GSS=false export GSS_KRB5=false export GSS_PIPEFS=false export IDENTITY_UPCALL=default +export QUOTA_AUTO=1 #export PDSH="pdsh -S -Rssh -w" # function used by scripts run on remote nodes LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)} . $LUSTRE/tests/functions.sh +. $LUSTRE/tests/yaml.sh + +LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg} + +EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh} + +if [ -f "$EXCEPT_LIST_FILE" ]; then + echo "Reading test skip list from $EXCEPT_LIST_FILE" + cat $EXCEPT_LIST_FILE + . $EXCEPT_LIST_FILE +fi + +if [[ -x $LUSTRE/utils/llobdstat ]]; then + LLOBDSTAT=$LUSTRE/utils/llobdstat +elif [[ -x /usr/bin/llobdstat ]]; then + LLOBDSTAT=/usr/bin/llobdstat +else + # Good luck + LLOBDSTAT=llobdstat +fi + +[ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf +[ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] && MODPROBECONF=/etc/modprobe.d/Lustre assert_DIR () { local failed="" @@ -43,17 +67,17 @@ usage() { print_summary () { trap 0 - [ "$TESTSUITE" == "lfscktest" ] && return 0 + [ "$TESTSUITE" == "lfsck" ] && return 0 [ -n "$ONLY" ] && echo "WARNING: ONLY is set to ${ONLY}." local form="%-13s %-17s %s\n" printf "$form" "status" "script" "skipped tests E(xcluded) S(low)" echo "------------------------------------------------------------------------------------" - for O in $TESTSUITE_LIST; do + for O in $DEFAULT_SUITES; do local skipped="" local slow="" - local o=$(echo $O | tr "[:upper:]" "[:lower:]") + O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]") + local o=$(echo $O | tr "[:upper:]" "[:lower:]") o=${o//_/-} - o=${o//tyn/tyN} local log=${TMP}/${o}.log [ -f $log ] && skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' | sed 's/test_//g') [ -f $log ] && slow=$(grep SLOW $log | awk '{ printf " %s", $3 }' | sed 's/test_//g') @@ -63,7 +87,8 @@ print_summary () { done - for O in $TESTSUITE_LIST; do + for O in $DEFAULT_SUITES; do + O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]") if [ "${!O}" = "no" ]; then # FIXME. # only for those tests suits which are run directly from acc-sm script: @@ -76,7 +101,8 @@ print_summary () { fi done - for O in $TESTSUITE_LIST; do + for O in $DEFAULT_SUITES; do + O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]") [ "${!O}" = "done" -o "${!O}" = "no" ] || \ printf "$form" "UNFINISHED" "$O" "" done @@ -98,6 +124,10 @@ init_test_env() { #[ -d /r ] && export ROOT=${ROOT:-/r} export TMP=${TMP:-$ROOT/tmp} export TESTSUITELOG=${TMP}/${TESTSUITE}.log + if [[ -z $LOGDIRSET ]]; then + export LOGDIR=${LOGDIR:-${TMP}/test_logs/}/$(date +%s) + export LOGDIRSET=true + fi export HOSTNAME=${HOSTNAME:-`hostname`} if ! echo $PATH | grep -q $LUSTRE/utils; then export PATH=$PATH:$LUSTRE/utils @@ -106,16 +136,19 @@ init_test_env() { export PATH=$PATH:$LUSTRE/utils/gss fi if ! echo $PATH | grep -q $LUSTRE/tests; then - export PATH=$PATH:$LUSTRE/tests + export PATH=$PATH:$LUSTRE/tests fi + export LST=${LST:-"$LUSTRE/../lnet/utils/lst"} + [ ! 
-f "$LST" ] && export LST=$(which lst) export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"} [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null) if ! echo $PATH | grep -q $LUSTRE/tests/racer; then - export PATH=$PATH:$LUSTRE/tests/racer + export PATH=$LUSTRE/tests/racer:$PATH: fi if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then export PATH=$PATH:$LUSTRE/tests/mpi fi + export RSYNC_RSH=${RSYNC_RSH:-rsh} export LCTL=${LCTL:-"$LUSTRE/utils/lctl"} [ ! -f "$LCTL" ] && export LCTL=$(which lctl) export LFS=${LFS:-"$LUSTRE/utils/lfs"} @@ -155,7 +188,7 @@ init_test_env() { xkrb5*) echo "Using GSS/krb5 ptlrpc security flavor" which lgss_keyring > /dev/null 2>&1 || \ - error "built with gss disabled! SEC=$SEC" + error_exit "built with gss disabled! SEC=$SEC" GSS=true GSS_KRB5=true ;; @@ -204,21 +237,69 @@ case `uname -r` in *) EXT=".ko"; USE_QUOTA=yes;; esac + +module_loaded () { + /sbin/lsmod | grep -q $1 +} + +# Load a module on the system where this is running. +# +# Synopsis: load_module module_name [module arguments for insmod/modprobe] +# +# If module arguments are not given but MODOPTS_ is set, then its value +# will be used as the arguments. Otherwise arguments will be obtained from +# /etc/modprobe.conf, from /etc/modprobe.d/Lustre, or else none will be used. +# load_module() { + local optvar EXT=".ko" module=$1 shift BASE=`basename $module $EXT` - lsmod | grep -q ${BASE} || \ - if [ -f ${LUSTRE}/${module}${EXT} ]; then - insmod ${LUSTRE}/${module}${EXT} $@ + + # If no module arguments were passed, get them from $MODOPTS_, else from + # modprobe.conf + if [ $# -eq 0 ]; then + # $MODOPTS_; we could use associative arrays, but that's not in + # Bash until 4.x, so we resort to eval. + optvar="MODOPTS_$(basename $module | tr a-z A-Z)" + eval set -- \$$optvar + if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then + # Nothing in $MODOPTS_; try modprobe.conf + set -- $(grep "^options\\s*\<${module}\>" $MODPROBECONF) + # Get rid of "options $module" + (($# > 0)) && shift 2 + + # Ensure we have accept=all for lnet + if [ $module = lnet ]; then + # OK, this is a bit wordy... + local arg accept_all_present=false + for arg in "$@"; do + [ "$arg" = accept=all ] && accept_all_present=true + done + $accept_all_present || set -- "$@" accept=all + fi + fi + fi + + [ $# -gt 0 ] && echo "${module} options: '$*'" + + module_loaded ${BASE} && return + + # Note that insmod will ignore anything in modprobe.conf, which is why we're + # passing options on the command-line. 
+ if [ "$BASE" == "lnet_selftest" ] && \ + [ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]; then + insmod ${LUSTRE}/../lnet/selftest/${module}${EXT} + elif [ -f ${LUSTRE}/${module}${EXT} ]; then + insmod ${LUSTRE}/${module}${EXT} "$@" else # must be testing a "make install" or "rpm" installation # note failed to load ptlrpc_gss is considered not fatal if [ "$BASE" == "ptlrpc_gss" ]; then - modprobe $BASE $@ 2>/dev/null || echo "gss/krb5 is not supported" + modprobe $BASE "$@" 2>/dev/null || echo "gss/krb5 is not supported" else - modprobe $BASE $@ + modprobe $BASE "$@" fi fi } @@ -226,10 +307,12 @@ load_module() { load_modules_local() { if [ -n "$MODPROBE" ]; then # use modprobe - return 0 + echo "Using modprobe to load modules" + return 0 fi if [ "$HAVE_MODULES" = true ]; then - # we already loaded + # we already loaded + echo "Modules already loaded" return 0 fi HAVE_MODULES=true @@ -238,15 +321,7 @@ load_modules_local() { load_module ../libcfs/libcfs/libcfs [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG" [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }" - local MODPROBECONF= - [ -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf - [ ! "$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre - [ -z "$LNETOPTS" -a "$MODPROBECONF" ] && \ - LNETOPTS=$(awk '/^options lnet/ { print $0}' $MODPROBECONF | sed 's/^options lnet //g') - echo $LNETOPTS | grep -q "accept=all" || LNETOPTS="$LNETOPTS accept=all"; - echo "lnet options: '$LNETOPTS'" - # note that insmod will ignore anything in modprobe.conf - load_module ../lnet/lnet/lnet $LNETOPTS + load_module ../lnet/lnet/lnet LNETLND=${LNETLND:-"socklnd/ksocklnd"} load_module ../lnet/klnds/$LNETLND load_module lvfs/lvfs @@ -319,9 +394,11 @@ unload_modules() { if $LOAD_MODULES_REMOTE ; then local list=$(comma_list $(remote_nodes_list)) - echo unloading modules on $list - do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE - do_rpc_nodes $list check_mem_leak + if [ ! -z $list ]; then + echo unloading modules on $list + do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE + do_rpc_nodes $list check_mem_leak + fi fi HAVE_MODULES=false @@ -332,6 +409,17 @@ unload_modules() { return 0 } +check_gss_daemon_nodes() { + local list=$1 + dname=$2 + + do_nodesv $list "num=\\\$(ps -o cmd -C $dname | grep $dname | wc -l); +if [ \\\"\\\$num\\\" -ne 1 ]; then + echo \\\$num instance of $dname; + exit 1; +fi; " +} + check_gss_daemon_facet() { facet=$1 dname=$2 @@ -345,27 +433,41 @@ check_gss_daemon_facet() { } send_sigint() { - local facet=$1 + local list=$1 shift - do_facet $facet "killall -2 $@ 2>/dev/null || true" + echo Stopping $@ on $list + do_nodes $list "killall -2 $@ 2>/dev/null || true" } +# start gss daemons on all nodes, or +# "daemon" on "list" if set start_gss_daemons() { - # starting on MDT - for num in `seq $MDSCOUNT`; do - do_facet mds$num "$LSVCGSSD -v" - if $GSS_PIPEFS; then - do_facet mds$num "$LGSSD -v" - fi - done - # starting on OSTs - for num in `seq $OSTCOUNT`; do - do_facet ost$num "$LSVCGSSD -v" - done - # starting on client - # FIXME: is "client" the right facet name? 
+ local list=$1 + local daemon=$2 + + if [ "$list" ] && [ "$daemon" ] ; then + echo "Starting gss daemon on nodes: $list" + do_nodes $list "$daemon" || return 8 + return 0 + fi + + local list=$(comma_list $(mdts_nodes)) + + echo "Starting gss daemon on mds: $list" + do_nodes $list "$LSVCGSSD -v" || return 1 + if $GSS_PIPEFS; then + do_nodes $list "$LGSSD -v" || return 2 + fi + + list=$(comma_list $(osts_nodes)) + echo "Starting gss daemon on ost: $list" + do_nodes $list "$LSVCGSSD -v" || return 3 + # starting on clients + + local clients=${CLIENTS:-`hostname`} if $GSS_PIPEFS; then - do_facet client "$LGSSD -v" + echo "Starting $LGSSD on clients $clients " + do_nodes $clients "$LGSSD -v" || return 4 fi # wait daemons entering "stable" status @@ -374,33 +476,37 @@ start_gss_daemons() { # # check daemons are running # - for num in `seq $MDSCOUNT`; do - check_gss_daemon_facet mds$num lsvcgssd - if $GSS_PIPEFS; then - check_gss_daemon_facet mds$num lgssd - fi - done - for num in `seq $OSTCOUNT`; do - check_gss_daemon_facet ost$num lsvcgssd - done + list=$(comma_list $(mdts_nodes) $(osts_nodes)) + check_gss_daemon_nodes $list lsvcgssd || return 5 if $GSS_PIPEFS; then - check_gss_daemon_facet client lgssd + list=$(comma_list $(mdts_nodes)) + check_gss_daemon_nodes $list lgssd || return 6 + fi + if $GSS_PIPEFS; then + check_gss_daemon_nodes $clients lgssd || return 7 fi } stop_gss_daemons() { - for num in `seq $MDSCOUNT`; do - send_sigint mds$num lsvcgssd lgssd - done - for num in `seq $OSTCOUNT`; do - send_sigint ost$num lsvcgssd - done - send_sigint client lgssd + local list=$(comma_list $(mdts_nodes)) + + send_sigint $list lsvcgssd lgssd + + list=$(comma_list $(osts_nodes)) + send_sigint $list lsvcgssd + + list=${CLIENTS:-`hostname`} + send_sigint $list lgssd } init_gss() { if $GSS; then - start_gss_daemons + if ! module_loaded ptlrpc_gss; then + load_module ptlrpc/gss/ptlrpc_gss + module_loaded ptlrpc_gss || + error_exit "init_gss : GSS=$GSS, but gss/krb5 is not supported!" + fi + start_gss_daemons || error_exit "start gss daemon failed! rc=$?" if [ -n "$LGSS_KEYRING_DEBUG" ]; then echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level @@ -538,9 +644,7 @@ restore_quota_type () { setup_quota(){ local mntpt=$1 - # We need: - # 1. run quotacheck only if quota is off - # 2. 
save the original quota_type params, restore them after testing + # We need save the original quota_type params, and restore them after testing # Suppose that quota type the same on mds and ost local quota_type=$(quota_type | grep MDT | cut -d "=" -f2) @@ -549,6 +653,9 @@ setup_quota(){ if [ "$quota_type" != "$QUOTA_TYPE" ]; then export old_QUOTA_TYPE=$quota_type quota_save_version $QUOTA_TYPE + else + qtype=$(tr -c -d "ug" <<< $QUOTA_TYPE) + $LFS quotacheck -$qtype $mntpt || error "quotacheck has failed for $type" fi local quota_usrs=$QUOTA_USERS @@ -691,14 +798,13 @@ sanity_mount_check () { # mount clients if not mouted zconf_mount_clients() { - local OPTIONS local clients=$1 local mnt=$2 - + local OPTIONS=${3:-$MOUNTOPT} # Only supply -o to mount if we have options - if [ -n "$MOUNTOPT" ]; then - OPTIONS="-o $MOUNTOPT" + if [ "$OPTIONS" ]; then + OPTIONS="-o $OPTIONS" fi local device=$MGSNID:/$FSNAME if [ -z "$mnt" -o -z "$FSNAME" ]; then @@ -738,7 +844,7 @@ zconf_umount_clients() { echo "Stopping clients: $clients $mnt (opts:$force)" do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts); if [ \\\$running -ne 0 ] ; then -echo Stopping client \\\$(hostname) client $mnt opts:$force; +echo Stopping client \\\$(hostname) $mnt opts:$force; lsof -t $mnt || need_kill=no; if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then pids=\\\$(lsof -t $mnt | sort -u); @@ -853,6 +959,8 @@ start_client_loads () { testnum=$((nodenum % numloads)) start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]} done + # bug 22169: wait the background threads to start + sleep 2 } # only for remote client @@ -966,7 +1074,7 @@ cleanup_check() { [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl && \ echo "$0: lustre didn't clean up..." 1>&2 && return 202 || true - if [ "`/sbin/lsmod 2>&1 | egrep 'lnet|libcfs'`" ]; then + if module_loaded lnet || module_loaded libcfs; then echo "$0: modules still loaded..." 1>&2 /sbin/lsmod 1>&2 return 203 @@ -1013,12 +1121,13 @@ wait_delete_completed () { sleep 1 TOTAL=`lctl get_param -n osc.*.kbytesavail | \ awk 'BEGIN{total=0}; {total+=$1}; END{print total}'` - [ "$TOTAL" -eq "$TOTALPREV" ] && break + [ "$TOTAL" -eq "$TOTALPREV" ] && return 0 echo "Waiting delete completed ... prev: $TOTALPREV current: $TOTAL " TOTALPREV=$TOTAL WAIT=$(( WAIT + 1)) done - echo "Delete completed." + echo "Delete is not completed in $MAX_WAIT sec" + return 1 } wait_for_host() { @@ -1036,12 +1145,12 @@ wait_for() { wait_recovery_complete () { local facet=$1 - # Use default policy if $2 is not passed by caller. + # Use default policy if $2 is not passed by caller. #define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 / 2) # as we are in process of changing obd_timeout in different ways # let's set MAX longer than that local MAX=${2:-$(( TIMEOUT * 4 ))} - + local var_svc=${facet}_svc local procfile="*.${!var_svc}.recovery_status" local WAIT=0 @@ -1058,6 +1167,57 @@ wait_recovery_complete () { return 1 } +wait_mds_ost_sync () { + # just because recovery is done doesn't mean we've finished + # orphan cleanup. Wait for llogs to get synchronized. + echo "Waiting for orphan cleanup..." 
+ # MAX value includes time needed for MDS-OST reconnection + local MAX=$(( TIMEOUT * 2 )) + local WAIT=0 + while [ $WAIT -lt $MAX ]; do + local -a sync=($(do_nodes $(comma_list $(osts_nodes)) \ + "$LCTL get_param -n obdfilter.*.mds_sync")) + local con=1 + for ((i=0; i<${#sync[@]}; i++)); do + [ ${sync[$i]} -eq 0 ] && continue + # there is a not finished MDS-OST synchronization + con=0 + break; + done + sleep 2 # increase waiting time and cover statfs cache + [ ${con} -eq 1 ] && return 0 + echo "Waiting $WAIT secs for $facet mds-ost sync done." + WAIT=$((WAIT + 2)) + done + echo "$facet recovery not done in $MAX sec. $STATUS" + return 1 +} + +wait_destroy_complete () { + echo "Waiting for destroy to be done..." + # MAX value shouldn't be big as this mean server responsiveness + # never increase this just to make test pass but investigate + # why it takes so long time + local MAX=5 + local WAIT=0 + while [ $WAIT -lt $MAX ]; do + local -a RPCs=($($LCTL get_param -n osc.*.destroys_in_flight)) + local con=1 + for ((i=0; i<${#RPCs[@]}; i++)); do + [ ${RPCs[$i]} -eq 0 ] && continue + # there are still some destroy RPCs in flight + con=0 + break; + done + sleep 1 + [ ${con} -eq 1 ] && return 0 # done waiting + echo "Waiting $WAIT secs for destroys to be done." + WAIT=$((WAIT + 1)) + done + echo "Destroys weren't done in $MAX sec." + return 1 +} + wait_exit_ST () { local facet=$1 @@ -1111,15 +1271,31 @@ wait_remote_prog () { return $rc } -client_df() { +clients_up() { # not every config has many clients + sleep 1 if [ ! -z "$CLIENTS" ]; then - $PDSH $CLIENTS "df $MOUNT" > /dev/null + $PDSH $CLIENTS "stat -f $MOUNT" > /dev/null else - df $MOUNT > /dev/null + stat -f $MOUNT > /dev/null fi } +client_up() { + local client=$1 + # usually checked on particular client or locally + sleep 1 + if [ ! -z "$client" ]; then + $PDSH $client "stat -f $MOUNT" > /dev/null + else + stat -f $MOUNT > /dev/null + fi +} + +client_evicted() { + ! client_up $1 +} + client_reconnect() { uname -n >> $MOUNT/recon if [ -z "$CLIENTS" ]; then @@ -1195,7 +1371,7 @@ ost_evict_client() { fail() { facet_failover $* || error "failover: $?" - client_df || error "post-failover df: $?" + clients_up || error "post-failover df: $?" } fail_nodf() { @@ -1208,9 +1384,8 @@ fail_abort() { stop $facet change_active $facet mount_facet $facet -o abort_recovery - client_df || echo "first df failed: $?" - sleep 1 - client_df || error "post-failover df: $?" + clients_up || echo "first df failed: $?" + clients_up || error "post-failover df: $?" 
} do_lmc() { @@ -1349,32 +1524,47 @@ do_node() { if [ "$myPDSH" = "rsh" ]; then # we need this because rsh does not return exit code of an executed command - local command_status="$TMP/cs" - rsh $HOST ":> $command_status" - rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; - cd $RPWD; sh -c \"$@\") || - echo command failed >$command_status" - [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true + local command_status="$TMP/cs" + rsh $HOST ":> $command_status" + rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; + cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\") || + echo command failed >$command_status" + [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true return 0 fi if $verbose ; then # print HOSTNAME for myPDSH="no_dsh" if [[ $myPDSH = no_dsh ]]; then - $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /" + $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /" else - $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" + $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" fi else - $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //" + $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed "s/^${HOST}: //" fi return ${PIPESTATUS[0]} } +do_nodev() { + do_node --verbose "$@" +} + single_local_node () { [ "$1" = "$HOSTNAME" ] } +# Outputs environment variable assignments that should be passed to remote nodes +get_env_vars() { + local var + local value + + for var in ${!MODOPTS_*}; do + value=${!var} + echo "${var}=\"$value\"" + done +} + do_nodes() { local verbose=false # do not stripe off hostname if verbose, bug 19215 @@ -1386,11 +1576,11 @@ do_nodes() { local rnodes=$1 shift - if $(single_local_node $rnodes); then + if single_local_node $rnodes; then if $verbose; then - do_node --verbose $rnodes $@ + do_nodev $rnodes "$@" else - do_node $rnodes $@ + do_node $rnodes "$@" fi return $? fi @@ -1407,9 +1597,9 @@ do_nodes() { fi if $verbose ; then - $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" + $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" else - $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g" + $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" | sed -re "s/\w+:\s//g" fi return ${PIPESTATUS[0]} } @@ -1422,6 +1612,10 @@ do_facet() { do_node $HOST "$@" } +do_nodesv() { + do_nodes --verbose "$@" +} + add() { local facet=$1 shift @@ -1478,6 +1672,10 @@ stopall() { rm -f $TMP/ost${num}active done + if ! 
combined_mgs_mds ; then + stop mgs + fi + return 0 } @@ -1495,6 +1693,10 @@ mdsmkfsopts() test $nr = 1 && echo -n $MDS_MKFS_OPTS || echo -n $MDSn_MKFS_OPTS } +combined_mgs_mds () { + [[ $MDSDEV1 = $MGSDEV ]] && [[ $mds1_HOST = $mgs_HOST ]] +} + formatall() { if [ "$IAMDIR" == "yes" ]; then MDS_MKFS_OPTS="$MDS_MKFS_OPTS --iam-dir" @@ -1503,18 +1705,12 @@ formatall() { [ "$FSTYPE" ] && FSTYPE_OPT="--backfstype $FSTYPE" - if [ ! -z $SEC ]; then - MDS_MKFS_OPTS="$MDS_MKFS_OPTS --param srpc.flavor.default=$SEC" - MDSn_MKFS_OPTS="$MDSn_MKFS_OPTS --param srpc.flavor.default=$SEC" - OST_MKFS_OPTS="$OST_MKFS_OPTS --param srpc.flavor.default=$SEC" - fi - stopall # We need ldiskfs here, may as well load them all load_modules [ "$CLIENTONLY" ] && return echo Formatting mgs, mds, osts - if [[ $MDSDEV1 != $MGSDEV ]] || [[ $mds1_HOST != $mgs_HOST ]]; then + if ! combined_mgs_mds ; then add mgs $mgs_MKFS_OPTS $FSTYPE_OPT --reformat $MGSDEV || exit 10 fi @@ -1578,8 +1774,8 @@ switch_identity() { remount_client() { - zconf_umount `hostname` $1 || error "umount failed" - zconf_mount `hostname` $1 || error "mount failed" + zconf_umount `hostname` $1 || error "umount failed" + zconf_mount `hostname` $1 || error "mount failed" } writeconf_facet () { @@ -1608,12 +1804,12 @@ setupall() { error "environments are insane!" load_modules - init_gss + if [ -z "$CLIENTONLY" ]; then echo Setup mgs, mdt, osts echo $WRITECONF | grep -q "writeconf" && \ writeconf_all - if [[ $mds1_HOST != $mgs_HOST ]] || [[ $MDSDEV1 != $MGSDEV ]]; then + if ! combined_mgs_mds ; then start mgs $MGSDEV $mgs_MOUNT_OPTS fi @@ -1628,9 +1824,9 @@ setupall() { eval mds${num}failover_HOST=$(facet_host mds$num) fi - if [ $IDENTITY_UPCALL != "default" ]; then + if [ $IDENTITY_UPCALL != "default" ]; then switch_identity $num $IDENTITY_UPCALL - fi + fi done for num in `seq $OSTCOUNT`; do DEVNAME=$(ostdevname $num) @@ -1645,16 +1841,21 @@ setupall() { done fi + + init_gss + # wait a while to allow sptlrpc configuration be propogated to targets, # only needed when mounting new target devices. - $GSS && sleep 10 + if $GSS; then + sleep 10 + fi [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE mount_client $MOUNT [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT if [ "$MOUNT_2" ]; then - mount_client $MOUNT2 + mount_client $MOUNT2 [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2 fi @@ -1666,6 +1867,7 @@ setupall() { # by a context negotiation rpc with $TIMEOUT. # FIXME better by monitoring import status. if $GSS; then + set_flavor_all $SEC sleep $((TIMEOUT + 5)) else sleep 5 @@ -1673,7 +1875,7 @@ setupall() { } mounted_lustre_filesystems() { - awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts + awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts } init_facet_vars () { @@ -1745,11 +1947,6 @@ osc_ensure_active () { [ $period -lt $timeout ] || log "$count OST are inactive after $timeout seconds, give up" } -som_check() { - SOM_ENABLED=$(do_facet $SINGLEMDS "$LCTL get_param mdt.*.som" | awk -F= ' {print $2}' | head -n 1) - echo $SOM_ENABLED -} - init_param_vars () { if ! remote_ost_nodsh && ! remote_mds_nodsh; then export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. 
-f1,2) @@ -1765,12 +1962,17 @@ init_param_vars () { osc_ensure_active $SINGLEMDS M $TIMEOUT osc_ensure_active client c $TIMEOUT - if [ x"$(som_check)" = x"enabled" ]; then - ENABLE_QUOTA="" - fi - if [ "$ENABLE_QUOTA" ]; then - setup_quota $MOUNT || return 2 + if [ $QUOTA_AUTO -ne 0 ]; then + if [ "$ENABLE_QUOTA" ]; then + echo "enable quota as required" + setup_quota $MOUNT || return 2 + else + echo "disable quota as required" + $LFS quotaoff -ug $MOUNT > /dev/null 2>&1 + fi fi + + return 0 } nfs_client_mode () { @@ -1788,9 +1990,7 @@ nfs_client_mode () { return 1 } -check_config () { - nfs_client_mode && return - +check_config_client () { local mntpt=$1 local mounted=$(mount | grep " $mntpt ") @@ -1809,8 +2009,8 @@ check_config () { if [[ x$mgc != xMGC$MGSNID ]]; then if [ "$mgs_HOST" ]; then local mgc_ip=$(ping -q -c1 -w1 $mgs_HOST | grep PING | awk '{print $3}' | sed -e "s/(//g" -e "s/)//g") - [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] || - error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc" +# [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] || +# error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc" fi fi return 0 @@ -1826,10 +2026,20 @@ check_config () { mgshost=$(echo $mgshost | awk -F: '{print $1}') # if [ "$mgshost" != "$myMGS_host" ]; then -# error_exit "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE +# log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE # Please use correct config or set mds_HOST correctly!" # fi +} + +check_config_clients () { + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 + + nfs_client_mode && return + + do_rpc_nodes $clients check_config_client $mntpt + sanity_mount_check || error "environments are insane!" } @@ -1843,37 +2053,65 @@ check_timeout () { fi } +is_mounted () { + local mntpt=$1 + local mounted=$(mounted_lustre_filesystems) + + echo $mounted' ' | grep -w -q $mntpt' ' +} + check_and_setup_lustre() { nfs_client_mode && return local MOUNTED=$(mounted_lustre_filesystems) local do_check=true - # MOUNT is not mounted - if [ -z "$MOUNTED" ] || ! $(echo $MOUNTED | grep -w -q $MOUNT); then + # 1. + # both MOUNT and MOUNT2 are not mounted + if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then [ "$REFORMAT" ] && formatall + # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set) setupall - MOUNTED=$(mounted_lustre_filesystems | head -1) - [ -z "$MOUNTED" ] && error "NAME=$NAME not mounted" + is_mounted $MOUNT || error "NAME=$NAME not mounted" export I_MOUNTED=yes do_check=false - - # MOUNT and MOUNT2 are mounted - elif $(echo $MOUNTED | grep -w -q $MOUNT2); then - - # MOUNT2 is mounted, MOUNT_2 is not set - if ! [ "$MOUNT_2" ]; then - zconf_umount `hostname` $MOUNT2 - export I_UMOUNTED2=yes - - # MOUNT2 is mounted, MOUNT_2 is set - else - check_config $MOUNT2 - fi + # 2. + # MOUNT2 is mounted + elif is_mounted $MOUNT2; then + # 3. + # MOUNT2 is mounted, while MOUNT_2 is not set + if ! [ "$MOUNT_2" ]; then + cleanup_mount $MOUNT2 + export I_UMOUNTED2=yes + + # 4. + # MOUNT2 is mounted, MOUNT_2 is set + else + # FIXME: what to do if check_config failed? + # i.e. if: + # 1) remote client has mounted other Lustre fs ? + # 2) it has insane env ? + # let's try umount MOUNT2 on all clients and mount it again: + if ! check_config_clients $MOUNT2; then + cleanup_mount $MOUNT2 + restore_mount $MOUNT2 + export I_MOUNTED2=yes + fi + fi + + # 5. 
+ # MOUNT is mounted MOUNT2 is not mounted + elif [ "$MOUNT_2" ]; then + restore_mount $MOUNT2 + export I_MOUNTED2=yes fi if $do_check; then - check_config $MOUNT + # FIXME: what to do if check_config failed? + # i.e. if: + # 1) remote client has mounted other Lustre fs? + # 2) lustre is mounted on remote_clients atall ? + check_config_clients $MOUNT init_facets_vars init_param_vars @@ -1882,35 +2120,58 @@ check_and_setup_lustre() { lctl set_param debug_mb=${DEBUG_SIZE}; sync" fi + + init_gss + set_flavor_all $SEC + if [ "$ONLY" == "setup" ]; then exit 0 fi } +restore_mount () { + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 + + zconf_mount_clients $clients $mntpt +} + +cleanup_mount () { + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 + + zconf_umount_clients $clients $mntpt +} + cleanup_and_setup_lustre() { if [ "$ONLY" == "cleanup" -o "`mount | grep $MOUNT`" ]; then lctl set_param debug=0 || true cleanupall if [ "$ONLY" == "cleanup" ]; then - exit 0 + exit 0 fi fi check_and_setup_lustre } check_and_cleanup_lustre() { - if [ "`mount | grep $MOUNT`" ]; then + if is_mounted $MOUNT; then [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* [ "$ENABLE_QUOTA" ] && restore_quota_type || true fi + if [ "$I_UMOUNTED2" = "yes" ]; then - mount_client $MOUNT2 || error "restore MOUNT2 failed" + restore_mount $MOUNT2 || error "restore $MOUNT2 failed" + fi + + if [ "$I_MOUNTED2" = "yes" ]; then + cleanup_mount $MOUNT2 fi if [ "$I_MOUNTED" = "yes" ]; then cleanupall -f || error "cleanup failed" + unset I_MOUNTED fi - unset I_MOUNTED } ####### @@ -1982,9 +2243,9 @@ testslist_filter () { local start_at=$START_AT local stop_at=$STOP_AT - local var=${TESTSUITE}_START_AT + local var=${TESTSUITE//-/_}_START_AT [ x"${!var}" != x ] && start_at=${!var} - var=${TESTSUITE}_STOP_AT + var=${TESTSUITE//-/_}_STOP_AT [ x"${!var}" != x ] && stop_at=${!var} sed -n 's/^test_\([^ (]*\).*/\1/p' $script | \ @@ -2028,9 +2289,9 @@ at_max_get() { # suppose that all ost-s has the same at_max set if [ $facet == "ost" ]; then - do_facet ost1 "lctl get_param -n at_max" + do_facet ost1 "lctl get_param -n at_max" else - do_facet $facet "lctl get_param -n at_max" + do_facet $facet "lctl get_param -n at_max" fi } @@ -2042,15 +2303,15 @@ at_max_set() { for facet in $@; do if [ $facet == "ost" ]; then for i in `seq $OSTCOUNT`; do - do_facet ost$i "lctl set_param at_max=$at_max" + do_facet ost$i "lctl set_param at_max=$at_max" done elif [ $facet == "mds" ]; then for i in `seq $MDSCOUNT`; do - do_facet mds$i "lctl set_param at_max=$at_max" + do_facet mds$i "lctl set_param at_max=$at_max" done else - do_facet $facet "lctl set_param at_max=$at_max" + do_facet $facet "lctl set_param at_max=$at_max" fi done } @@ -2169,10 +2430,6 @@ pgcache_empty() { return 1 fi done - if [[ $MDSDEV1 != $MGSDEV ]]; then - stop mgs - fi - return 0 } @@ -2218,8 +2475,6 @@ stop_full_debug_logging() { error_noexit() { local TYPE=${TYPE:-"FAIL"} - local ERRLOG - lctl set_param fail_loc=0 2>/dev/null || true local dump=true # do not dump logs if $1=false @@ -2230,25 +2485,23 @@ error_noexit() { log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ " + # We need to dump the logs on all nodes if $dump; then - ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s) - echo "Dumping lctl log to $ERRLOG" - # We need to dump the logs on all nodes - do_nodes $(comma_list $(nodes_list)) $NODE $LCTL dk $ERRLOG + gather_logs $(comma_list $(nodes_list)) fi + debugrestore [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG - TEST_FAILED=true + echo "$@" > 
$LOGDIR/err } error() { error_noexit "$@" - $FAIL_ON_ERROR && exit 1 || true + exit 1 } error_exit() { - error_noexit "$@" - exit 1 + error "$@" } # use only if we are ignoring failures for this test, bugno required. @@ -2290,15 +2543,27 @@ build_test_filter() { done for G in $GRANT_CHECK_LIST; do eval GCHECK_ONLY_${G}=true - done + done } basetest() { - echo ${1%%[a-z]*} + if [[ $1 = [a-z]* ]]; then + echo $1 + else + echo ${1%%[a-z]*} + fi } # print a newline if the last test was skipped export LAST_SKIPPED= +# +# Main entry into test-framework. This is called with the name and +# description of a test. The name is used to find the function to run +# the test using "test_$name". +# +# This supports a variety of methods of specifying specific test to +# run or not run. These need to be documented... +# run_test() { assert_DIR @@ -2307,13 +2572,13 @@ run_test() { testname=ONLY_$1 if [ ${!testname}x != x ]; then [ "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED= - run_one $1 "$2" + run_one_logged $1 "$2" return $? fi testname=ONLY_$base if [ ${!testname}x != x ]; then [ "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED= - run_one $1 "$2" + run_one_logged $1 "$2" return $? fi LAST_SKIPPED="y" @@ -2346,23 +2611,18 @@ run_test() { fi LAST_SKIPPED= - run_one $1 "$2" + run_one_logged $1 "$2" return $? } -EQUALS="======================================================================" equals_msg() { - msg="$@" - - local suffixlen=$((${#EQUALS} - ${#msg})) - [ $suffixlen -lt 5 ] && suffixlen=5 - log `echo $(printf '===== %s %.*s\n' "$msg" $suffixlen $EQUALS)` + banner "$*" } log() { echo "$*" - lsmod | grep lnet > /dev/null || load_modules + module_loaded lnet || load_modules local MSG="$*" # Get rid of ' @@ -2378,16 +2638,21 @@ log() { } trace() { - log "STARTING: $*" - strace -o $TMP/$1.strace -ttt $* - RC=$? - log "FINISHED: $*: rc $RC" - return 1 + log "STARTING: $*" + strace -o $TMP/$1.strace -ttt $* + RC=$? + log "FINISHED: $*: rc $RC" + return 1 } pass() { - $TEST_FAILED && echo -n "FAIL " || echo -n "PASS " - echo $@ + # Set TEST_STATUS here; will be used for logging the result + if [ -f $LOGDIR/err ]; then + TEST_STATUS="FAIL" + else + TEST_STATUS="PASS" + fi + echo $TEST_STATUS " " $@ } check_mds() { @@ -2402,33 +2667,80 @@ reset_fail_loc () { echo done. } + +# +# Log a message (on all nodes) padded with "=" before and after. +# Also appends a timestamp and prepends the testsuite name. +# + +EQUALS="====================================================================================================" +banner() { + msg="== ${TESTSUITE} $*" + last=${msg: -1:1} + [[ $last != "=" && $last != " " ]] && msg="$msg " + msg=$(printf '%s%.*s' "$msg" $((${#EQUALS} - ${#msg})) $EQUALS ) + # always include at least == after the message + log "$msg== $(date +"%H:%M:%S (%s)")" +} + +# +# Run a single test function and cleanup after it. +# +# This function should be run in a subshell so the test func can +# exit() without stopping the whole script. +# run_one() { - testnum=$1 - message=$2 + local testnum=$1 + local message=$2 tfile=f${testnum} export tdir=d0.${TESTSUITE}/d${base} - + export TESTNAME=test_$testnum local SAVE_UMASK=`umask` umask 0022 - local BEFORE=`date +%s` - echo - log "== test $testnum: $message == `date +%H:%M:%S` ($BEFORE)" - #check_mds - export TESTNAME=test_$testnum - TEST_FAILED=false + banner "test $testnum: $message" test_${testnum} || error "test_$testnum failed with $?" - #check_mds cd $SAVE_PWD reset_fail_loc check_grant ${testnum} || error "check_grant $testnum failed with $?" 
check_catastrophe || error "LBUG/LASSERT detected" ps auxww | grep -v grep | grep -q multiop && error "multiop still running" - pass "($((`date +%s` - $BEFORE))s)" - TEST_FAILED=false unset TESTNAME unset tdir umask $SAVE_UMASK + return 0 +} + +# +# Wrapper around run_one to ensure: +# - test runs in subshell +# - output of test is saved to separate log file for error reporting +# - test result is saved to data file +# +run_one_logged() { + local BEFORE=`date +%s` + local TEST_ERROR + local name=${TESTSUITE}.test_${1}.test_log.$(hostname).log + local test_log=$LOGDIR/$name + rm -rf $LOGDIR/err + + echo + (run_one $1 "$2") 2>&1 | tee $test_log + local RC=${PIPESTATUS[0]} + + [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \ + echo "test_$1 returned $RC" | tee $LOGDIR/err + + duration=$((`date +%s` - $BEFORE)) + pass "(${duration}s)" + [ -f $LOGDIR/err ] && TEST_ERROR=$(cat $LOGDIR/err) + log_sub_test test_${1} $TEST_STATUS $duration "$RC" "$TEST_ERROR" + + if [ -f $LOGDIR/err ]; then + $FAIL_ON_ERROR && exit $RC + fi + + return 0 } canonical_path() { @@ -2438,50 +2750,50 @@ canonical_path() { sync_clients() { [ -d $DIR1 ] && cd $DIR1 && sync; sleep 1; sync [ -d $DIR2 ] && cd $DIR2 && sync; sleep 1; sync - cd $SAVE_PWD + cd $SAVE_PWD } check_grant() { export base=`basetest $1` [ "$CHECK_GRANT" == "no" ] && return 0 - testname=GCHECK_ONLY_${base} + testname=GCHECK_ONLY_${base} [ ${!testname}x == x ] && return 0 echo -n "checking grant......" - cd $SAVE_PWD - # write some data to sync client lost_grant - rm -f $DIR1/${tfile}_check_grant_* 2>&1 - for i in `seq $OSTCOUNT`; do - $LFS setstripe $DIR1/${tfile}_check_grant_$i -i $(($i -1)) -c 1 - dd if=/dev/zero of=$DIR1/${tfile}_check_grant_$i bs=4k \ - count=1 > /dev/null 2>&1 - done - # sync all the data and make sure no pending data on server - sync_clients - - #get client grant and server grant - client_grant=0 + cd $SAVE_PWD + # write some data to sync client lost_grant + rm -f $DIR1/${tfile}_check_grant_* 2>&1 + for i in `seq $OSTCOUNT`; do + $LFS setstripe $DIR1/${tfile}_check_grant_$i -i $(($i -1)) -c 1 + dd if=/dev/zero of=$DIR1/${tfile}_check_grant_$i bs=4k \ + count=1 > /dev/null 2>&1 + done + # sync all the data and make sure no pending data on server + sync_clients + + #get client grant and server grant + client_grant=0 for d in `lctl get_param -n osc.*.cur_grant_bytes`; do - client_grant=$((client_grant + $d)) - done - server_grant=0 - for d in `lctl get_param -n obdfilter.*.tot_granted`; do - server_grant=$((server_grant + $d)) - done - - # cleanup the check_grant file - for i in `seq $OSTCOUNT`; do - rm $DIR1/${tfile}_check_grant_$i - done - - #check whether client grant == server grant - if [ $client_grant != $server_grant ]; then - echo "failed: client:${client_grant} server: ${server_grant}" - return 1 - else - echo "pass" - fi + client_grant=$((client_grant + $d)) + done + server_grant=0 + for d in `lctl get_param -n obdfilter.*.tot_granted`; do + server_grant=$((server_grant + $d)) + done + + # cleanup the check_grant file + for i in `seq $OSTCOUNT`; do + rm $DIR1/${tfile}_check_grant_$i + done + + #check whether client grant == server grant + if [ $client_grant != $server_grant ]; then + echo "failed: client:${client_grant} server: ${server_grant}" + return 1 + else + echo "pass" + fi } ######################## @@ -2517,6 +2829,13 @@ remote_mds_nodsh() remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ] } +require_dsh_mds() +{ + remote_mds_nodsh && echo "SKIP: $TESTSUITE: remote MDS with nodsh" && \ + MSKIPPED=1 && 
return 1 + return 0 +} + remote_ost () { local node @@ -2532,6 +2851,13 @@ remote_ost_nodsh() remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ] } +require_dsh_ost() +{ + remote_ost_nodsh && echo "SKIP: $TESTSUITE: remote OST with nodsh" && \ + OSKIPPED=1 && return 1 + return 0 +} + remote_mgs_nodsh() { local MGS @@ -2539,6 +2865,12 @@ remote_mgs_nodsh() remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ] } +local_mode () +{ + remote_mds_nodsh || remote_ost_nodsh || \ + $(single_local_node $(comma_list $(nodes_list))) +} + mdts_nodes () { local MDSNODES local NODES_sort @@ -2658,9 +2990,10 @@ mixed_mdt_devs () { generate_machine_file() { local nodes=${1//,/ } local machinefile=$2 - rm -f $machinefile || error "can't rm $machinefile" + rm -f $machinefile for node in $nodes; do - echo $node >>$machinefile + echo $node >>$machinefile || \ + { echo "can not generate machinefile $machinefile" && return 1; } done } @@ -2671,6 +3004,17 @@ get_stripe () { rm -f $file } +setstripe_nfsserver () { + local dir=$1 + + local nfsserver=$(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \ + { print $1 }' /proc/mounts | cut -f 1 -d : | head -1) + + [ -z $nfsserver ] && echo "$dir is not nfs mounted" && return 1 + + do_nodev $nfsserver lfs setstripe "$@" +} + check_runas_id_ret() { local myRC=0 local myRUNAS_UID=$1 @@ -2779,6 +3123,10 @@ inodes_available () { echo $IFree } +mdsrate_inodes_available () { + echo $(($(inodes_available) - 1)) +} + # reset llite stat counters clear_llite_stats(){ lctl set_param -n llite.*.stats 0 @@ -2816,7 +3164,7 @@ calc_osc_kbytes () { # generate a stream of formatted strings ( =) save_lustre_params() { local s - do_nodes --verbose $1 "lctl get_param $2 | while read s; do echo \\\$s; done" + do_nodesv $1 "lctl get_param $2 | while read s; do echo \\\$s; done" } # restore lustre parameters from input stream, produces by save_lustre_params @@ -2845,19 +3193,19 @@ exit \\\$rc;" # $2 file # $3 $RUNAS get_stripe_info() { - local tmp_file + local tmp_file - stripe_size=0 - stripe_count=0 - stripe_index=0 - tmp_file=$(mktemp) + stripe_size=0 + stripe_count=0 + stripe_index=0 + tmp_file=$(mktemp) - do_facet $1 $3 lfs getstripe -v $2 > $tmp_file + do_facet $1 $3 lfs getstripe -v $2 > $tmp_file - stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file` - stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file` - stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file` - rm -f $tmp_file + stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file` + stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file` + stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file` + rm -f $tmp_file } # CMD: determine mds index where directory inode presents @@ -2865,12 +3213,13 @@ get_mds_dir () { local dir=$1 local file=$dir/f0.get_mds_dir_tmpfile + mkdir -p $dir rm -f $file sleep 1 local iused=$(lfs df -i $dir | grep MDT | awk '{print $3}') local -a oldused=($iused) - touch $file + openfile -f O_CREAT:O_LOV_DELAY_CREATE -m 0644 $file > /dev/null sleep 1 iused=$(lfs df -i $dir | grep MDT | awk '{print $3}') local -a newused=($iused) @@ -2887,7 +3236,10 @@ get_mds_dir () { } mdsrate_cleanup () { - mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6 + if [ -d $4 ]; then + mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6 + rmdir $4 + fi } delayed_recovery_enabled () { @@ -2960,12 +3312,13 @@ get_osc_import_name() { } wait_import_state () { - local expected=$1 - local CONN_PROC=$2 + local facet=$1 + 
local expected=$2 + local CONN_PROC=$3 local CONN_STATE local i=0 - CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2) + CONN_STATE=$(do_facet $facet $LCTL get_param -n $CONN_PROC | awk '/state/ {print $2}') while [ "${CONN_STATE}" != "${expected}" ]; do if [ "${expected}" == "DISCONN" ]; then # for disconn we can check after proc entry is removed @@ -2978,7 +3331,7 @@ wait_import_state () { [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \ error "can't put import for $CONN_PROC into ${expected} state" && return 1 sleep 1 - CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2) + CONN_STATE=$(do_facet $facet $LCTL get_param -n $CONN_PROC | awk '/state/ {print $2}') i=$(($i + 1)) done @@ -2991,62 +3344,60 @@ wait_osc_import_state() { local ost_facet=$2 local expected=$3 local ost=$(get_osc_import_name $facet $ost_facet) - local CONN_PROC - local CONN_STATE - local i=0 + local CONN_PROC="osc.${ost}.import" - CONN_PROC="osc.${ost}.ost_server_uuid" - CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2) - while [ "${CONN_STATE}" != "${expected}" ]; do - if [ "${expected}" == "DISCONN" ]; then - # for disconn we can check after proc entry is removed - [ "x${CONN_STATE}" == "x" ] && return 0 - # with AT we can have connect request timeout ~ reconnect timeout - # and test can't see real disconnect - [ "${CONN_STATE}" == "CONNECTING" ] && return 0 - fi - # disconnect rpc should be wait not more obd_timeout - [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \ - error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1 - sleep 1 - CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2) - i=$(($i + 1)) - done - - log "${ost_facet} now in ${CONN_STATE} state" + wait_import_state $facet $expected $CONN_PROC || return 1 return 0 } get_clientmdc_proc_path() { - echo "${1}-mdc-*" + local mdc=$(convert_facet2label $1) + + echo "${mdc}-mdc-*" +} + +wait_mdc_import_state() { + local facet=$1 + local expected=$2 + local mdc=$(get_clientmdc_proc_path $facet) + local CONN_PROC="mdc.${mdc}.import" + + wait_import_state client $expected $CONN_PROC || return 1 + return 0 } do_rpc_nodes () { local list=$1 shift - do_nodes --verbose $list "PATH=$LUSTRE/tests/:$PATH sh rpc.sh $@ " + # Add paths to lustre tests for 32 and 64 bit systems. + local RPATH="$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH" + do_nodesv $list "PATH=$RPATH sh rpc.sh $@ " } -wait_clients_import_state () { - local list=$1 - local facet=$2 - local expected=$3 +wait_client_import_state () { + local facet=$1 + local expected=$2 shift - local label=$(convert_facet2label $facet) - local proc_path case $facet in - ost* ) proc_path="osc.$(get_clientosc_proc_path $label).ost_server_uuid" ;; - mds* ) proc_path="mdc.$(get_clientmdc_proc_path $label).mds_server_uuid" ;; - *) error "unknown facet!" ;; + ost* ) wait_osc_import_state client $facet $expected || return 1;; + mds* ) wait_mdc_import_state $facet $expected || return 1 ;; + * ) error "unknown facet!" + return 1 ;; esac + return 0 +} +wait_clients_import_state () { + local list=$1 + shift - if ! do_rpc_nodes $list wait_import_state $expected $proc_path; then - error "import is not in ${expected} state" + if ! 
do_rpc_nodes $list wait_client_import_state "$@"; then + error "import is not in expected state" return 1 fi + return 0 } oos_full() { @@ -3066,6 +3417,102 @@ oos_full() { return $OSCFULL } +pool_list () { + do_facet mgs lctl pool_list $1 +} + +create_pool() { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + do_facet mgs lctl pool_new $1 + local RC=$? + # get param should return err unless pool is created + [[ $RC -ne 0 ]] && return $RC + + wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ + 2>/dev/null || echo foo" "" || RC=1 + if [[ $RC -eq 0 ]]; then + add_pool_to_list $1 + else + error "pool_new failed $1" + fi + return $RC +} + +add_pool_to_list () { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + local listvar=${fsname}_CREATED_POOLS + eval export ${listvar}=$(expand_list ${!listvar} $poolname) +} + +remove_pool_from_list () { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + local listvar=${fsname}_CREATED_POOLS + eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname) +} + +destroy_pool_int() { + local ost + local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \ + awk '$1 !~ /^Pool:/ {print $1}') + for ost in $OSTS; do + do_facet mgs lctl pool_remove $1 $ost + done + do_facet mgs lctl pool_destroy $1 +} + +# . or +destroy_pool() { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + [[ x$fsname = x$poolname ]] && fsname=$FSNAME + + local RC + + pool_list $fsname.$poolname || return $? + + destroy_pool_int $fsname.$poolname + RC=$? + [[ $RC -ne 0 ]] && return $RC + + wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ + 2>/dev/null || echo foo" "foo" || RC=1 + + if [[ $RC -eq 0 ]]; then + remove_pool_from_list $fsname.$poolname + else + error "destroy pool failed $1" + fi + return $RC +} + +destroy_pools () { + local fsname=${1:-$FSNAME} + local poolname + local listvar=${fsname}_CREATED_POOLS + + pool_list $fsname + + [ x${!listvar} = x ] && return 0 + + echo destroy the created pools: ${!listvar} + for poolname in ${!listvar//,/ }; do + destroy_pool $fsname.$poolname + done +} + +cleanup_pools () { + local fsname=${1:-$FSNAME} + trap 0 + destroy_pools $fsname +} + gather_logs () { local list=$1 @@ -3076,27 +3523,30 @@ gather_logs () { # of writing the file to an NFS directory so it doesn't need to be copied. local tmp=$TMP local docp=true - [ -d "$SHARED_DIR_LOGS" ] && tmp=$SHARED_DIR_LOGS && docp=false - + [ -f $LOGDIR/shared ] && docp=false + # dump lustre logs, dmesg - do_nodes $list "log=$tmp/\\\$(hostname)-debug-$ts.log ; -lctl dk \\\$log >/dev/null; -log=$tmp/\\\$(hostname)-dmesg-$ts.log; -dmesg > \\\$log; " - - # FIXME: does it make sense to collect the logs for $ts only, but all - # TESTSUITE logs? - # rsync $TMP/*${TESTSUITE}* to gather the logs dumped by error fn - local logs=$TMP/'*'${TESTSUITE}'*' - if $docp; then - logs=$logs' '$tmp/'*'$ts'*' - fi - for node in ${list//,/ }; do - rsync -az $node:"$logs" $TMP - done - local archive=$TMP/${TESTSUITE}-$ts.tar.bz2 - tar -jcf $archive $tmp/*$ts* $TMP/*${TESTSUITE}* + prefix="$LOGDIR/${TESTSUITE}.${TESTNAME}" + suffix="$ts.log" + echo "Dumping lctl log to ${prefix}.*.${suffix}" + + if [ "$CLIENTONLY" -o "$PDSH" == "no_dsh" ]; then + echo "Dumping logs only on local client." 
+ $LCTL dk > ${prefix}.debug_log.$(hostname).${suffix} + dmesg > ${prefix}.dmesg.$(hostname).${suffix} + return + fi + + do_nodesv $list \ + "$LCTL dk > ${prefix}.debug_log.\\\$(hostname).${suffix}; + dmesg > ${prefix}.dmesg.\\\$(hostname).${suffix}" + if [ ! -f $LOGDIR/shared ]; then + do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR + fi + + local archive=$LOGDIR/${TESTSUITE}-$ts.tar.bz2 + tar -jcf $archive $LOGDIR/*$ts* $LOGDIR/*${TESTSUITE}* echo $archive } @@ -3107,3 +3557,385 @@ cleanup_logs () { [ -n ${TESTSUITE} ] && do_nodes $list "rm -f $TMP/*${TESTSUITE}*" || true } +do_ls () { + local mntpt_root=$1 + local num_mntpts=$2 + local dir=$3 + local i + local cmd + local pids + local rc=0 + + for i in $(seq 0 $num_mntpts); do + cmd="ls -laf ${mntpt_root}$i/$dir" + echo + $cmd; + $cmd > /dev/null & + pids="$pids $!" + done + echo pids=$pids + for pid in $pids; do + wait $pid || rc=$? + done + + return $rc +} + +get_clients_mount_count () { + local clients=${CLIENTS:-`hostname`} + + # we need to take into account the clients mounts and + # exclude mds/ost mounts if any; + do_nodes $clients cat /proc/mounts | grep lustre | grep $MOUNT | wc -l +} + +# gss functions +PROC_CLI="srpc_info" + +combination() +{ + local M=$1 + local N=$2 + local R=1 + + if [ $M -lt $N ]; then + R=0 + else + N=$((N + 1)) + while [ $N -le $M ]; do + R=$((R * N)) + N=$((N + 1)) + done + fi + + echo $R + return 0 +} + +calc_connection_cnt() { + local dir=$1 + + # MDT->MDT = 2 * C(M, 2) + # MDT->OST = M * O + # CLI->OST = C * O + # CLI->MDT = C * M + comb_m2=$(combination $MDSCOUNT 2) + + local num_clients=$(get_clients_mount_count) + + local cnt_mdt2mdt=$((comb_m2 * 2)) + local cnt_mdt2ost=$((MDSCOUNT * OSTCOUNT)) + local cnt_cli2ost=$((num_clients * OSTCOUNT)) + local cnt_cli2mdt=$((num_clients * MDSCOUNT)) + local cnt_all2ost=$((cnt_mdt2ost + cnt_cli2ost)) + local cnt_all2mdt=$((cnt_mdt2mdt + cnt_cli2mdt)) + local cnt_all2all=$((cnt_mdt2ost + cnt_mdt2mdt + cnt_cli2ost + cnt_cli2mdt)) + + local var=cnt_$dir + local res=${!var} + + echo $res +} + +set_rule() +{ + local tgt=$1 + local net=$2 + local dir=$3 + local flavor=$4 + local cmd="$tgt.srpc.flavor" + + if [ $net == "any" ]; then + net="default" + fi + cmd="$cmd.$net" + + if [ $dir != "any" ]; then + cmd="$cmd.$dir" + fi + + cmd="$cmd=$flavor" + log "Setting sptlrpc rule: $cmd" + do_facet mgs "$LCTL conf_param $cmd" +} + +count_flvr() +{ + local output=$1 + local flavor=$2 + local count=0 + + rpc_flvr=`echo $flavor | awk -F - '{ print $1 }'` + bulkspec=`echo $flavor | awk -F - '{ print $2 }'` + + count=`echo "$output" | grep "rpc flavor" | grep $rpc_flvr | wc -l` + + if [ "x$bulkspec" != "x" ]; then + algs=`echo $bulkspec | awk -F : '{ print $2 }'` + + if [ "x$algs" != "x" ]; then + bulk_count=`echo "$output" | grep "bulk flavor" | grep $algs | wc -l` + else + bulk=`echo $bulkspec | awk -F : '{ print $1 }'` + if [ $bulk == "bulkn" ]; then + bulk_count=`echo "$output" | grep "bulk flavor" \ + | grep "null/null" | wc -l` + elif [ $bulk == "bulki" ]; then + bulk_count=`echo "$output" | grep "bulk flavor" \ + | grep "/null" | grep -v "null/" | wc -l` + else + bulk_count=`echo "$output" | grep "bulk flavor" \ + | grep -v "/null" | grep -v "null/" | wc -l` + fi + fi + + [ $bulk_count -lt $count ] && count=$bulk_count + fi + + echo $count +} + +flvr_cnt_cli2mdt() +{ + local flavor=$1 + local cnt + + local clients=${CLIENTS:-`hostname`} + + for c in ${clients//,/ }; do + output=`do_node $c lctl get_param -n mdc.*-MDT*-mdc-*.$PROC_CLI 2>/dev/null` + 
tmpcnt=`count_flvr "$output" $flavor` + cnt=$((cnt + tmpcnt)) + done + echo $cnt +} + +flvr_cnt_cli2ost() +{ + local flavor=$1 + local cnt + + local clients=${CLIENTS:-`hostname`} + + for c in ${clients//,/ }; do + output=`do_node $c lctl get_param -n osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null` + tmpcnt=`count_flvr "$output" $flavor` + cnt=$((cnt + tmpcnt)) + done + echo $cnt +} + +flvr_cnt_mdt2mdt() +{ + local flavor=$1 + local cnt=0 + + if [ $MDSCOUNT -le 1 ]; then + echo 0 + return + fi + + for num in `seq $MDSCOUNT`; do + output=`do_facet mds$num lctl get_param -n mdc.*-MDT*-mdc[0-9]*.$PROC_CLI 2>/dev/null` + tmpcnt=`count_flvr "$output" $flavor` + cnt=$((cnt + tmpcnt)) + done + echo $cnt; +} + +flvr_cnt_mdt2ost() +{ + local flavor=$1 + local cnt=0 + + for num in `seq $MDSCOUNT`; do + output=`do_facet mds$num lctl get_param -n osc.*OST*-osc-MDT*.$PROC_CLI 2>/dev/null` + tmpcnt=`count_flvr "$output" $flavor` + cnt=$((cnt + tmpcnt)) + done + echo $cnt; +} + +flvr_cnt_mgc2mgs() +{ + local flavor=$1 + + output=`do_facet client lctl get_param -n mgc.*.$PROC_CLI 2>/dev/null` + count_flvr "$output" $flavor +} + +do_check_flavor() +{ + local dir=$1 # from to + local flavor=$2 # flavor expected + local res=0 + + if [ $dir == "cli2mdt" ]; then + res=`flvr_cnt_cli2mdt $flavor` + elif [ $dir == "cli2ost" ]; then + res=`flvr_cnt_cli2ost $flavor` + elif [ $dir == "mdt2mdt" ]; then + res=`flvr_cnt_mdt2mdt $flavor` + elif [ $dir == "mdt2ost" ]; then + res=`flvr_cnt_mdt2ost $flavor` + elif [ $dir == "all2ost" ]; then + res1=`flvr_cnt_mdt2ost $flavor` + res2=`flvr_cnt_cli2ost $flavor` + res=$((res1 + res2)) + elif [ $dir == "all2mdt" ]; then + res1=`flvr_cnt_mdt2mdt $flavor` + res2=`flvr_cnt_cli2mdt $flavor` + res=$((res1 + res2)) + elif [ $dir == "all2all" ]; then + res1=`flvr_cnt_mdt2ost $flavor` + res2=`flvr_cnt_cli2ost $flavor` + res3=`flvr_cnt_mdt2mdt $flavor` + res4=`flvr_cnt_cli2mdt $flavor` + res=$((res1 + res2 + res3 + res4)) + fi + + echo $res +} + +wait_flavor() +{ + local dir=$1 # from to + local flavor=$2 # flavor expected + local expect=${3:-$(calc_connection_cnt $dir)} # number expected + + local res=0 + + for ((i=0;i<20;i++)); do + echo -n "checking..." + res=$(do_check_flavor $dir $flavor) + if [ $res -eq $expect ]; then + echo "found $res $flavor connections of $dir, OK" + return 0 + else + echo "found $res $flavor connections of $dir, not ready ($expect)" + sleep 4 + fi + done + + echo "Error checking $flavor of $dir: expect $expect, actual $res" + return 1 +} + +restore_to_default_flavor() +{ + local proc="mgs.MGS.live.$FSNAME" + + echo "restoring to default flavor..." + + nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l` + + # remove all existing rules if any + if [ $nrule -ne 0 ]; then + echo "$nrule existing rules" + for rule in `do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor."`; do + echo "remove rule: $rule" + spec=`echo $rule | awk -F = '{print $1}'` + do_facet mgs "$LCTL conf_param $spec=" + done + fi + + # verify no rules left + nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." 
| wc -l` + [ $nrule -ne 0 ] && error "still $nrule rules left" + + # wait for default flavor to be applied + # currently default flavor for all connections are 'null' + wait_flavor all2all null + echo "now at default flavor settings" +} + +set_flavor_all() +{ + local flavor=${1:-null} + + echo "setting all flavor to $flavor" + + # FIXME need parameter to this fn + # and remove global vars + local cnt_all2all=$(calc_connection_cnt all2all) + + local res=$(do_check_flavor all2all $flavor) + if [ $res -eq $cnt_all2all ]; then + echo "already have total $res $flavor connections" + return + fi + + echo "found $res $flavor out of total $cnt_all2all connections" + restore_to_default_flavor + + [[ $flavor = null ]] && return 0 + + set_rule $FSNAME any any $flavor + wait_flavor all2all $flavor +} + + +check_logdir() { + local dir=$1 + # Checking for shared logdir + if [ ! -d $dir ]; then + # Not found. Create local logdir + mkdir -p $dir + else + touch $dir/node.$(hostname).yml + fi + return 0 +} + +check_write_access() { + local dir=$1 + for node in $(nodes_list); do + if [ ! -f "$dir/node.${node}.yml" ]; then + # Logdir not accessible/writable from this node. + return 1 + fi + done + return 0 +} + +init_logging() { + if [[ -n $YAML_LOG ]]; then + return + fi + export YAML_LOG=${LOGDIR}/results.yml + mkdir -p $LOGDIR + init_clients_lists + + do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $LOGDIR + if check_write_access $LOGDIR; then + touch $LOGDIR/shared + echo "Logging to shared log directory: $LOGDIR" + else + echo "Logging to local directory: $LOGDIR" + fi + + yml_nodes_file $LOGDIR + yml_results_file >> $YAML_LOG +} + +log_test() { + yml_log_test $1 >> $YAML_LOG +} + +log_sub_test() { + yml_log_sub_test $@ >> $YAML_LOG +} + +run_llverdev() +{ + local dev=$1 + local devname=$(basename $1) + local size=$(grep "$devname"$ /proc/partitions | awk '{print $3}') + size=$(($size / 1024 / 1024)) # Gb + + local partial_arg="" + # Run in partial (fast) mode if the size + # of a partition > 10 GB + [ $size -gt 10 ] && partial_arg="-p" + + llverdev --force $partial_arg $dev +}
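
The reworked load_module() in this patch looks for per-module options in a MODOPTS_<MODULE> variable before falling back to whatever "$MODPROBECONF" provides, and it only auto-appends accept=all for lnet when the options come from modprobe.conf. A minimal sketch of how a caller might exercise that path follows; the option values are illustrative only, not values required or set by this patch:

    # Illustrative only: example module options, chosen by the site, not by this patch.
    export MODOPTS_LNET="networks=tcp0(eth0) accept=all"   # used verbatim, so include accept=all yourself
    export MODOPTS_LIBCFS="libcfs_panic_on_lbug=0"

    load_modules_local    # load_module ../lnet/lnet/lnet picks up $MODOPTS_LNET;
                          # with MODOPTS_LNET unset it would instead read the
                          # "options lnet ..." line from $MODPROBECONF

Note that get_env_vars() above forwards any MODOPTS_* settings to remote nodes through do_nodes(), so options exported on the driving node also reach the servers when modules are loaded remotely.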
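
For context on how the new logging pieces fit together (init_logging, run_one_logged, $LOGDIR/err, results.yml), this is roughly the shape of a suite that drives the framework. It is a hypothetical minimal script: the cfg sourcing follows the convention of the existing Lustre test suites, and $DIR stands for whatever mount-point directory the site cfg file defines; only the entry points shown in this patch are assumed.

    #!/bin/bash
    LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
    . $LUSTRE/tests/test-framework.sh
    init_test_env $@
    . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}   # assumption: site cfg supplies $DIR, $MOUNT, facet hosts
    init_logging                  # creates $LOGDIR and starts $YAML_LOG via yml_results_file

    check_and_setup_lustre        # formats/mounts as needed; also runs init_gss and set_flavor_all
    build_test_filter             # honours ONLY/EXCEPT/SLOW and the EXCEPT_LIST_FILE read at the top

    test_1() {
        touch $DIR/$tfile || error "touch failed"   # error() records $LOGDIR/err and exits the subshell
    }
    run_test 1 "create a file"    # dispatched through run_one_logged; the per-test log lands in
                                  # $LOGDIR and the result is appended to results.yml by log_sub_test

    check_and_cleanup_lustre
    equals_msg "$(basename $0): test complete"      # banner() pads the message with "=" as shown above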