X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=f72a59fab5b3f92283e421167e328927caf96518;hp=dc711299c0d2bfd9ef44ddcbacdf4507994d71c8;hb=a7a2133bfab42eba077f1b8d5c991c651c8028c3;hpb=3037e29bff9eaf8e4c90c6874ce31b5e376b0c17

diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh
index dc71129..f72a59f 100644
--- a/lustre/tests/test-framework.sh
+++ b/lustre/tests/test-framework.sh
@@ -84,6 +84,14 @@ print_summary () {
 init_test_env() {
     export LUSTRE=`absolute_path $LUSTRE`
     export TESTSUITE=`basename $0 .sh`
+    export TEST_FAILED=false
+
+    export MKE2FS=${MKE2FS:-mke2fs}
+    export DEBUGFS=${DEBUGFS:-debugfs}
+    export TUNE2FS=${TUNE2FS:-tune2fs}
+    export E2LABEL=${E2LABEL:-e2label}
+    export DUMPE2FS=${DUMPE2FS:-dumpe2fs}
+    export E2FSCK=${E2FSCK:-e2fsck}
 
     #[ -d /r ] && export ROOT=${ROOT:-/r}
     export TMP=${TMP:-$ROOT/tmp}
@@ -100,7 +108,7 @@ init_test_env() {
     fi
     export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mdsrate"}
     [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
-    if ! echo $PATH | grep -q $LUSTRE/test/racer; then
+    if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
         export PATH=$PATH:$LUSTRE/tests/racer
     fi
     export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
@@ -120,7 +128,7 @@ init_test_env() {
     export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
     [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre)
     export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
-    export FSYTPE=${FSTYPE:-"ldiskfs"}
+    export FSTYPE=${FSTYPE:-"ldiskfs"}
     export NAME=${NAME:-local}
     export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
     [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \
@@ -219,9 +227,10 @@ load_modules() {
     load_module ../libcfs/libcfs/libcfs
     [ "$PTLDEBUG" ] && lctl set_param debug=$PTLDEBUG
     [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug=${SUBSYSTEM# }
+    local MODPROBECONF=
     [ -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf
-    [ -f /etc/modprobe.d/Lustre ] && MODPROBECONF=/etc/modprobe.d/Lustre
-    [ -z "$LNETOPTS" -a -n "$MODPROBECONF" ] && \
+    [ ! "$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre
"$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre + [ -z "$LNETOPTS" -a "$MODPROBECONF" ] && \ LNETOPTS=$(awk '/^options lnet/ { print $0}' $MODPROBECONF | sed 's/^options lnet //g') echo $LNETOPTS | grep -q "accept=all" || LNETOPTS="$LNETOPTS accept=all"; echo "lnet options: '$LNETOPTS'" @@ -234,8 +243,8 @@ load_modules() { load_module ptlrpc/ptlrpc load_module ptlrpc/gss/ptlrpc_gss [ "$USE_QUOTA" = "yes" -a "$LQUOTA" != "no" ] && load_module quota/lquota - load_module fid/fid load_module fld/fld + load_module fid/fid load_module lmv/lmv load_module mdc/mdc load_module osc/osc @@ -243,6 +252,7 @@ load_modules() { load_module mgc/mgc if [ -z "$CLIENTONLY" ] && [ -z "$CLIENTMODSONLY" ]; then grep -q crc16 /proc/kallsyms || { modprobe crc16 2>/dev/null || true; } + grep -q jbd /proc/kallsyms || { modprobe jbd 2>/dev/null || true; } [ "$FSTYPE" = "ldiskfs" ] && load_module ../ldiskfs/ldiskfs/ldiskfs load_module mgs/mgs load_module mds/mds @@ -257,9 +267,9 @@ load_modules() { load_module llite/lustre load_module llite/llite_lloop + [ -d /r ] && OGDB=${OGDB:-"/r/tmp"} OGDB=${OGDB:-$TMP} rm -f $OGDB/ogdb-$HOSTNAME - [ -d /r ] && OGDB="/r/tmp" $LCTL modules > $OGDB/ogdb-$HOSTNAME # 'mount' doesn't look in $PATH, just sbin @@ -418,6 +428,10 @@ stop_gss_daemons() { init_gss() { if $GSS; then start_gss_daemons + + if [ -n "$LGSS_KEYRING_DEBUG" ]; then + echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level + fi fi } @@ -460,7 +474,7 @@ mount_facet() { lctl set_param debug_mb=${DEBUG_SIZE}; \ sync" - label=$(do_facet ${facet} "e2label ${!dev}") + label=$(do_facet ${facet} "$E2LABEL ${!dev}") [ -z "$label" ] && echo no label for ${!dev} && exit 1 eval export ${facet}_svc=${label} echo Started ${label} @@ -528,12 +542,27 @@ zconf_mount() { zconf_umount() { local client=$1 local mnt=$2 + local force + local busy + local need_kill + [ "$3" ] && force=-f local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true if [ $running -ne 0 ]; then echo "Stopping client $client $mnt (opts:$force)" - lsof | grep "$mnt" || true - do_node $client umount $force $mnt + do_node $client lsof -t $mnt || need_kill=no + if [ "x$force" != "x" -a "x$need_kill" != "xno" ]; then + pids=$(do_node $client lsof -t $mnt | sort -u); + if [ -n $pids ]; then + do_node $client kill -9 $pids || true + fi + fi + + busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") || true + if [ $busy -ne 0 ] ; then + echo "$mnt is still busy, wait one second" && sleep 1 + do_node $client umount $force $mnt + fi fi } @@ -570,18 +599,59 @@ zconf_mount_clients() { zconf_umount_clients() { local clients=$1 local mnt=$2 + local force + [ "$3" ] && force=-f - echo "Umounting clients: $clients" echo "Stopping clients: $clients $mnt (opts:$force)" - do_nodes $clients umount $force $mnt + do_nodes $clients "set -x; running=\\\$(grep -c $mnt' ' /proc/mounts) +if [ \\\$running -ne 0 ] ; then +echo Stopping client \\\$(hostname) client $mnt opts:$force +lsof -t $mnt || need_kill=no +if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then + pids=\\\$(lsof -t $mnt | sort -u); + if [ -n \\\"\\\$pids\\\" ]; then + kill -9 \\\$pids + fi +fi +busy=\\\$(umount $force $mnt 2>&1 | grep -c "busy") +if [ \\\$busy -ne 0 ] ; then + echo "$mnt is still busy, wait one second" && sleep 1 + umount $force $mnt +fi +fi" +} + +shudown_node_hard () { + local host=$1 + local attempts=3 + + for i in $(seq $attempts) ; do + $POWER_DOWN $host + sleep 1 + ping -w 3 -c 1 $host > /dev/null 2>&1 || return 0 
+ echo "waiting for $host to fail attempts=$attempts" + [ $i -lt $attempts ] || \ + { echo "$host still pingable after power down! attempts=$attempts" && return 1; } + done +} + +shutdown_client() { + local client=$1 + local mnt=${2:-$MOUNT} + local attempts=3 + + if [ "$FAILURE_MODE" = HARD ]; then + shudown_node_hard $client + else + zconf_umount_clients $client $mnt -f + fi } shutdown_facet() { - facet=$1 + local facet=$1 if [ "$FAILURE_MODE" = HARD ]; then - $POWER_DOWN `facet_active_host $facet` - sleep 2 + shudown_node_hard $(facet_active_host $facet) elif [ "$FAILURE_MODE" = SOFT ]; then stop $facet fi @@ -603,6 +673,118 @@ boot_node() { fi } +# recovery-scale functions +check_progs_installed () { + local clients=$1 + shift + local progs=$@ + + do_nodes $clients "set -x ; PATH=:$PATH status=true; for prog in $progs; do + which \\\$prog || { echo \\\$prog missing on \\\$(hostname) && status=false; } + done; + eval \\\$status" +} + +start_client_load() { + local client=$1 + local var=${client}_load + + do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \ + BREAK_ON_ERROR=$BREAK_ON_ERROR \ + END_RUN_FILE=$END_RUN_FILE \ + LOAD_PID_FILE=$LOAD_PID_FILE \ + TESTSUITELOG=$TESTSUITELOG \ + run_${!var}.sh" & + CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $!" + log "Started client load: ${!var} on $client" + + return 0 +} + +start_client_loads () { + local clients=(${1//,/ }) + local numloads=${#CLIENT_LOADS[@]} + local testnum + + for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do + testnum=$((nodenum % numloads)) + eval export ${clients[nodenum]}_load=${CLIENT_LOADS[testnum]} + start_client_load ${clients[nodenum]} + done +} + +# only for remote client +check_client_load () { + local client=$1 + local var=${client}_load + + local TESTLOAD=run_${!var}.sh + + ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1 + + check_catastrophe $client || return 2 + + # see if the load is still on the client + local tries=3 + local RC=254 + while [ $RC = 254 -a $tries -gt 0 ]; do + let tries=$tries-1 + # assume success + RC=0 + if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then + RC=${PIPESTATUS[0]} + sleep 30 + fi + done + if [ $RC = 254 ]; then + echo "got a return status of $RC from do_node while checking (i.e. with 'ps') the client load on the remote system" + # see if we can diagnose a bit why this is + fi + + return $RC +} +check_client_loads () { + local clients=${1//,/ } + local client= + local rc=0 + + for client in $clients; do + check_client_load $client + rc=${PIPESTATUS[0]} + if [ "$rc" != 0 ]; then + log "Client load failed on node $client, rc=$rc" + return $rc + fi + done +} + +restart_client_loads () { + local clients=${1//,/ } + local expectedfail=${2:-""} + local client= + local rc=0 + + for client in $clients; do + check_client_load $client + rc=${PIPESTATUS[0]} + if [ "$rc" != 0 -a "$expectedfail" ]; then + start_client_load $client + echo "Restarted client load: on $client. Checking ..." 
+            check_client_load $client
+            rc=${PIPESTATUS[0]}
+            if [ "$rc" != 0 ]; then
+                log "Client load failed to restart on node $client, rc=$rc"
+                # failure one client load means test fail
+                # we do not need to check other
+                return $rc
+            fi
+        else
+            return $rc
+        fi
+    done
+}
+# End recovery-scale functions
+
 # verify that lustre actually cleaned up properly
 cleanup_check() {
     [ -f $CATASTROPHE ] && [ `cat $CATASTROPHE` -ne 0 ] && \
@@ -627,6 +809,34 @@ cleanup_check() {
     return 0
 }
 
+wait_update () {
+    local node=$1
+    local TEST=$2
+    local FINAL=$3
+    local MAX=${4:-90}
+
+    local RESULT
+    local WAIT=0
+    local sleep=5
+    while [ $WAIT -lt $MAX ]; do
+        RESULT=$(do_node $node "$TEST")
+        if [ "$RESULT" == "$FINAL" ]; then
+            echo "Updated after $WAIT sec: wanted '$FINAL' got '$RESULT'"
+            return 0
+        fi
+        echo "Waiting $((MAX - WAIT)) secs for update"
+        WAIT=$((WAIT + sleep))
+        sleep $sleep
+    done
+    echo "Update not seen after $MAX sec: wanted '$FINAL' got '$RESULT'"
+    return 3
+}
+
+wait_update_facet () {
+    local facet=$1
+    wait_update $(facet_host $facet) $@
+}
+
 wait_delete_completed () {
     local TOTALPREV=`lctl get_param -n osc.*.kbytesavail | \
         awk 'BEGIN{total=0}; {total+=$1}; END{print total}'`
@@ -646,32 +856,39 @@ wait_delete_completed () {
 }
 
 wait_for_host() {
-    HOST=$1
-    check_network "$HOST" 900
-    while ! do_node $HOST "ls -d $LUSTRE " > /dev/null; do sleep 5; done
+    local host=$1
+    check_network "$host" 900
+    while ! do_node $host "ls -d $LUSTRE " > /dev/null; do sleep 5; done
 }
 
 wait_for() {
-    facet=$1
-    HOST=`facet_active_host $facet`
-    wait_for_host $HOST
+    local facet=$1
+    local host=`facet_active_host $facet`
+    wait_for_host $host
 }
 
-wait_mds_recovery_done () {
-    local timeout=`do_facet $SINGLEMDS lctl get_param -n timeout`
-#define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 / 2)
-# as we are in process of changing obd_timeout in different ways
-# let's set MAX longer than that
-    MAX=$(( timeout * 4 ))
-    WAIT=0
+wait_recovery_complete () {
+    local facet=$1
+
+    # Use default policy if $2 is not passed by caller.
+    #define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 / 2)
+    # as we are in process of changing obd_timeout in different ways
+    # let's set MAX longer than that
+    local MAX=${2:-$(( TIMEOUT * 4 ))}
+
+    local var_svc=${facet}_svc
+    local procfile="*.${!var_svc}.recovery_status"
+    local WAIT=0
+    local STATUS=
+
     while [ $WAIT -lt $MAX ]; do
-        STATUS=`do_facet $SINGLEMDS "lctl get_param -n mdt.*-MDT0000.recovery_status | grep status"`
-        echo $STATUS | grep COMPLETE && return 0
+        STATUS=$(do_facet $facet lctl get_param -n $procfile | grep status)
+        [[ $STATUS = "status: COMPLETE" ]] && return 0
         sleep 5
        WAIT=$((WAIT + 5))
-        echo "Waiting $(($MAX - $WAIT)) secs for MDS recovery done"
+        echo "Waiting $((MAX - WAIT)) secs for $facet recovery done. $STATUS"
     done
-    echo "MDS recovery not done in $MAX sec"
+    echo "$facet recovery not done in $MAX sec. $STATUS"
     return 1
 }
 
@@ -680,6 +897,7 @@ wait_exit_ST () {
 
     local WAIT=0
     local INTERVAL=1
+    local running
     # conf-sanity 31 takes a long time cleanup
     while [ $WAIT -lt 300 ]; do
         running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null && lctl dl | grep ' ST '") || true
@@ -749,8 +967,8 @@ client_reconnect() {
 }
 
 facet_failover() {
-    facet=$1
-    sleep_time=$2
+    local facet=$1
+    local sleep_time=$2
     echo "Failing $facet on node `facet_active_host $facet`"
     shutdown_facet $facet
     [ -n "$sleep_time" ] && sleep $sleep_time
@@ -759,7 +977,7 @@ facet_failover() {
     DFPID=$!
echo "df pid is $DFPID" change_active $facet - TO=`facet_active_host $facet` + local TO=`facet_active_host $facet` echo "Failover $facet to $TO" wait_for $facet mount_facet $facet || error "Restart of $facet failed" @@ -890,6 +1108,8 @@ declare -fx h2o2ib facet_host() { local facet=$1 + + [ "$facet" == client ] && echo -n $HOSTNAME && return varname=${facet}_HOST if [ -z "${!varname}" ]; then if [ "${facet:0:3}" == "ost" ]; then @@ -1163,16 +1383,6 @@ remount_client() zconf_mount `hostname` $1 || error "mount failed" } -set_obd_timeout() { - local facet=$1 - local timeout=$2 - - do_facet $facet lsmod | grep -q obdclass || \ - do_facet $facet "modprobe obdclass" - - do_facet $facet "lctl set_param timeout=$timeout" -} - writeconf_facet () { local facet=$1 local dev=$2 @@ -1201,7 +1411,6 @@ setupall() { writeconf_all for num in `seq $MDSCOUNT`; do DEVNAME=$(mdsdevname $num) - set_obd_timeout mds$num $TIMEOUT start mds$num $DEVNAME $MDS_MOUNT_OPTS # We started mds, now we should set failover variables properly. @@ -1217,7 +1426,6 @@ setupall() { done for num in `seq $OSTCOUNT`; do DEVNAME=$(ostdevname $num) - set_obd_timeout ost$num $TIMEOUT start ost$num $DEVNAME $OST_MOUNT_OPTS # We started ost$num, now we should set ost${num}failover variable properly. @@ -1242,6 +1450,8 @@ setupall() { [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2 fi + init_param_vars + # by remounting mdt before ost, initial connect from mdt to ost might # timeout because ost is not ready yet. wait some time to its fully # recovery. initial obd_connect timeout is 5s; in GSS case it's preceeded @@ -1269,7 +1479,7 @@ init_facet_vars () { eval export ${facet}_opt=\"$@\" local dev=${facet}_dev - local label=$(do_facet ${facet} "e2label ${!dev}") + local label=$(do_facet ${facet} "$E2LABEL ${!dev}") [ -z "$label" ] && echo no label for ${!dev} && exit 1 eval export ${facet}_svc=${label} @@ -1294,19 +1504,42 @@ init_facets_vars () { done } +init_param_vars () { + export MDSVER=$(do_facet $SINGLEMDS "lctl get_param version" | cut -d. -f1,2) + export OSTVER=$(do_facet ost1 "lctl get_param version" | cut -d. -f1,2) + export CLIVER=$(lctl get_param version | cut -d. -f 1,2) + + TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") + log "Using TIMEOUT=$TIMEOUT" +} + check_config () { local mntpt=$1 - + local myMGS_host=$mgs_HOST + if [ "$NETTYPE" = "ptl" ]; then + myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//) + fi + echo Checking config lustre mounted on $mntpt local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}') mgshost=$(echo $mgshost | awk -F: '{print $1}') - if [ "$mgshost" != "$mgs_HOST" ]; then + + if [ "$mgshost" != "$myMGS_host" ]; then FAIL_ON_ERROR=true \ - error "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST + error "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE Please use correct config or set mds_HOST correctly!" fi } +check_timeout () { + local mdstimeout=$(do_facet $SINGLEMDS "lctl get_param -n timeout") + local cltimeout=$(lctl get_param -n timeout) + if [ $mdstimeout -ne $TIMEOUT ] || [ $mdstimeout -ne $cltimeout ]; then + error "timeouts are wrong! mds: $mdstimeout, client: $cltimeout, TIMEOUT=$TIMEOUT" + return 1 + fi +} + check_and_setup_lustre() { local MOUNTED=$(mounted_lustre_filesystems) if [ -z "$MOUNTED" ] || ! 
@@ -1318,6 +1551,7 @@ check_and_setup_lustre() {
     else
         check_config $MOUNT
         init_facets_vars
+        init_param_vars
     fi
     if [ "$ONLY" == "setup" ]; then
         exit 0
@@ -1384,10 +1618,45 @@ comma_list() {
     echo "$*" | tr -s " " "\n" | sort -b -u | tr "\n" " " | sed 's/ \([^$]\)/,\1/g'
 }
 
+# list, excluded are the comma separated lists
+exclude_items_from_list () {
+    local list=$1
+    local excluded=$2
+    local item
+
+    list=${list//,/ }
+    for item in ${excluded//,/ }; do
+        list=$(echo " $list " | sed -re "s/\s+$item\s+/ /g")
+    done
+    echo $(comma_list $list)
+}
+
+# list, expand are the comma separated lists
+expand_list () {
+    local list=${1//,/ }
+    local expand=${2//,/ }
+    local expanded=
+
+    expanded=$(for i in $list $expand; do echo $i; done | sort -u)
+    echo $(comma_list $expanded)
+}
+
 absolute_path() {
     (cd `dirname $1`; echo $PWD/`basename $1`)
 }
 
+get_facets () {
+    local name=$(echo $1 | tr "[:upper:]" "[:lower:]")
+    local type=$(echo $1 | tr "[:lower:]" "[:upper:]")
+
+    local list=""
+    local count=${type}COUNT
+    for ((i=1; i<=${!count}; i++)) do
+        list="$list ${name}$i"
+    done
+    echo $(comma_list $list)
+}
+
 ##################################
 # Adaptive Timeouts funcs
 
@@ -1595,6 +1864,7 @@ error_noexit() {
     done
     debugrestore
     [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG
+    TEST_FAILED=true
 }
 
 error() {
@@ -1742,7 +2012,8 @@ trace() {
 }
 
 pass() {
-    echo PASS $@
+    $TEST_FAILED && echo -n "FAIL " || echo -n "PASS "
+    echo $@
 }
 
 check_mds() {
@@ -1765,13 +2036,15 @@ run_one() {
     message=$2
     tfile=f${testnum}
     export tdir=d0.${TESTSUITE}/d${base}
+    local SAVE_UMASK=`umask`
     umask 0022
 
-    BEFORE=`date +%s`
+    local BEFORE=`date +%s`
     log "== test $testnum: $message ============ `date +%H:%M:%S` ($BEFORE)"
     #check_mds
     export TESTNAME=test_$testnum
+    TEST_FAILED=false
     test_${testnum} || error "test_$testnum failed with $?"
     #check_mds
     cd $SAVE_PWD
@@ -1780,6 +2053,7 @@ run_one() {
     check_catastrophe || error "LBUG/LASSERT detected"
     ps auxww | grep -v grep | grep -q multiop && error "multiop still running"
     pass "($((`date +%s` - $BEFORE))s)"
+    TEST_FAILED=false
     unset TESTNAME
     unset tdir
     umask $SAVE_UMASK
@@ -1884,6 +2158,13 @@ remote_ost_nodsh()
     remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
 }
 
+remote_mgs_nodsh()
+{
+    local MGS
+    MGS=$(facet_host mgs)
+    remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
+}
+
 mdts_nodes () {
     local MDSNODES
     local NODES_sort
@@ -1958,14 +2239,30 @@ init_clients_lists () {
     CLIENTCOUNT=$((${#remoteclients[@]} + 1))
 }
 
+get_random_entry () {
+    local rnodes=$1
+
+    rnodes=${rnodes//,/ }
+
+    local nodes=($rnodes)
+    local num=${#nodes[@]}
+    local i=$((RANDOM * num * 2 / 65536))
+
+    echo ${nodes[i]}
+}
+
 is_patchless ()
 {
     lctl get_param version | grep -q patchless
 }
 
+check_versions () {
+    [ "$MDSVER" = "$CLIVER" -a "$OSTVER" = "$CLIVER" ]
+}
+
 get_node_count() {
-    local nodes="$@"
-    echo $nodes | wc -w || true
+    local nodes="$@"
+    echo $nodes | wc -w || true
 }
 
 mixed_ost_devs () {
@@ -1998,8 +2295,9 @@ get_stripe () {
 
 check_runas_id_ret() {
     local myRC=0
-    local myRUNAS_ID=$1
-    shift
+    local myRUNAS_UID=$1
+    local myRUNAS_GID=$2
+    shift 2
     local myRUNAS=$@
     if [ -z "$myRUNAS" ]; then
         error_exit "myRUNAS command must be specified for check_runas_id"
@@ -2010,20 +2308,21 @@ check_runas_id_ret() {
     fi
     mkdir $DIR/d0_runas_test
     chmod 0755 $DIR
-    chown $myRUNAS_ID:$myRUNAS_ID $DIR/d0_runas_test
+    chown $myRUNAS_UID:$myRUNAS_GID $DIR/d0_runas_test
     $myRUNAS touch $DIR/d0_runas_test/f$$ || myRC=$?
     rm -rf $DIR/d0_runas_test
     return $myRC
 }
 
 check_runas_id() {
-    local myRUNAS_ID=$1
-    shift
+    local myRUNAS_UID=$1
+    local myRUNAS_GID=$2
+    shift 2
     local myRUNAS=$@
-    check_runas_id_ret $myRUNAS_ID $myRUNAS || \
-        error "unable to write to $DIR/d0_runas_test as UID $myRUNAS_ID.
+    check_runas_id_ret $myRUNAS_UID $myRUNAS_GID $myRUNAS || \
+        error "unable to write to $DIR/d0_runas_test as UID $myRUNAS_UID.
         Please set RUNAS_ID to some UID which exists on MDS and client or
-        add user $myRUNAS_ID:$myRUNAS_ID on these nodes."
+        add user $myRUNAS_UID:$myRUNAS_GID on these nodes."
 }
 
 # Run multiop in the background, but wait for it to print
@@ -2055,25 +2354,9 @@ multiop_bg_pause() {
     return 0
 }
 
-check_rate() {
-    local OP=$1
-    local TARGET_RATE=$2
-    local NUM_CLIENTS=$3
-    local LOG=$4
-
-    local RATE=$(awk '/^Rate: [0-9\.]+ '"${OP}"'s\/sec/ { print $2}' ${LOG})
-
-    # We need to use bc since the rate is a floating point number
-    local RES=$(echo "${RATE} < ${TARGET_RATE}" | bc -l )
-    if [ ${RES} -eq 0 ]; then
-        echo "Success: ${RATE} ${OP}s/sec met target rate" \
-            "${TARGET_RATE} ${OP}s/sec for ${NUM_CLIENTS} client(s)."
-        return 0
-    else
-        echo "Failure: ${RATE} ${OP}s/sec did not meet target rate" \
-            "${TARGET_RATE} ${OP}s/sec for ${NUM_CLIENTS} client(s)."
-        return 1
-    fi
+inodes_available () {
+    local IFree=$($LFS df -i $MOUNT | grep ^$FSNAME | awk '{print $4}' | sort -un | head -1) || return 1
+    echo $IFree
 }
 
 # reset llite stat counters
@@ -2126,11 +2409,74 @@ restore_lustre_params() {
 }
 
 check_catastrophe () {
-    local rnodes=$(comma_list $(remote_nodes_list))
+    local rnodes=${1:-$(comma_list $(remote_nodes_list))}
 
-    [ -f $CATASTROPHE ] && [ `cat $CATASTROPHE` -ne 0 ] && return 1
+    [ -f $CATASTROPHE ] && [ $(cat $CATASTROPHE) -ne 0 ] && return 1
     if [ $rnodes ]; then
-        do_nodes $rnodes "[ -f $CATASTROPHE ] && { [ \`cat $CATASTROPHE\` -eq 0 ] || false; } || true"
+        do_nodes $rnodes "set -x; [ -f $CATASTROPHE ] && { [ \`cat $CATASTROPHE\` -eq 0 ] || false; } || true"
     fi
 }
 
+# $1 node
+# $2 file
+# $3 $RUNAS
+get_stripe_info() {
+    local tmp_file
+
+    stripe_size=0
+    stripe_count=0
+    stripe_index=0
+    tmp_file=$(mktemp)
+
+    do_facet $1 $3 lfs getstripe -v $2 > $tmp_file
+
+    stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file`
+    stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file`
+    stripe_index=`awk '/obdidx/ {start = 1; getline; print $1; exit}' $tmp_file`
+    rm -f $tmp_file
+}
+
+# CMD: determine mds index where directory inode presents
+get_mds_dir () {
+    local dir=$1
+    local file=$dir/f0.get_mds_dir_tmpfile
+
+    rm -f $file
+    local iused=$(lfs df -i $dir | grep MDT | awk '{print $3}')
+    local oldused=($iused)
+
+    touch $file
+    sleep 1
+    iused=$(lfs df -i $dir | grep MDT | awk '{print $3}')
+    local newused=($iused)
+
+    local num=0
+    for ((i=0; i<${#newused[@]}; i++)); do
+        if [ ${oldused[$i]} -lt ${newused[$i]} ]; then
+            echo $(( i + 1 ))
+            rm -f $file
+            return 0
+        fi
+    done
+    error "mdt-s : inodes count OLD ${oldused[@]} NEW ${newused[@]}"
+}
+
+mpi_run () {
+    local mpirun="$MPIRUN $MPIRUN_OPTIONS"
+    local command="$mpirun $@"
+
+    if [ "$MPI_USER" != root -a $mpirun ]; then
+        echo "+ chmod 0777 $MOUNT"
+        chmod 0777 $MOUNT
+        command="su $MPI_USER sh -c \"$command \""
+    fi
+
+    ls -ald $MOUNT
+    echo "+ $command"
+    eval $command
+}
+
+mdsrate_cleanup () {
+    mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5
+}
+
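Usage note (illustrative, not part of the patch): the recovery-scale helpers added above are meant to be driven from a test script that sources test-framework.sh. The sketch below is a minimal, assumed example of how they could be combined; the test number, the 180-second recovery timeout, and the expectation that $CLIENTS and the CLIENT_LOADS array are provided by the test configuration are assumptions for this example, not values taken from the change itself.

    test_100() {                                      # hypothetical test, not part of this patch
        # pick one client at random from the comma-separated $CLIENTS list
        local victim=$(get_random_entry $CLIENTS)
        echo "selected client: $victim"

        # start the configured client loads and verify they are running
        start_client_loads $CLIENTS
        check_client_loads $CLIENTS || error "client loads failed to start"

        # fail ost1 and wait (up to an assumed 180s) for its recovery to complete
        facet_failover ost1
        wait_recovery_complete ost1 180 || error "ost1 recovery did not complete"

        # the loads should still be running on every client after the failover
        check_client_loads $CLIENTS || error "client loads died during failover"
    }
    run_test 100 "recovery-scale sketch"              # assumes the usual run_test harness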