2 # vim:expandtab:shiftwidth=4:softtabstop=4:tabstop=4:
4 trap 'print_summary && echo "test-framework exiting on error"' ERR
9 export REFORMAT=${REFORMAT:-""}
10 export WRITECONF=${WRITECONF:-""}
12 export GMNALNID=${GMNALNID:-/usr/sbin/gmlndnid}
13 export CATASTROPHE=${CATASTROPHE:-/proc/sys/lnet/catastrophe}
14 #export PDSH="pdsh -S -Rssh -w"
16 # function used by scripts run on remote nodes
17 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
18 . $LUSTRE/tests/functions.sh
20 LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg}
22 EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh}
24 if [ -f "$EXCEPT_LIST_FILE" ]; then
25 echo "Reading test skip list from $EXCEPT_LIST_FILE"
32 [[ $DIR/ = $MOUNT/* ]] || \
33 { failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
34 [[ $DIR1/ = $MOUNT1/* ]] || \
35 { failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
36 [[ $DIR2/ = $MOUNT2/* ]] || \
37 { failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting."; }
39 [ -n "$failed" ] && exit 99 || true
43 echo "usage: $0 [-r] [-f cfgfile]"
51 [ "$TESTSUITE" == "lfscktest" ] && return 0
52 [ -n "$ONLY" ] && echo "WARNING: ONLY is set to ${ONLY}."
53 local form="%-13s %-17s %s\n"
54 printf "$form" "status" "script" "skipped tests E(xcluded) S(low)"
55 echo "------------------------------------------------------------------------------------"
56 for O in $TESTSUITE_LIST; do
59 local o=$(echo $O | tr "[:upper:]" "[:lower:]")
62 local log=${TMP}/${o}.log
63 [ -f $log ] && skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
64 [ -f $log ] && slow=$(grep SLOW $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
65 [ "${!O}" = "done" ] && \
66 printf "$form" "Done" "$O" "E=$skipped" && \
67 [ -n "$slow" ] && printf "$form" "-" "-" "S=$slow"
71 for O in $TESTSUITE_LIST; do
72 if [ "${!O}" = "no" ]; then
74 # only for those test suites which are run directly from the acc-sm script:
75 # bonnie, iozone, etc.
76 if [ -f "$TESTSUITELOG" ] && grep FAIL $TESTSUITELOG | grep -q ' '$O ; then
77 printf "$form" "UNFINISHED" "$O" ""
79 printf "$form" "Skipped" "$O" ""
84 for O in $TESTSUITE_LIST; do
85 [ "${!O}" = "done" -o "${!O}" = "no" ] || \
86 printf "$form" "UNFINISHED" "$O" ""
91 export LUSTRE=`absolute_path $LUSTRE`
92 export TESTSUITE=`basename $0 .sh`
93 export TEST_FAILED=false
94 export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
96 export MKE2FS=${MKE2FS:-mke2fs}
97 export DEBUGFS=${DEBUGFS:-debugfs}
98 export TUNE2FS=${TUNE2FS:-tune2fs}
99 export E2LABEL=${E2LABEL:-e2label}
100 export DUMPE2FS=${DUMPE2FS:-dumpe2fs}
101 export E2FSCK=${E2FSCK:-e2fsck}
102 export LFSCK1=${LFSCK1:-lfsck}
103 export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check filesystem after each test suite
104 export SKIP_LFSCK=${SKIP_LFSCK:-"yes"} # bug 13698, change to "no" when fixed
105 export SHARED_DIRECTORY=${SHARED_DIRECTORY:-"/tmp"}
106 export FSCK_MAX_ERR=4 # File system errors left uncorrected
107 if [ "$SKIP_LFSCK" == "no" ]; then
108 if [ ! -x `which $LFSCK1` ]; then
110 error_exit "$E2FSCK does not support lfsck"
113 export MDSDB=${MDSDB:-$SHARED_DIRECTORY/mdsdb}
114 export OSTDB=${OSTDB:-$SHARED_DIRECTORY/ostdb}
115 export MDSDB_OPT="--mdsdb $MDSDB"
116 export OSTDB_OPT="--ostdb $OSTDB-\$ostidx"
120 #[ -d /r ] && export ROOT=${ROOT:-/r}
121 export TMP=${TMP:-$ROOT/tmp}
122 export TESTSUITELOG=${TMP}/${TESTSUITE}.log
123 export HOSTNAME=${HOSTNAME:-`hostname`}
124 if ! echo $PATH | grep -q $LUSTRE/utils; then
125 export PATH=$PATH:$LUSTRE/utils
127 if ! echo $PATH | grep -q $LUSTRE/tests; then
128 export PATH=$PATH:$LUSTRE/tests
130 export LST=${LST:-"$LUSTRE/../lnet/utils/lst"}
131 [ ! -f "$LST" ] && export LST=$(which lst)
132 export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"}
133 [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
134 if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
135 export PATH=$PATH:$LUSTRE/tests/racer
137 if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then
138 export PATH=$PATH:$LUSTRE/tests/mpi
140 export RSYNC_RSH=${RSYNC_RSH:-rsh}
141 export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
143 [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
144 export LFS=${LFS:-"$LUSTRE/utils/lfs"}
145 [ ! -f "$LFS" ] && export LFS=$(which lfs)
146 export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
147 [ ! -f "$MKFS" ] && export MKFS=$(which mkfs.lustre)
148 export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
149 [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre)
150 export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
151 export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod}
152 [ ! -f "$LUSTRE_RMMOD" ] && export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null)
153 export FSTYPE=${FSTYPE:-"ldiskfs"}
154 export NAME=${NAME:-local}
156 export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests}
158 if [ "$ACCEPTOR_PORT" ]; then
159 export PORT_OPT="--port $ACCEPTOR_PORT"
162 export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
164 # Paths on remote nodes, if different
165 export RLUSTRE=${RLUSTRE:-$LUSTRE}
166 export RPWD=${RPWD:-$PWD}
167 export I_MOUNTED=${I_MOUNTED:-"no"}
168 if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mds.ko -a \
169 ! -f `dirname $0`/../mds/mds.ko ]; then
170 export CLIENTMODSONLY=yes
175 while getopts "rvwf:" opt $*; do
178 r) REFORMAT=--reformat;;
180 w) WRITECONF=writeconf;;
185 shift $((OPTIND - 1))
188 [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
193 *) EXT=".ko"; USE_QUOTA=yes;;
197 do_facet mgs lctl pool_list $1
201 local fsname=${1%%.*}
202 local poolname=${1##$fsname.}
204 do_facet mgs lctl pool_new $1
206 # get param should return err unless pool is created
207 [[ $RC -ne 0 ]] && return $RC
209 wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
210 2>/dev/null || echo foo" "" || RC=1
211 if [[ $RC -eq 0 ]]; then
214 error "pool_new failed $1"
219 add_pool_to_list () {
220 local fsname=${1%%.*}
221 local poolname=${1##$fsname.}
223 local listvar=${fsname}_CREATED_POOLS
224 eval export ${listvar}=$(expand_list ${!listvar} $poolname)
227 remove_pool_from_list () {
228 local fsname=${1%%.*}
229 local poolname=${1##$fsname.}
231 local listvar=${fsname}_CREATED_POOLS
232 eval export ${listvar}=$(exclude_items_from_list ${!listvar} $poolname)
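# illustrative usage of the pool-list helpers above: pool names are passed
# as <fsname>.<poolname> (the fsname and pool name below are hypothetical), e.g.
#   add_pool_to_list lustre.testpool       # appends "testpool" to lustre_CREATED_POOLS
#   remove_pool_from_list lustre.testpool  # removes it again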
236 /sbin/lsmod | grep -q $1
242 BASE=`basename $module $EXT`
244 module_loaded ${BASE} && return
246 if [ "$BASE" == "lnet_selftest" ] && \
247 [ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]; then
248 insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
250 elif [ -f ${LUSTRE}/${module}${EXT} ]; then
251 insmod ${LUSTRE}/${module}${EXT} $@
253 # must be testing a "make install" or "rpm" installation
258 load_modules_local() {
259 if [ -n "$MODPROBE" ]; then
263 if [ "$HAVE_MODULES" = true ]; then
269 echo Loading modules from $LUSTRE
270 load_module ../lnet/libcfs/libcfs
271 [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG"
272 [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }"
274 [ -f /etc/modprobe.conf ] && MODPROBECONF=/etc/modprobe.conf
275 [ ! "$MODPROBECONF" -a -d /etc/modprobe.d ] && MODPROBECONF=/etc/modprobe.d/Lustre
276 [ -z "$LNETOPTS" -a "$MODPROBECONF" ] && \
277 LNETOPTS=$(awk '/^options lnet/ { print $0}' $MODPROBECONF | sed 's/^options lnet //g')
278 echo $LNETOPTS | grep -q "accept=all" || LNETOPTS="$LNETOPTS accept=all";
280 # disable it for now since it only hides the stack overflow upon test w/
282 # if [ "$NETTYPE" = "tcp" -o "$NETTYPE" = "o2ib" -o "$NETTYPE" = "ptl" ]; then
283 # echo $LNETOPTS | grep -q "local_nid_dist_zero=0" ||
284 # LNETOPTS="$LNETOPTS local_nid_dist_zero=0"
286 echo "lnet options: '$LNETOPTS'"
287 # note that insmod will ignore anything in modprobe.conf
288 load_module ../lnet/lnet/lnet $LNETOPTS
289 LNETLND=${LNETLND:-"socklnd/ksocklnd"}
290 load_module ../lnet/klnds/$LNETLND
291 load_module lvfs/lvfs
292 load_module obdclass/obdclass
293 load_module ptlrpc/ptlrpc
294 [ "$USE_QUOTA" = "yes" ] && load_module quota/lquota $LQUOTAOPTS
299 if ! client_only; then
302 grep -q crc16 /proc/kallsyms || { modprobe crc16 2>/dev/null || true; }
303 grep -q jbd /proc/kallsyms || { modprobe jbd 2>/dev/null || true; }
304 [ "$FSTYPE" = "ldiskfs" ] && load_module ../ldiskfs/ldiskfs/ldiskfs
305 load_module lvfs/fsfilt_$FSTYPE
307 load_module obdfilter/obdfilter
310 load_module llite/lustre
311 load_module llite/llite_lloop
312 rm -f $TMP/ogdb-$HOSTNAME
314 [ -d /r ] && OGDB="/r/tmp"
315 $LCTL modules > $OGDB/ogdb-$HOSTNAME
316 # 'mount' doesn't look in $PATH, just sbin
317 [ -f $LUSTRE/utils/mount.lustre ] && cp $LUSTRE/utils/mount.lustre /sbin/. || true
323 # load modules on remote nodes optionally
324 # lustre-tests have to be installed on these nodes
325 if $LOAD_MODULES_REMOTE ; then
326 local list=$(comma_list $(remote_nodes_list))
327 echo loading modules on $list
328 do_rpc_nodes $list load_modules
333 LEAK_LUSTRE=$(dmesg | tail -n 30 | grep "obd_memory.*leaked" || true)
334 LEAK_PORTALS=$(dmesg | tail -n 20 | grep "Portals memory leaked" || true)
335 if [ "$LEAK_LUSTRE" -o "$LEAK_PORTALS" ]; then
336 echo "$LEAK_LUSTRE" 1>&2
337 echo "$LEAK_PORTALS" 1>&2
338 mv $TMP/debug $TMP/debug-leak.`date +%s` || true
339 echo "Memory leaks detected"
340 [ -n "$IGNORE_LEAK" ] && { echo "ignoring leaks" && return 0; } || true
346 wait_exit_ST client # bug 12845
348 if $LOAD_MODULES_REMOTE ; then
349 local list=$(comma_list $(remote_nodes_list))
350 if [ ! -z $list ]; then
351 echo unloading modules on $list
352 do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE
353 do_rpc_nodes $list check_mem_leak
357 $LUSTRE_RMMOD $FSTYPE || return 2
361 check_mem_leak || return 254
363 echo "modules unloaded."
371 local dev=$(facet_active $facet)_dev
372 local opt=${facet}_opt
373 local mntpt=$(facet_mntpt $facet)
375 echo "Starting ${facet}: ${!opt} $@ ${!dev} $mntpt"
376 do_facet ${facet} mount -t lustre ${!opt} $@ ${!dev} $mntpt
378 if [ $RC -ne 0 ]; then
379 echo "mount -t lustre $@ ${!dev} $mntpt"
380 echo "Start of ${!dev} on ${facet} failed ${RC}"
382 do_facet ${facet} "lctl set_param debug=\\\"$PTLDEBUG\\\"; \
383 lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\"; \
384 lctl set_param debug_mb=${DEBUG_SIZE}; \
387 label=$(do_facet ${facet} "$E2LABEL ${!dev}")
388 [ -z "$label" ] && echo no label for ${!dev} && exit 1
389 eval export ${facet}_svc=${label}
390 echo Started ${label}
395 # start facet device options
401 eval export ${facet}_dev=${device}
402 eval export ${facet}_opt=\"$@\"
404 local varname=${facet}failover_dev
405 if [ -n "${!varname}" ] ; then
406 eval export ${facet}failover_dev=${!varname}
408 eval export ${facet}failover_dev=$device
411 local mntpt=$(facet_mntpt $facet)
412 do_facet ${facet} mkdir -p $mntpt
413 eval export ${facet}_MOUNT=$mntpt
423 local HOST=`facet_active_host $facet`
424 [ -z $HOST ] && echo stop: no host for $facet && return 0
426 local mntpt=$(facet_mntpt $facet)
427 running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
428 if [ ${running} -ne 0 ]; then
429 echo "Stopping $mntpt (opts:$@)"
430 do_facet ${facet} umount -d $@ $mntpt
433 # umount should block, but we should wait for unrelated obd's
434 # like the MGS or MGC to also stop.
436 wait_exit_ST ${facet}
439 # set quota version (both administrative and operational quotas)
440 quota_set_version() {
441 do_facet mds "lctl set_param lquota.${FSNAME}-MDT*.quota_type=$1"
443 local osts=$(get_facets OST)
444 for ost in ${osts//,/ }; do
446 do_facet $ost "lctl set_param lquota.${!varsvc}.quota_type=$1"
450 # save quota version (both administrative and operational quotas)
451 # the function will also switch to the new version and the new type
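# illustrative call: quota_save_version "ug3" selects both user and group
# quotas ("ug") and quota version 3, per the tr parsing of $spec below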
452 quota_save_version() {
454 local ver=$(tr -c -d "123" <<< $spec)
455 local type=$(tr -c -d "ug" <<< $spec)
457 local lustre_version=$(get_lustre_version mds)
458 if [[ $lustre_version = 1.8* ]] ; then
459 $LFS quotaoff -ug $MOUNT # just in case
460 [ -n "$ver" ] && quota_set_version $ver
462 echo mds running $lustre_version
463 [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
466 [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
468 do_facet mgs "lctl conf_param ${FSNAME}-MDT*.$(get_md_name).quota_type=$spec"
470 local osts=$(get_facets OST)
471 for ost in ${osts//,/ }; do
473 do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
477 # a client could mount several lustre filesystems
479 local fsname=${1:-$FSNAME}
481 do_facet mgs lctl get_param md*.${fsname}-MDT*.quota_type || rc=$?
482 do_nodes $(comma_list $(osts_nodes)) \
483 lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
487 restore_quota_type () {
488 local mntpt=${1:-$MOUNT}
489 local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2)
490 if [ ! "$old_QUOTA_TYPE" ] || [ "$quota_type" = "$old_QUOTA_TYPE" ]; then
493 quota_save_version $old_QUOTA_TYPE
500 # 1. run quotacheck only if quota is off
501 # 2. save the original quota_type params, restore them after testing
503 # Assume that the quota type is the same on the MDS and OSTs
504 local quota_type=$(quota_type | grep MDT | cut -d "=" -f2)
505 [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!"
506 echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]"
507 if [ "$quota_type" != "$QUOTA_TYPE" ]; then
508 export old_QUOTA_TYPE=$quota_type
509 quota_save_version $QUOTA_TYPE
512 local quota_usrs=$QUOTA_USERS
514 # get_filesystem_size
515 local disksz=$(lfs df $mntpt | grep "filesystem summary:" | awk '{print $3}')
516 local blk_soft=$((disksz + 1024))
517 local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
519 local Inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | awk '{print $3}')
521 local i_hard=$((i_soft + i_soft / 20))
523 echo "Total disk size: $disksz block-softlimit: $blk_soft block-hardlimit:
524 $blk_hard inode-softlimit: $i_soft inode-hardlimit: $i_hard"
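# worked example with assumed numbers: disksz=1000000 KB gives
# blk_soft=1000000+1024=1001024 KB and blk_hard=1001024+1001024/20=1051075 KB (~5% over)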
527 for usr in $quota_usrs; do
528 echo "Setting up quota on $client:$mntpt for $usr..."
530 cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt"
532 eval $cmd || error "$cmd FAILED!"
534 # display the quota status
535 echo "Quota settings for $usr : "
536 $LFS quota -v -u $usr $mntpt || true
544 # Only supply -o to mount if we have options
545 if [ -n "$MOUNTOPT" ]; then
546 OPTIONS="-o $MOUNTOPT"
548 local device=$MGSNID:/$FSNAME
549 if [ -z "$mnt" -o -z "$FSNAME" ]; then
550 echo Bad zconf mount command: opt=$OPTIONS dev=$device mnt=$mnt
554 echo "Starting client: $client: $OPTIONS $device $mnt"
555 do_node $client mkdir -p $mnt
556 do_node $client mount -t lustre $OPTIONS $device $mnt || return 1
557 do_node $client "lctl set_param debug=\\\"$PTLDEBUG\\\";
558 lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
559 lctl set_param debug_mb=${DEBUG_SIZE}"
572 local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
573 if [ $running -ne 0 ]; then
574 echo "Stopping client $client $mnt (opts:$force)"
575 do_node $client lsof -t $mnt || need_kill=no
576 if [ "x$force" != "x" -a "x$need_kill" != "xno" ]; then
577 pids=$(do_node $client lsof -t $mnt | sort -u);
578 if [ -n "$pids" ]; then
579 do_node $client kill -9 $pids || true
583 busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") || true
584 if [ $busy -ne 0 ] ; then
585 echo "$mnt is still busy, wait one second" && sleep 1
586 do_node $client umount $force $mnt
591 # nodes is a comma-separated list
592 sanity_mount_check_nodes () {
598 # FIXME: assume that all cluster nodes run the same os
599 [ "$(uname)" = Linux ] || return 0
602 for mnt in $mnts ; do
603 do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts);
604 mpts=\\\$(mount | grep -w -c $mnt);
605 if [ \\\$running -ne \\\$mpts ]; then
606 echo \\\$(hostname) env are INSANE!;
614 sanity_mount_check_servers () {
616 { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
617 echo Checking servers environments
619 # FIXME: modify get_facets to display all facets wo params
620 local facets="$(get_facets OST),$(get_facets MDS),mgs"
624 for facet in ${facets//,/ }; do
625 node=$(facet_host ${facet})
626 mntpt=$(facet_mntpt $facet)
627 sanity_mount_check_nodes $node $mntpt ||
628 { error "server $node environments are insane!"; return 1; }
632 sanity_mount_check_clients () {
633 local clients=${1:-$CLIENTS}
634 local mntpt=${2:-$MOUNT}
635 local mntpt2=${3:-$MOUNT2}
637 [ -z $clients ] && clients=$(hostname)
638 echo Checking clients $clients environments
640 sanity_mount_check_nodes $clients $mntpt $mntpt2 ||
641 error "clients environments are insane!"
644 sanity_mount_check () {
645 sanity_mount_check_servers || return 1
646 sanity_mount_check_clients || return 2
649 # mount clients if not mounted
650 zconf_mount_clients() {
653 local OPTIONS=${3:-$MOUNTOPT}
655 # Only supply -o to mount if we have options
656 if [ "$OPTIONS" ]; then
657 OPTIONS="-o $OPTIONS"
659 local device=$MGSNID:/$FSNAME
660 if [ -z "$mnt" -o -z "$FSNAME" ]; then
661 echo Bad zconf mount command: opt=$OPTIONS dev=$device mnt=$mnt
665 echo "Starting client $clients: $OPTIONS $device $mnt"
668 running=\\\$(mount | grep -c $mnt' ');
670 if [ \\\$running -eq 0 ] ; then
672 mount -t lustre $OPTIONS $device $mnt;
677 echo "Started clients $clients: "
678 do_nodes $clients "mount | grep -w $mnt"
680 do_nodes $clients "sysctl -w lnet.debug=\\\"$PTLDEBUG\\\";
681 sysctl -w lnet.subsystem_debug=\\\"${SUBSYSTEM# }\\\";
682 sysctl -w lnet.debug_mb=${DEBUG_SIZE};"
687 zconf_umount_clients() {
694 echo "Stopping clients: $clients $mnt (opts:$force)"
695 do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts);
696 if [ \\\$running -ne 0 ] ; then
697 echo Stopping client \\\$(hostname) $mnt opts:$force;
698 lsof -t $mnt || need_kill=no;
699 if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then
700 pids=\\\$(lsof -t $mnt | sort -u);
701 if [ -n \\\"\\\$pids\\\" ]; then
705 busy=\\\$(umount $force $mnt 2>&1 | grep -c "busy");
706 if [ \\\$busy -ne 0 ] ; then
707 echo "$mnt is still busy, wait one second" && sleep 1;
713 shutdown_node_hard () {
717 for i in $(seq $attempts) ; do
720 ping -w 3 -c 1 $host > /dev/null 2>&1 || return 0
721 echo "waiting for $host to fail attempts=$attempts"
722 [ $i -lt $attempts ] || \
723 { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
729 local mnt=${2:-$MOUNT}
732 if [ "$FAILURE_MODE" = HARD ]; then
733 shutdown_node_hard $client
735 zconf_umount_clients $client $mnt -f
741 if [ "$FAILURE_MODE" = HARD ]; then
742 shutdown_node_hard $(facet_active_host $facet)
743 elif [ "$FAILURE_MODE" = SOFT ]; then
750 if [ "$FAILURE_MODE" = HARD ]; then
751 $POWER_UP `facet_active_host $facet`
759 if [ "$FAILURE_MODE" = HARD ]; then
765 # recovery-scale functions
766 check_progs_installed () {
771 do_nodes $clients "PATH=:$PATH; status=true;
772 for prog in $progs; do
773 if ! [ \\\"\\\$(which \\\$prog)\\\" -o \\\"\\\${!prog}\\\" ]; then
774 echo \\\$prog missing on \\\$(hostname);
782 echo __$(echo $1 | tr '-' 'X')
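# e.g. client_var_name node-12 prints "__nodeX12" (dashes are not valid in
# shell variable names, so they are mapped to "X")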
785 start_client_load() {
788 local var=$(client_var_name $client)_load
789 eval export ${var}=$load
791 do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
792 BREAK_ON_ERROR=$BREAK_ON_ERROR \
793 END_RUN_FILE=$END_RUN_FILE \
794 LOAD_PID_FILE=$LOAD_PID_FILE \
795 TESTSUITELOG=$TESTSUITELOG \
797 CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $!"
798 log "Started client load: ${load} on $client"
803 start_client_loads () {
804 local -a clients=(${1//,/ })
805 local numloads=${#CLIENT_LOADS[@]}
808 for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
809 testnum=$((nodenum % numloads))
810 start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]}
812 # bug 22169: wait the background threads to start
816 # only for remote client
817 check_client_load () {
819 local var=$(client_var_name $client)_load
820 local TESTLOAD=run_${!var}.sh
822 ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1
824 # bug 18914: try to connect several times, not only when
825 # checking ps, but also when running check_catastrophe
828 while [ $RC = 254 -a $tries -gt 0 ]; do
832 if ! check_catastrophe $client; then
834 if [ $RC -eq 254 ]; then
835 # FIXME: not sure how long we should sleep here
839 echo "check catastrophe failed: RC=$RC "
844 # We can continue try to connect if RC=254
845 # Just print the warning about this
846 if [ $RC = 254 ]; then
847 echo "got a return status of $RC from do_node while checking catastrophe on $client"
850 # see if the load is still on the client
853 while [ $RC = 254 -a $tries -gt 0 ]; do
857 if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then
862 if [ $RC = 254 ]; then
863 echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client"
864 # see if we can diagnose a bit why this is
869 check_client_loads () {
870 local clients=${1//,/ }
874 for client in $clients; do
875 check_client_load $client
877 if [ "$rc" != 0 ]; then
878 log "Client load failed on node $client, rc=$rc"
884 restart_client_loads () {
885 local clients=${1//,/ }
886 local expectedfail=${2:-""}
890 for client in $clients; do
891 check_client_load $client
893 if [ "$rc" != 0 -a "$expectedfail" ]; then
894 local var=$(client_var_name $client)_load
895 start_client_load $client ${!var}
896 echo "Restarted client load ${!var}: on $client. Checking ..."
897 check_client_load $client
899 if [ "$rc" != 0 ]; then
900 log "Client load failed to restart on node $client, rc=$rc"
901 # failure of one client load means the test fails;
902 # we do not need to check the others
910 # End recovery-scale functions
912 # verify that lustre actually cleaned up properly
914 [ -f $CATASTROPHE ] && [ `cat $CATASTROPHE` -ne 0 ] && \
915 error "LBUG/LASSERT detected"
916 BUSY=`dmesg | grep -i destruct || true`
919 [ -e $TMP/debug ] && mv $TMP/debug $TMP/debug-busy.`date +%s`
923 check_mem_leak || exit 204
925 [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl && \
926 echo "$0: lustre didn't clean up..." 1>&2 && return 202 || true
928 if module_loaded lnet || module_loaded libcfs; then
929 echo "$0: modules still loaded..." 1>&2
946 RESULT=$(do_node $node "$TEST")
947 if [ "$RESULT" == "$FINAL" ]; then
948 echo "Updated after $WAIT sec: wanted '$FINAL' got '$RESULT'"
951 [ $WAIT -ge $MAX ] && break
952 echo "Waiting $((MAX - WAIT)) secs for update"
953 WAIT=$((WAIT + sleep))
956 echo "Update not seen after $MAX sec: wanted '$FINAL' got '$RESULT'"
960 wait_update_facet () {
963 wait_update $(facet_active_host $facet) "$@"
966 wait_delete_completed () {
967 local TOTALPREV=`lctl get_param -n osc.*.kbytesavail | \
968 awk 'BEGIN{total=0}; {total+=$1}; END{print total}'`
972 while [ "$WAIT" -ne "$MAX_WAIT" ]; do
974 TOTAL=`lctl get_param -n osc.*.kbytesavail | \
975 awk 'BEGIN{total=0}; {total+=$1}; END{print total}'`
976 [ "$TOTAL" -eq "$TOTALPREV" ] && return 0
977 echo "Waiting delete completed ... prev: $TOTALPREV current: $TOTAL "
981 echo "Delete is not completed in $MAX_WAIT sec"
987 check_network "$host" 900
988 while ! do_node $host hostname > /dev/null; do sleep 5; done
993 local host=`facet_active_host $facet`
997 wait_recovery_complete () {
1000 # Use default policy if $2 is not passed by caller.
1001 local MAX=${2:-$(max_recovery_time)}
1003 local var_svc=${facet}_svc
1004 local procfile="*.${!var_svc}.recovery_status"
1008 while [ $WAIT -lt $MAX ]; do
1009 STATUS=$(do_facet $facet lctl get_param -n $procfile | grep status)
1010 [[ $STATUS = "status: COMPLETE" ]] && return 0
1013 echo "Waiting $((MAX - WAIT)) secs for $facet recovery done. $STATUS"
1015 echo "$facet recovery not done in $MAX sec. $STATUS"
1019 wait_mds_ost_sync () {
1020 # just because recovery is done doesn't mean we've finished
1021 # orphan cleanup. Wait for llogs to get synchronized.
1022 echo "Waiting for orphan cleanup..."
1023 # MAX value includes time needed for MDS-OST reconnection
1024 local MAX=$(( TIMEOUT * 2 ))
1026 while [ $WAIT -lt $MAX ]; do
1027 local -a sync=($(do_nodes $(comma_list $(osts_nodes)) \
1028 "$LCTL get_param -n obdfilter.*.mds_sync"))
1030 for ((i=0; i<${#sync[@]}; i++)); do
1031 [ ${sync[$i]} -eq 0 ] && continue
1032 # there is an unfinished MDS-OST synchronization
1036 sleep 2 # increase waiting time and cover statfs cache
1037 [ ${con} -eq 1 ] && return 0
1038 echo "Waiting $WAIT secs for $facet mds-ost sync done."
1041 echo "$facet recovery not done in $MAX sec. $STATUS"
1045 wait_destroy_complete () {
1046 echo "Waiting for destroy to be done..."
1047 # MAX value shouldn't be big as this reflects server responsiveness;
1048 # never increase this just to make a test pass, but investigate
1049 # why it takes so long
1052 while [ $WAIT -lt $MAX ]; do
1053 local -a RPCs=($($LCTL get_param -n osc.*.destroys_in_flight))
1055 for ((i=0; i<${#RPCs[@]}; i++)); do
1056 [ ${RPCs[$i]} -eq 0 ] && continue
1057 # there are still some destroy RPCs in flight
1062 [ ${con} -eq 1 ] && return 0 # done waiting
1063 echo "Waiting $WAIT secs for destroys to be done."
1066 echo "Destroys weren't done in $MAX sec."
1076 # conf-sanity 31 takes a long time to clean up
1077 while [ $WAIT -lt 300 ]; do
1078 running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null && lctl dl | grep ' ST '") || true
1079 [ -z "${running}" ] && return 0
1080 echo "waited $WAIT for${running}"
1081 [ $INTERVAL -lt 64 ] && INTERVAL=$((INTERVAL + INTERVAL))
1083 WAIT=$((WAIT + INTERVAL))
1085 echo "service didn't stop after $WAIT seconds. Still running:"
1090 wait_remote_prog () {
1096 [ "$PDSH" = "no_dsh" ] && return 0
1098 while [ $WAIT -lt $2 ]; do
1099 running=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep) || true
1100 [ -z "${running}" ] && return 0 || true
1101 echo "waited $WAIT for: "
1103 [ $INTERVAL -lt 60 ] && INTERVAL=$((INTERVAL + INTERVAL))
1105 WAIT=$((WAIT + INTERVAL))
1107 local pids=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep | awk '{print $2}')
1108 [ -z "$pids" ] && return 0
1109 echo "$PDSH processes still exists after $WAIT seconds. Still running: $pids"
1110 # FIXME: not portable
1111 for pid in $pids; do
1112 cat /proc/${pid}/status || true
1113 cat /proc/${pid}/wchan || true
1115 kill -9 $pid || true
1124 # not every config has many clients
1126 if [ -n "$CLIENTS" ]; then
1127 $PDSH $CLIENTS "stat -f $MOUNT" > /dev/null
1129 stat -f $MOUNT > /dev/null
1135 # usually checked on a particular client or locally
1137 if [ ! -z "$client" ]; then
1138 $PDSH $client "stat -f $MOUNT" > /dev/null
1140 stat -f $MOUNT > /dev/null
1148 client_reconnect() {
1149 uname -n >> $MOUNT/recon
1150 if [ -z "$CLIENTS" ]; then
1151 df $MOUNT; uname -n >> $MOUNT/recon
1153 do_nodes $CLIENTS "df $MOUNT; uname -n >> $MOUNT/recon" > /dev/null
1155 echo Connected clients:
1157 ls -l $MOUNT/recon > /dev/null
1164 echo "Failing $facet on node `facet_active_host $facet`"
1165 shutdown_facet $facet
1166 [ -n "$sleep_time" ] && sleep $sleep_time
1170 RECOVERY_START_TIME=`date +%s`
1171 echo "df pid is $DFPID"
1172 change_active $facet
1173 local TO=`facet_active_host $facet`
1174 echo "Failover $facet to $TO"
1176 mount_facet $facet || error "Restart of $facet failed"
1185 do_facet $facet sync
1187 local svc=${facet}_svc
1188 do_facet $facet $LCTL --device %${!svc} notransno
1189 do_facet $facet $LCTL --device %${!svc} readonly
1190 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
1191 $LCTL mark "local REPLAY BARRIER on ${!svc}"
1194 replay_barrier_nodf() {
1195 local facet=$1
1196 do_facet $facet sync
1197 local svc=${facet}_svc
1198 echo Replay barrier on ${!svc}
1199 do_facet $facet $LCTL --device %${!svc} notransno
1200 do_facet $facet $LCTL --device %${!svc} readonly
1201 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
1202 $LCTL mark "local REPLAY BARRIER on ${!svc}"
1205 mds_evict_client() {
1206 UUID=`lctl get_param -n mdc.${mds_svc}-mdc-*.uuid`
1207 local mdtdevice=$(get_mds_mdt_device_proc_path)
1208 do_facet mds "lctl set_param -n ${mdtdevice}.${mds_svc}.evict_client $UUID"
1211 ost_evict_client() {
1212 UUID=`lctl get_param -n osc.${ost1_svc}-osc-*.uuid`
1213 do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.evict_client $UUID"
1217 facet_failover $* || error "failover: $?"
1218 clients_up || error "post-failover df: $?"
1223 facet_failover $facet
1229 change_active $facet
1230 mount_facet $facet -o abort_recov
1231 clients_up || echo "first df failed: $?"
1232 clients_up || error "post-failover df: $?"
1236 echo There is no lmc. This is mountconf, baby.
1241 if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
1242 ID=`$PDSH $1 $GMNALNID -l | cut -d\ -f2`
1248 if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
1254 if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
1255 ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | awk '{print $1}'`
1256 if [ -z "$ID" ]; then
1257 echo "Could not get a ptl id for $1..."
1266 if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
1273 if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
1274 if type __h2elan >/dev/null 2>&1; then
1277 ID=`echo $1 | sed 's/[^0-9]*//g'`
1285 if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else
1286 ID=`echo $1 | sed 's/[^0-9]*//g'`
1290 declare -fx h2openib
1293 h2name_or_ip "$1" "o2ib"
1300 [ "$facet" == client ] && echo -n $HOSTNAME && return
1301 varname=${facet}_HOST
1302 if [ -z "${!varname}" ]; then
1303 if [ "${facet:0:3}" == "ost" ]; then
1304 eval ${facet}_HOST=${ost_HOST}
1312 local activevar=${facet}active
1314 if [ -f $TMP/${facet}active ] ; then
1315 source $TMP/${facet}active
1318 active=${!activevar}
1319 if [ -z "$active" ] ; then
1326 facet_active_host() {
1328 local active=`facet_active $facet`
1329 if [ "$facet" == client ]; then
1332 echo `facet_host $active`
1338 local failover=${facet}failover
1339 host=`facet_host $failover`
1340 [ -z "$host" ] && return
1341 local curactive=`facet_active $facet`
1342 if [ -z "${curactive}" -o "$curactive" == "$failover" ] ; then
1343 eval export ${facet}active=$facet
1345 eval export ${facet}active=$failover
1347 # save the active host for this facet
1348 local activevar=${facet}active
1349 echo "$activevar=${!activevar}" > $TMP/$activevar
1354 # do not strip off hostname if verbose, bug 19215
1355 if [ x$1 = x--verbose ]; then
1363 if [ "$HOST" = "$HOSTNAME" ]; then
1365 elif [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" ]; then
1366 echo "cannot run remote command on $HOST with $myPDSH"
1370 echo "CMD: $HOST $@" >&2
1371 $myPDSH $HOST $LCTL mark "$@" > /dev/null 2>&1 || :
1374 if [ "$myPDSH" = "rsh" ]; then
1375 # we need this because rsh does not return the exit code of an executed command
1376 local command_status="$TMP/cs"
1377 rsh $HOST ":> $command_status"
1378 rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin;
1379 cd $RPWD; sh -c \"$@\") ||
1380 echo command failed >$command_status"
1381 [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
1386 # print HOSTNAME for myPDSH="no_dsh"
1387 if [[ $myPDSH = no_dsh ]]; then
1388 $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /"
1390 $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
1393 $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed "s/^${HOST}: //"
1395 return ${PIPESTATUS[0]}
1399 do_node --verbose "$@"
1402 single_local_node () {
1403 [ "$1" = "$HOSTNAME" ]
1408 # do not strip off hostname if verbose, bug 19215
1409 if [ x$1 = x--verbose ]; then
1417 if $(single_local_node $rnodes); then
1419 do_nodev $rnodes "$@"
1421 do_node $rnodes "$@"
1426 # This is adapted from do_node
1429 [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" -o "$myPDSH" = "rsh" ] && \
1430 echo "cannot run remote command on $rnodes with $myPDSH" && return 128
1433 echo "CMD: $rnodes $@" >&2
1434 $myPDSH $rnodes $LCTL mark "$@" > /dev/null 2>&1 || :
1438 $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")"
1440 $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; sh -c \"$@\")" | sed -re "s/\w+:\s//g"
1442 return ${PIPESTATUS[0]}
1448 local HOST=`facet_active_host $facet`
1449 [ -z $HOST ] && echo No host defined for facet ${facet} && exit 1
1454 do_nodes --verbose "$@"
1460 # make sure it's not already running
1462 rm -f $TMP/${facet}active
1463 do_facet ${facet} $MKFS $*
1469 #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num
1470 eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}}
1476 if combined_mgs_mds && [[ $facet = "mgs" ]] ; then
1479 local var=${facet}_MOUNT
1480 eval mntpt=${!var:-${MOUNT%/*}/$facet}
1489 # make sure we are using the primary server, so test-framework will
1490 # be able to clean up properly.
1491 activemds=`facet_active mds`
1492 if [ $activemds != "mds" ]; then
1496 local clients=$CLIENTS
1497 [ -z $clients ] && clients=$(hostname)
1499 zconf_umount_clients $clients $MOUNT "$*" || true
1500 [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
1502 [ "$CLIENTONLY" ] && return
1503 # The add fn removes the ${facet}active file; this would be enough
1504 # if we used do_facet <facet> only after the facet is added, but
1505 # currently we use do_facet mds in local.sh
1507 rm -f ${TMP}/mdsactive
1508 for num in `seq $OSTCOUNT`; do
1510 rm -f $TMP/ost${num}active
1512 if ! combined_mgs_mds ; then
1519 cleanup_echo_devs () {
1520 local devs=$($LCTL dl | grep echo | awk '{print $4}')
1522 for dev in $devs; do
1523 $LCTL --device $dev cleanup
1524 $LCTL --device $dev detach
1529 nfs_client_mode && return
1537 combined_mgs_mds () {
1538 [[ $MDSDEV = $MGSDEV ]] && [[ $mds_HOST = $mgs_HOST ]]
1542 [ "$FSTYPE" ] && FSTYPE_OPT="--backfstype $FSTYPE"
1545 # We need ldiskfs here, may as well load them all
1547 [ "$CLIENTONLY" ] && return
1548 echo Formatting mgs, mds, osts
1549 if ! combined_mgs_mds ; then
1550 add mgs $mgs_MKFS_OPTS $FSTYPE_OPT --reformat $MGSDEV || exit 10
1554 add mds $MDS_MKFS_OPTS $FSTYPE_OPT --reformat $MDSDEV || exit 10
1556 add mds $MDS_MKFS_OPTS $FSTYPE_OPT --reformat $MDSDEV > /dev/null || exit 10
1559 for num in `seq $OSTCOUNT`; do
1561 add ost$num $OST_MKFS_OPTS $FSTYPE_OPT --reformat `ostdevname $num` || exit 10
1563 add ost$num $OST_MKFS_OPTS $FSTYPE_OPT --reformat `ostdevname $num` > /dev/null || exit 10
1569 grep " $1 " /proc/mounts || zconf_mount $HOSTNAME $*
1574 zconf_umount `hostname` $1 || error "umount failed"
1575 zconf_mount `hostname` $1 || error "mount failed"
1578 writeconf_facet () {
1582 do_facet $facet "$TUNEFS --writeconf $dev"
1586 writeconf_facet mds $MDSDEV
1588 for num in `seq $OSTCOUNT`; do
1589 DEVNAME=`ostdevname $num`
1590 writeconf_facet ost$num $DEVNAME
1595 nfs_client_mode && return
1597 sanity_mount_check ||
1598 error "environments are insane!"
1601 if [ -z "$CLIENTONLY" ]; then
1602 echo Setup mgs, mdt, osts
1604 echo $WRITECONF | grep -q "writeconf" && \
1607 if ! combined_mgs_mds ; then
1608 start mgs $MGSDEV $mgs_MOUNT_OPTS
1611 start mds $MDSDEV $MDS_MOUNT_OPTS
1612 # We started mds, now we should set failover variable properly.
1613 # Set mdsfailover_HOST if it is not set (the default failnode).
1614 if [ -z "$mdsfailover_HOST" ]; then
1615 mdsfailover_HOST=$(facet_host mds)
1618 for num in `seq $OSTCOUNT`; do
1619 DEVNAME=`ostdevname $num`
1620 start ost$num $DEVNAME $OST_MOUNT_OPTS
1622 # We started ost$num, now we should set ost${num}failover variable properly.
1623 # Set ost${num}failover_HOST if it is not set (the default failnode).
1624 varname=ost${num}failover_HOST
1625 if [ -z "${!varname}" ]; then
1626 eval ost${num}failover_HOST=$(facet_host ost${num})
1631 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
1633 [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT
1635 if [ "$MOUNT_2" ]; then
1636 mount_client $MOUNT2
1637 [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2
1643 mounted_lustre_filesystems() {
1644 awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts
1647 init_facet_vars () {
1648 [ "$CLIENTONLY" ] && return 0
1655 eval export ${facet}_dev=${device}
1656 eval export ${facet}_opt=\"$@\"
1658 local dev=${facet}_dev
1659 local label=$(do_facet ${facet} "$E2LABEL ${!dev}")
1660 [ -z "$label" ] && echo no label for ${!dev} && exit 1
1662 eval export ${facet}_svc=${label}
1664 local varname=${facet}failover_HOST
1665 if [ -z "${!varname}" ]; then
1666 eval $varname=$(facet_host $facet)
1669 # ${facet}failover_dev is set in cfg file
1670 varname=${facet}failover_dev
1671 if [ -n "${!varname}" ] ; then
1672 eval export ${facet}failover_dev=${!varname}
1674 eval export ${facet}failover_dev=$device
1677 # get the mount point of an already mounted device;
1678 # if facet_dev is already mounted then use the real
1679 # mount point of this facet; otherwise use $(facet_mntpt $facet)
1680 # i.e. ${facet}_MOUNT if specified by user, or the default
1681 local mntpt=$(do_facet ${facet} cat /proc/mounts | \
1682 awk '"'${!dev}'" == $1 && $3 == "lustre" { print $2 }')
1683 if [ -z $mntpt ]; then
1684 mntpt=$(facet_mntpt $facet)
1686 eval export ${facet}_MOUNT=$mntpt
1689 init_facets_vars () {
1691 init_facet_vars mds $MDSDEV $MDS_MOUNT_OPTS
1693 remote_ost_nodsh && return
1695 for num in `seq $OSTCOUNT`; do
1696 DEVNAME=`ostdevname $num`
1697 init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS
1701 init_param_vars () {
1702 if ! remote_ost_nodsh && ! remote_mds_nodsh; then
1703 export MDSVER=$(do_facet mds "lctl get_param version" | cut -d. -f1,2)
1704 export OSTVER=$(do_facet ost1 "lctl get_param version" | cut -d. -f1,2)
1705 export CLIVER=$(lctl get_param version | cut -d. -f 1,2)
1709 TIMEOUT=$(do_facet mds "lctl get_param -n timeout")
1711 log "Using TIMEOUT=$TIMEOUT"
1713 if [ "$ENABLE_QUOTA" ]; then
1714 setup_quota $MOUNT || return 2
1718 nfs_client_mode () {
1719 if [ "$NFSCLIENT" ]; then
1720 echo "NFSCLIENT mode: setup, cleanup, check config skipped"
1721 local clients=$CLIENTS
1722 [ -z $clients ] && clients=$(hostname)
1724 # FIXME: remove hostname when 19215 fixed
1725 do_nodes $clients "echo \\\$(hostname); grep ' '$MOUNT' ' /proc/mounts"
1726 declare -a nfsexport=(`grep ' '$MOUNT' ' /proc/mounts | awk '{print $1}' | awk -F: '{print $1 " " $2}'`)
1727 do_nodes ${nfsexport[0]} "echo \\\$(hostname); df -T ${nfsexport[1]}"
1733 check_config_client () {
1736 local mounted=$(mount | grep " $mntpt ")
1737 if [ "$CLIENTONLY" ]; then
1739 # CLIENTONLY should not depend on *_HOST settings
1740 local mgc=$($LCTL device_list | awk '/MGC/ {print $4}')
1741 # in theory someone could create a new,
1742 # client-only config file that assumed lustre was already
1743 # configured and didn't set the MGSNID. If MGSNID is not set,
1744 # then we should use the mgs nid currently being used
1745 # as the default value. bug 18021
1746 [[ x$MGSNID = x ]] &&
1749 if [[ x$mgc != xMGC$MGSNID ]]; then
1750 if [ "$mgs_HOST" ]; then
1751 local mgc_ip=$(ping -q -c1 -w1 $mgs_HOST | grep PING | awk '{print $3}' | sed -e "s/(//g" -e "s/)//g")
1752 [[ x$mgc = xMGC$mgc_ip@$NETTYPE ]] ||
1753 error_exit "MGSNID=$MGSNID, mounted: $mounted, MGC : $mgc"
1759 local myMGS_host=$mgs_HOST
1760 if [ "$NETTYPE" = "ptl" ]; then
1761 myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//)
1764 echo Checking config lustre mounted on $mntpt
1765 local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}')
1766 mgshost=$(echo $mgshost | awk -F: '{print $1}')
1768 # if [ "$mgshost" != "$myMGS_host" ]; then
1769 # error_exit "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE
1770 # Please use correct config or set mds_HOST correctly!"
1775 check_config_clients () {
1776 local clients=${CLIENTS:-$HOSTNAME}
1779 nfs_client_mode && return
1781 do_rpc_nodes $clients check_config_client $mntpt
1783 sanity_mount_check ||
1784 error "environments are insane!"
1788 local mdstimeout=$(do_facet mds "lctl get_param -n timeout")
1789 local cltimeout=$(lctl get_param -n timeout)
1790 if [ $mdstimeout -ne $TIMEOUT ] || [ $mdstimeout -ne $cltimeout ]; then
1791 error "timeouts are wrong! mds: $mdstimeout, client: $cltimeout, TIMEOUT=$TIMEOUT"
1798 local mounted=$(mounted_lustre_filesystems)
1800 echo $mounted' ' | grep -w -q $mntpt' '
1804 [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0
1808 # empty lustre filesystem may have empty directories lost+found and .lustre
1810 [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \
1811 -print | wc -l) = 1 ] || return 1
1812 [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found && return 0
1813 [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre && return 0
1817 check_and_setup_lustre() {
1818 nfs_client_mode && return
1820 local MOUNTED=$(mounted_lustre_filesystems)
1824 # neither MOUNT nor MOUNT2 is mounted
1825 if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
1826 [ "$REFORMAT" ] && formatall
1827 # setupall mounts both MOUNT and MOUNT2 (if MOUNT_2 is set)
1829 is_mounted $MOUNT || error "NAME=$NAME not mounted"
1830 export I_MOUNTED=yes
1834 elif is_mounted $MOUNT2; then
1836 # MOUNT2 is mounted, while MOUNT_2 is not set
1837 if ! [ "$MOUNT_2" ]; then
1838 cleanup_mount $MOUNT2
1839 export I_UMOUNTED2=yes
1842 # MOUNT2 is mounted, MOUNT_2 is set
1844 # FIXME: what to do if check_config failed?
1846 # 1) remote client has mounted other Lustre fs ?
1847 # 2) it has insane env ?
1848 # let's try umount MOUNT2 on all clients and mount it again:
1849 if ! check_config_clients $MOUNT2; then
1850 cleanup_mount $MOUNT2
1851 restore_mount $MOUNT2
1852 export I_MOUNTED2=yes
1857 # MOUNT is mounted, MOUNT2 is not mounted
1858 elif [ "$MOUNT_2" ]; then
1859 restore_mount $MOUNT2
1860 export I_MOUNTED2=yes
1864 # FIXME: what to do if check_config failed?
1866 # 1) remote client has mounted other Lustre fs?
1867 # 2) lustre is mounted on remote_clients at all?
1868 check_config_clients $MOUNT
1872 do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=\\\"$PTLDEBUG\\\";
1873 lctl set_param subsystem_debug=\\\"${SUBSYSTEM# }\\\";
1874 lctl set_param debug_mb=${DEBUG_SIZE};
1877 if [ "$ONLY" == "setup" ]; then
1883 local clients=${CLIENTS:-$HOSTNAME}
1886 zconf_mount_clients $clients $mntpt
1890 local clients=${CLIENTS:-$HOSTNAME}
1893 zconf_umount_clients $clients $mntpt
1896 cleanup_and_setup_lustre() {
1897 if [ "$ONLY" == "cleanup" -o "`mount | grep $MOUNT`" ]; then
1898 lctl set_param debug=0 || true
1900 if [ "$ONLY" == "cleanup" ]; then
1904 check_and_setup_lustre
1907 # Get all of the server target devices from a given server node and type.
1916 mdt) obd_type="osd" ;;
1917 ost) obd_type="obdfilter" ;; # needs to be fixed when OST also uses an OSD
1918 *) echo "invalid server type" && return 1 ;;
1921 devs=$(do_node $node "lctl get_param -n $obd_type.*.mntdev")
1922 for dev in $devs; do
1924 *loop*) do_node $node "losetup $dev" | \
1925 sed -e "s/.*(//" -e "s/).*//" ;;
1931 # Get all of the server target devices.
1937 for node in $(osts_nodes); do
1938 OSTDEVS[i]=$(get_mnt_devs $node ost)
1943 # Run e2fsck on MDT or OST device.
1950 df > /dev/null # update statfs data on disk
1951 local cmd="$E2FSCK -d -v -f -n $MDSDB_OPT $ostdb_opt $target_dev"
1954 local rc=${PIPESTATUS[0]}
1955 [ $rc -le $FSCK_MAX_ERR ] || \
1956 error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
1960 # Run e2fsck on MDT and OST(s) to generate databases used for lfsck.
1967 tmp_file=$(mktemp -p $SHARED_DIRECTORY ||
1968 error "fail to create file in $SHARED_DIRECTORY")
1970 # make sure everything gets to the backing store
1971 local list=$(comma_list $CLIENTS $(facet_host mds) $(osts_nodes))
1972 do_nodes $list "sync; sleep 2; sync"
1974 do_nodes $list ls $tmp_file || \
1975 error "$SHARED_DIRECTORY is not a shared directory"
1978 run_e2fsck $(facet_host mds) $MDSDEV
1983 for node in $(osts_nodes); do
1984 for dev in ${OSTDEVS[i]}; do
1985 local ostdb_opt=`eval echo $OSTDB_OPT`
1986 run_e2fsck $node $dev $ostidx "$ostdb_opt"
1987 OSTDB_LIST="$OSTDB_LIST $OSTDB-$ostidx"
1988 ostidx=$((ostidx + 1))
1995 local cmd="$LFSCK1 -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT"
1998 local rc=${PIPESTATUS[0]}
1999 [ $rc -le $FSCK_MAX_ERR ] || \
2000 error "$cmd returned $rc, should be <= $FSCK_MAX_ERR"
2001 echo "lfsck finished with rc=$rc"
2003 rm -rvf $MDSDB* $OSTDB* || true
2008 check_and_cleanup_lustre() {
2009 if [ "$LFSCK_ALWAYS" = "yes" ]; then
2012 if [ "$SKIP_LFSCK" == "no" ]; then
2020 if is_mounted $MOUNT; then
2021 [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]*
2022 [ "$ENABLE_QUOTA" ] && restore_quota_type || true
2025 if [ "$I_UMOUNTED2" = "yes" ]; then
2026 restore_mount $MOUNT2 || error "restore $MOUNT2 failed"
2029 if [ "$I_MOUNTED2" = "yes" ]; then
2030 cleanup_mount $MOUNT2
2033 if [ "$I_MOUNTED" = "yes" ]; then
2034 cleanupall -f || error "cleanup failed"
2046 while [ $NETWORK -eq 0 ]; do
2047 if ping -c 1 -w 3 $1 > /dev/null; then
2051 echo "waiting for $1, $((MAX - WAIT)) secs left"
2054 if [ $WAIT -gt $MAX ]; then
2055 echo "Network not available"
2061 while( !($DSH2 $1 "netstat -tna | grep -q $2") ) ; do
2072 # the sed converts spaces to commas, but leaves the last space
2073 # alone, so the line doesn't end with a comma.
2074 echo "$*" | tr -s " " "\n" | sort -b -u | tr "\n" " " | sed 's/ \([^$]\)/,\1/g'
2077 # $list and $excluded are comma-separated lists
2078 exclude_items_from_list () {
2084 for item in ${excluded//,/ }; do
2085 list=$(echo " $list " | sed -re "s/\s+$item\s+/ /g")
2087 echo $(comma_list $list)
2090 # $list and $expand are comma-separated lists
2092 local list=${1//,/ }
2093 local expand=${2//,/ }
2096 expanded=$(for i in $list $expand; do echo $i; done | sort -u)
2097 echo $(comma_list $expanded)
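# illustrative examples of the two list helpers above:
#   exclude_items_from_list "a,b,c" "b"   ->  a,c
#   expand_list "a,b" "c,b"               ->  a,b,c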
2100 testslist_filter () {
2101 local script=$LUSTRE/tests/${TESTSUITE}.sh
2103 [ -f $script ] || return 0
2105 local start_at=$START_AT
2106 local stop_at=$STOP_AT
2108 local var=${TESTSUITE//-/_}_START_AT
2109 [ x"${!var}" != x ] && start_at=${!var}
2110 var=${TESTSUITE//-/_}_STOP_AT
2111 [ x"${!var}" != x ] && stop_at=${!var}
2113 sed -n 's/^test_\([^ (]*\).*/\1/p' $script | \
2114 awk ' BEGIN { if ("'${start_at:-0}'" != 0) flag = 1 }
2115 /^'${start_at}'$/ {flag = 0}
2116 {if (flag == 1) print $0}
2117 /^'${stop_at}'$/ { flag = 1 }'
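# illustrative behaviour: with START_AT=3 and STOP_AT=5 and a suite defining
# test_1 .. test_7, the pipeline above prints 1, 2, 6 and 7, i.e. the tests
# outside the [start_at, stop_at] range, which build_test_filter adds to EXCEPT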
2121 (cd `dirname $1`; echo $PWD/`basename $1`)
2125 local name=$(echo $1 | tr "[:upper:]" "[:lower:]")
2126 local type=$(echo $1 | tr "[:lower:]" "[:upper:]")
2132 OST ) for ((i=1; i<=$OSTCOUNT; i++)) do
2133 list="$list ${name}$i"
2135 * ) error "Invalid facet type"
2138 echo $(comma_list $list)
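# e.g. with OSTCOUNT=3, get_facets OST prints "ost1,ost2,ost3"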
2141 ##################################
2142 # Adaptive Timeouts funcs
2145 # only check mds, we assume at_max is the same on all nodes
2146 local at_max=$(do_facet mds "lctl get_param -n at_max")
2147 if [ $at_max -eq 0 ]; then
2157 # assume that all OSTs have the same at_max set
2158 if [ $facet == "ost" ]; then
2159 do_facet ost1 "lctl get_param -n at_max"
2161 do_facet $facet "lctl get_param -n at_max"
2171 if [ $facet == "ost" ]; then
2172 for i in `seq $OSTCOUNT`; do
2173 do_facet ost$i "lctl set_param at_max=$at_max"
2176 do_facet $facet "lctl set_param at_max=$at_max"
2181 ##################################
2185 # OBD_FAIL_MDS_ALL_REQUEST_NET
2187 do_facet mds lctl set_param fail_loc=0x123
2188 do_facet client "$1" || RC=$?
2189 do_facet mds lctl set_param fail_loc=0
2194 # OBD_FAIL_MDS_ALL_REPLY_NET
2196 do_facet mds lctl set_param fail_loc=0x122
2197 do_facet client "$@" || RC=$?
2198 do_facet mds lctl set_param fail_loc=0
2202 drop_reint_reply() {
2203 # OBD_FAIL_MDS_REINT_NET_REP
2205 do_facet mds lctl set_param fail_loc=0x119
2206 do_facet client "$@" || RC=$?
2207 do_facet mds lctl set_param fail_loc=0
2212 #define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214
2214 do_facet ost1 lctl set_param fail_loc=0x214
2215 do_facet client "$1" || RC=$?
2216 do_facet client "sync"
2217 do_facet ost1 lctl set_param fail_loc=0
2221 drop_ldlm_cancel() {
2222 #define OBD_FAIL_LDLM_CANCEL 0x304
2224 do_facet client lctl set_param fail_loc=0x304
2225 do_facet client "$@" || RC=$?
2226 do_facet client lctl set_param fail_loc=0
2230 drop_bl_callback() {
2231 #define OBD_FAIL_LDLM_BL_CALLBACK 0x305
2233 do_facet client lctl set_param fail_loc=0x305
2234 do_facet client "$@" || RC=$?
2235 do_facet client lctl set_param fail_loc=0
2240 #define OBD_FAIL_LDLM_REPLY 0x30c
2242 do_facet mds lctl set_param fail_loc=0x30c
2243 do_facet client "$@" || RC=$?
2244 do_facet mds lctl set_param fail_loc=0
2252 echo "clearing fail_loc on $facet"
2253 do_facet $facet "lctl set_param fail_loc=0 2>/dev/null || true"
2256 set_nodes_failloc () {
2257 do_nodes $(comma_list $1) lctl set_param fail_loc=$2
2260 cancel_lru_locks() {
2261 $LCTL mark "cancel_lru_locks $1 start"
2262 for d in `lctl get_param -N ldlm.namespaces.*.lru_size | egrep -i $1`; do
2263 $LCTL set_param -n $d=clear
2265 $LCTL get_param ldlm.namespaces.*.lock_unused_count | egrep -i $1 | grep -v '=0'
2266 $LCTL mark "cancel_lru_locks $1 stop"
2271 NR_CPU=$(grep -c "processor" /proc/cpuinfo)
2272 DEFAULT_LRU_SIZE=$((100 * NR_CPU))
2273 echo "$DEFAULT_LRU_SIZE"
2278 lctl set_param ldlm.namespaces.*$1*.lru_size=0
2281 lru_resize_disable()
2283 lctl set_param ldlm.namespaces.*$1*.lru_size $(default_lru_size)
2288 for FILE in `lctl get_param -N "llite.*.dump_page_cache"`; do
2289 if [ `lctl get_param -n $FILE | wc -l` -gt 1 ]; then
2290 echo "there is still data in page cache $FILE ?"
2291 lctl get_param -n $FILE
2298 create_fake_exports () {
2301 #obd_fail_val = num;
2302 #define OBD_FAIL_TGT_FAKE_EXP 0x708
2303 do_facet $facet "lctl set_param fail_val=$num"
2304 do_facet $facet "lctl set_param fail_loc=0x80000708"
2309 DEBUGSAVE="$(lctl get_param -n debug)"
2313 [ -n "$DEBUGSAVE" ] && lctl set_param debug="${DEBUGSAVE}"
2317 ##################################
2319 ##################################
2322 local TYPE=${TYPE:-"FAIL"}
2326 # do not dump logs if $1=false
2327 if [ "x$1" = "xfalse" ]; then
2332 log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ "
2335 ERRLOG=$TMP/lustre_${TESTSUITE}_${TESTNAME}.$(date +%s)
2336 echo "Dumping lctl log to $ERRLOG"
2337 # We need to dump the logs on all nodes
2338 do_nodes $(comma_list $(nodes_list)) $NODE $LCTL dk $ERRLOG
2341 [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG
2347 if $FAIL_ON_ERROR; then
2358 # use only if we are ignoring failures for this test, bugno required.
2359 # (like ALWAYS_EXCEPT, but run the test and ignore the results.)
2360 # e.g. error_ignore 5494 "your message"
2362 local TYPE="IGNORE (bz$1)"
2368 $FAIL_ON_SKIP_ENV && error false $@ || skip $@
2372 log " SKIP: ${TESTSUITE} ${TESTNAME} $@"
2373 [ "$TESTSUITELOG" ] && \
2374 echo "${TESTSUITE}: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true
2377 build_test_filter() {
2378 EXCEPT="$EXCEPT $(testslist_filter)"
2380 [ "$ONLY" ] && log "only running test `echo $ONLY`"
2384 [ "$EXCEPT$ALWAYS_EXCEPT" ] && \
2385 log "excepting tests: `echo $EXCEPT $ALWAYS_EXCEPT`"
2386 [ "$EXCEPT_SLOW" ] && \
2387 log "skipping tests SLOW=no: `echo $EXCEPT_SLOW`"
2388 for E in $EXCEPT $ALWAYS_EXCEPT; do
2389 eval EXCEPT_${E}=true
2391 for E in $EXCEPT_SLOW; do
2392 eval EXCEPT_SLOW_${E}=true
2394 for G in $GRANT_CHECK_LIST; do
2395 eval GCHECK_ONLY_${G}=true
2400 if [[ $1 = [a-z]* ]]; then
2407 # print a newline if the last test was skipped
2408 export LAST_SKIPPED=
2412 export base=`basetest $1`
2413 if [ ! -z "$ONLY" ]; then
2415 if [ ${!testname}x != x ]; then
2416 [ "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
2421 if [ ${!testname}x != x ]; then
2422 [ "$LAST_SKIPPED" ] && echo "" && LAST_SKIPPED=
2431 if [ ${!testname}x != x ]; then
2433 TESTNAME=test_$1 skip "skipping excluded test $1"
2436 testname=EXCEPT_$base
2437 if [ ${!testname}x != x ]; then
2439 TESTNAME=test_$1 skip "skipping excluded test $1 (base $base)"
2442 testname=EXCEPT_SLOW_$1
2443 if [ ${!testname}x != x ]; then
2445 TESTNAME=test_$1 skip "skipping SLOW test $1"
2448 testname=EXCEPT_SLOW_$base
2449 if [ ${!testname}x != x ]; then
2451 TESTNAME=test_$1 skip "skipping SLOW test $1 (base $base)"
2461 EQUALS="======================================================================"
2465 local suffixlen=$((${#EQUALS} - ${#msg}))
2466 [ $suffixlen -lt 5 ] && suffixlen=5
2467 log `echo $(printf '===== %s %.*s\n' "$msg" $suffixlen $EQUALS)`
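# e.g. banner "test_1: simple create" logs the message padded with "="
# characters up to the width of $EQUALS (illustrative)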
2472 module_loaded lnet || load_modules
2484 do_nodes $(comma_list $(nodes_list)) $LCTL mark "$MSG" 2> /dev/null || true
2489 strace -o $TMP/$1.strace -ttt $*
2491 log "FINISHED: $*: rc $RC"
2496 $TEST_FAILED && echo -n "FAIL " || echo -n "PASS "
2501 FFREE=`lctl get_param -n mds.*.filesfree`
2502 FTOTAL=`lctl get_param -n mds.*.filestotal`
2503 [ $FFREE -ge $FTOTAL ] && error "files free $FFREE >= total $FTOTAL" || true
2507 echo -n "Resetting fail_loc on all nodes..."
2508 do_nodes $(comma_list $(nodes_list)) "lctl set_param -n fail_loc=0 2>/dev/null || true"
2516 export tdir=d0.${TESTSUITE}/d${base}
2518 local SAVE_UMASK=`umask`
2521 local BEFORE=`date +%s`
2523 log "== test $testnum: $message == `date +%H:%M:%S` ($BEFORE)"
2525 export TESTNAME=test_$testnum
2527 test_${testnum} || error "test_$testnum failed with $?"
2531 check_grant ${testnum} || error "check_grant $testnum failed with $?"
2532 check_catastrophe || error "LBUG/LASSERT detected"
2533 ps auxww | grep -v grep | grep -q multiop && error "multiop still running"
2534 pass "($((`date +%s` - $BEFORE))s)"
2542 (cd `dirname $1`; echo $PWD/`basename $1`)
2546 [ -d $DIR1 ] && cd $DIR1 && sync; sleep 1; sync
2547 [ -d $DIR2 ] && cd $DIR2 && sync; sleep 1; sync
2552 export base=`basetest $1`
2553 [ "$CHECK_GRANT" == "no" ] && return 0
2555 testname=GCHECK_ONLY_${base}
2556 [ ${!testname}x == x ] && return 0
2558 echo -n "checking grant......"
2560 # write some data to sync client lost_grant
2561 rm -f $DIR1/${tfile}_check_grant_* 2>&1
2562 for i in `seq $OSTCOUNT`; do
2563 $LFS setstripe $DIR1/${tfile}_check_grant_$i -i $(($i -1)) -c 1
2564 dd if=/dev/zero of=$DIR1/${tfile}_check_grant_$i bs=4k \
2565 count=1 > /dev/null 2>&1
2567 # sync all the data and make sure no pending data on server
2570 #get client grant and server grant
2572 for d in `lctl get_param -n osc.*.cur_grant_bytes`; do
2573 client_grant=$((client_grant + $d))
2576 for d in `lctl get_param -n obdfilter.*.tot_granted`; do
2577 server_grant=$((server_grant + $d))
2580 # cleanup the check_grant file
2581 for i in `seq $OSTCOUNT`; do
2582 rm $DIR1/${tfile}_check_grant_$i
2585 #check whether client grant == server grant
2586 if [ $client_grant != $server_grant ]; then
2587 echo "failed: client:${client_grant} server: ${server_grant}"
2594 ########################
2600 ost=`echo $1 | awk -F_ '{print $3}'`
2601 if [ -z $ost ]; then
2602 ost=`echo $1 | sed 's/-osc.*//'`
2609 [ "$node" != "$(hostname)" ]
2614 remote_node $mds_HOST
2619 [ "$CLIENTONLY" ] && return 0 || true
2620 remote_mds && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$mds_HOST" ]
2626 for node in $(osts_nodes) ; do
2627 remote_node $node && return 0
2634 [ "$CLIENTONLY" ] && return 0 || true
2635 remote_ost && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
2641 MGS=$(facet_host mgs)
2642 remote_node $MGS && [ "$PDSH" = "no_dsh" -o -z "$PDSH" -o -z "$ost_HOST" ]
2646 remote_ost && remote_mds
2651 remote_mds_nodsh || remote_ost_nodsh || \
2652 $(single_local_node $(comma_list $(nodes_list)))
2656 local OSTNODES=$(facet_host ost1)
2659 for num in `seq $OSTCOUNT`; do
2660 local myOST=$(facet_host ost$num)
2661 OSTNODES="$OSTNODES $myOST"
2663 NODES_sort=$(for i in $OSTNODES; do echo $i; done | sort -u)
2669 # FIXME. We need a list of clients
2670 local myNODES=$HOSTNAME
2673 # CLIENTS (if specified) contains the local client
2674 [ -n "$CLIENTS" ] && myNODES=${CLIENTS//,/ }
2676 if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
2677 myNODES="$myNODES $(osts_nodes) $mds_HOST"
2680 myNODES_sort=$(for i in $myNODES; do echo $i; done | sort -u)
2685 remote_nodes_list () {
2686 local rnodes=$(nodes_list)
2687 rnodes=$(echo " $rnodes " | sed -re "s/\s+$HOSTNAME\s+/ /g")
2691 init_clients_lists () {
2692 # Sanity check: exclude the local client from RCLIENTS
2693 local rclients=$(echo " $RCLIENTS " | sed -re "s/\s+$HOSTNAME\s+/ /g")
2695 # Sanity check: exclude the dup entries
2696 rclients=$(for i in $rclients; do echo $i; done | sort -u)
2698 local clients="$SINGLECLIENT $HOSTNAME $rclients"
2700 # Sanity check: exclude the dup entries from CLIENTS
2701 # for those configs which have SINGLECLIENT set to the local client
2702 clients=$(for i in $clients; do echo $i; done | sort -u)
2704 CLIENTS=`comma_list $clients`
2705 local -a remoteclients=($rclients)
2706 for ((i=0; $i<${#remoteclients[@]}; i++)); do
2707 varname=CLIENT$((i + 2))
2708 eval $varname=${remoteclients[i]}
2711 CLIENTCOUNT=$((${#remoteclients[@]} + 1))
2714 get_random_entry () {
2717 rnodes=${rnodes//,/ }
2719 local -a nodes=($rnodes)
2720 local num=${#nodes[@]}
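# $RANDOM is uniform over [0, 32767], so the arithmetic below maps it to an
# integer index in [0, num - 1]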
2721 local i=$((RANDOM * num * 2 / 65536))
2727 [ "$CLIENTONLY" ] || [ "$CLIENTMODSONLY" = yes ]
2732 lctl get_param version | grep -q patchless
2736 [ "$MDSVER" = "$CLIVER" -a "$OSTVER" = "$CLIVER" ]
2741 echo $nodes | wc -w || true
2745 local nodes=$(osts_nodes)
2746 local osscount=$(get_node_count "$nodes")
2747 [ ! "$OSTCOUNT" = "$osscount" ]
2750 generate_machine_file() {
2751 local nodes=${1//,/ }
2752 local machinefile=$2
2754 for node in $nodes; do
2755 echo $node >>$machinefile || \
2756 { echo "can not generate machinefile $machinefile" && return 1; }
2761 local file=$1/stripe
2763 $LFS getstripe -v $file || error
2767 setstripe_nfsserver () {
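# find the first nfs entry in /proc/mounts whose mount point matches $dir,
# then keep only the server host (the part before ':') as $nfsserver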
2770 local nfsserver=$(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \
2771 { print $1 }' /proc/mounts | cut -f 1 -d : | head -1)
2773 [ -z "$nfsserver" ] && echo "$dir is not nfs mounted" && return 1
2775 do_nodev $nfsserver lfs setstripe "$@"
2778 check_runas_id_ret() {
2780 local myRUNAS_UID=$1
2781 local myRUNAS_GID=$2
2784 if [ -z "$myRUNAS" ]; then
2785 error_exit "myRUNAS command must be specified for check_runas_id"
2787 mkdir $DIR/d0_runas_test
2789 chown $myRUNAS_UID:$myRUNAS_GID $DIR/d0_runas_test
2790 if ! $myRUNAS touch $DIR/d0_runas_test/f$$ ; then
2791 do_nodesv $(comma_list $(nodes_list)) grep -w $myRUNAS_UID /etc/passwd
2794 rm -rf $DIR/d0_runas_test
2799 local myRUNAS_UID=$1
2800 local myRUNAS_GID=$2
2803 check_runas_id_ret $myRUNAS_UID $myRUNAS_GID $myRUNAS || \
2804 error "unable to write to $DIR/d0_runas_test as UID $myRUNAS_UID.
2805 Please set RUNAS_ID to some UID which exists on MDS and client or
2806 add user $myRUNAS_UID:$myRUNAS_GID on these nodes."
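# Typical call (sketch; assumes RUNAS_ID, RUNAS_GID and RUNAS are provided by
# the test configuration):
#   check_runas_id $RUNAS_ID $RUNAS_GID $RUNAS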
2809 # Run multiop in the background, but wait for it to print
2810 # "PAUSING" to its stdout before returning from this function.
2811 multiop_bg_pause() {
2812 MULTIOP_PROG=${MULTIOP_PROG:-multiop}
2816 TMPPIPE=/tmp/multiop_open_wait_pipe.$$
2819 echo "$MULTIOP_PROG $FILE v$ARGS"
2820 $MULTIOP_PROG $FILE v$ARGS > $TMPPIPE &
2822 echo "TMPPIPE=${TMPPIPE}"
2823 local multiop_output
2825 read -t 60 multiop_output < $TMPPIPE
2826 if [ $? -ne 0 ]; then
2831 if [ "$multiop_output" != "PAUSING" ]; then
2832 echo "Incorrect multiop output: $multiop_output"
2847 [ ${PIPESTATUS[0]} -eq 0 ] || rc=1
2853 inodes_available () {
2854 local IFree=$($LFS df -i $MOUNT | grep ^$FSNAME | awk '{print $4}' | sort -un | head -1) || return 1
2858 # reset llite stat counters
2859 clear_llite_stats(){
2860 lctl set_param -n llite.*.stats 0
2863 # sum llite stat items
2864 calc_llite_stats() {
2865 local res=$(lctl get_param -n llite.*.stats |
2866 awk 'BEGIN {s = 0} END {print s} /^'"$1"'/ {s += $2}')
2871 awk 'BEGIN {s = 0}; {s += $1}; END {print s}'
2874 calc_osc_kbytes () {
2875 df $MOUNT > /dev/null
2876 $LCTL get_param -n osc.*[oO][sS][cC][-_]*.$1 | calc_sum
2879 # save_lustre_params(node, parameter_mask)
2880 # generate a stream of formatted strings (<node> <param name>=<param value>)
2881 save_lustre_params() {
2883 do_nodesv $1 "lctl get_param $2 | while read s; do echo \\\$s; done"
2886 # restore lustre parameters from the input stream produced by save_lustre_params
2887 restore_lustre_params() {
2891 while IFS=" =" read node name val; do
2892 do_node ${node//:/} "lctl set_param -n $name $val"
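# Illustrative round trip (sketch; the parameter name is just an example):
#   save_lustre_params $HOSTNAME "llite.*.max_read_ahead_mb" > $TMP/params.$$
#   lctl set_param llite.*.max_read_ahead_mb=0
#   ... run the test ...
#   restore_lustre_params < $TMP/params.$$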
2896 check_catastrophe() {
2897 local rnodes=${1:-$(comma_list $(remote_nodes_list))}
2898 local C=$CATASTROPHE
2899 [ -f $C ] && [ $(cat $C) -ne 0 ] && return 1
2901 if [ -n "$rnodes" ]; then
2902 do_nodes $rnodes "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0);
2903 if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
2919 do_facet $1 $3 lfs getstripe -v $2 > $tmp_file
2921 stripe_size=`awk '$1 ~ /size/ {print $2}' $tmp_file`
2922 stripe_count=`awk '$1 ~ /count/ {print $2}' $tmp_file`
2923 stripe_index=`awk '$1 ~ /stripe_offset/ {print $2}' $tmp_file`
2927 mdsrate_cleanup () {
2928 mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6
2931 delayed_recovery_enabled () {
2932 do_facet mds "lctl get_param -n mds.${mds_svc}.stale_export_age" > /dev/null 2>&1
2935 ################################################################################
2936 # The following functions are used to enable interop testing between
2937 # 1.8 and 2.0. The lprocfs layout changed from 1.8 to 2.0 as follows:
2939 # {blocksize filesfree filestotal fstype kbytesavail kbytesfree kbytestotal mntdev} moved from mds to osd
2940 # mdt lov: fsname-mdtlov -> fsname-MDTXXXX-mdtlov
2941 # mdt osc: fsname-OSTXXXX-osc -> fsname-OSTXXXX-osc-MDTXXXX
2942 ################################################################################
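# For example, the MDT's osc device for OST0000 is named "<fsname>-OST0000-osc"
# on a 1.8 MDS but "<fsname>-OST0000-osc-MDT0000" on 2.0; the helpers below
# return the name appropriate for the running MDS version.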
2944 get_lustre_version () {
2945 local node=${1:-"mds"}
2946 do_facet $node $LCTL get_param -n version | awk '/^lustre:/ {print $2}'
2949 get_mds_version_major () {
2950 local version=$(get_lustre_version mds)
2951 echo $version | awk -F. '{print $1}'
2954 get_mds_version_minor () {
2955 local version=$(get_lustre_version mds)
2956 echo $version | awk -F. '{print $2}'
2959 get_mds_version_patch () {
2960 local version=$(get_lustre_version mds)
2961 echo $version | awk -F. '{print $3}'
2964 get_mds_version_fix () {
2965 local version=$(get_lustre_version mds)
2966 echo $version | awk -F. '{print $4}'
2969 get_mds_fsstat_proc_path() {
2970 local major=$(get_mds_version_major)
2971 local minor=$(get_mds_version_minor)
2972 if [ $major -le 1 -a $minor -le 8 ] ; then
2979 get_mds_mntdev_proc_path() {
2980 local fsstat_dev=$(get_mds_fsstat_proc_path)
2981 echo "$fsstat_dev.*.mntdev"
2984 get_mdtlov_proc_path() {
2986 local major=$(get_mds_version_major)
2987 local minor=$(get_mds_version_minor)
2988 if [ $major -le 1 -a $minor -le 8 ] ; then
2989 echo "${fsname}-mdtlov"
2991 echo "${fsname}-MDT0000-mdtlov"
2995 get_mdtosc_proc_path() {
2997 local major=$(get_mds_version_major)
2998 local minor=$(get_mds_version_minor)
2999 if [ $major -le 1 -a $minor -le 8 ] ; then
3002 echo "${ost}-osc-MDT0000"
3006 get_mds_mdt_device_proc_path() {
3007 local major=$(get_mds_version_major)
3008 local minor=$(get_mds_version_minor)
3009 if [ $major -le 1 -a $minor -le 8 ] ; then
3017 local major=$(get_mds_version_major)
3018 local minor=$(get_mds_version_minor)
3019 if [ $major -le 1 -a $minor -le 8 ] ; then
3026 ########################
3028 convert_facet2label() {
3031 if [ x$facet = xost ]; then
3035 local varsvc=${facet}_svc
3037 if [ -n "${!varsvc}" ]; then
3040 error "No lablel for $facet!"
3044 get_clientosc_proc_path() {
3050 get_osc_import_name() {
3053 local label=$(convert_facet2label $ost)
3055 if [ "$facet" == "mds" ]; then
3056 get_mdtosc_proc_path $label
3060 get_clientosc_proc_path $label
3064 wait_import_state () {
3070 CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
3071 while [ "${CONN_STATE}" != "${expected}" ]; do
3072 if [ "${expected}" == "DISCONN" ]; then
3073 # for DISCONN we can only check after the proc entry is removed
3074 [ "x${CONN_STATE}" == "x" ] && return 0
3075 # with AT, the connect request timeout can be close to the reconnect
3076 # timeout, so the test can't observe the real disconnect
3077 [ "${CONN_STATE}" == "CONNECTING" ] && return 0
3080 # the disconnect rpc should not wait more than obd_timeout
3080 [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \
3081 error "can't put import for $CONN_PROC into ${expected} state" && return 1
3083 CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
3087 log "$CONN_PROC now in ${CONN_STATE} state"
3091 wait_osc_import_state() {
3095 local ost=$(get_osc_import_name $facet $ost_facet)
3100 CONN_PROC="osc.${ost}.ost_server_uuid"
3101 CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
3102 while [ "${CONN_STATE}" != "${expected}" ]; do
3103 if [ "${expected}" == "DISCONN" ]; then
3104 # for DISCONN we can only check after the proc entry is removed
3105 [ "x${CONN_STATE}" == "x" ] && return 0
3106 # with AT enabled, the connect request timeout can be close to the
3107 # reconnect timeout, so the test can't observe the real disconnect
3108 [ "${CONN_STATE}" == "CONNECTING" ] && return 0
3110 # the disconnect rpc should not wait more than obd_timeout
3111 [ $i -ge $(($TIMEOUT * 3 / 2)) ] && \
3112 error "can't put import for ${ost}(${ost_facet}) into ${expected} state" && return 1
3114 CONN_STATE=$(do_facet $facet lctl get_param -n $CONN_PROC 2>/dev/null | cut -f2)
3118 log "${ost_facet} now in ${CONN_STATE} state"
3122 get_clientmdc_proc_path() {
3130 do_nodesv $list "PATH=$LUSTRE/tests/:$PATH sh rpc.sh $@ "
3133 wait_clients_import_state () {
3139 local label=$(convert_facet2label $facet)
3142 ost* ) proc_path="osc.$(get_clientosc_proc_path $label).ost_server_uuid" ;;
3143 mds* ) proc_path="mdc.$(get_clientmdc_proc_path $label).mds_server_uuid" ;;
3144 *) error "unknown facet!" ;;
3147 if ! do_rpc_nodes $list wait_import_state $expected $proc_path; then
3148 error "import is not in ${expected} state"
3153 destroy_pool_int() {
3155 local OSTS=$(do_facet mds lctl pool_list $1 | awk '$1 !~ /^Pool:/ {print $1}')
3156 for ost in $OSTS; do
3157 do_facet mgs lctl pool_remove $1 $ost
3159 do_facet mgs lctl pool_destroy $1
3162 # <fsname>.<poolname> or <poolname>
3164 local fsname=${1%%.*}
3165 local poolname=${1##$fsname.}
3167 [[ x$fsname = x$poolname ]] && fsname=$FSNAME
3171 pool_list $fsname.$poolname || return $?
3173 destroy_pool_int $fsname.$poolname
3175 [[ $RC -ne 0 ]] && return $RC
3177 wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \
3178 2>/dev/null || echo foo" "foo" || RC=1
3180 if [[ $RC -eq 0 ]]; then
3181 remove_pool_from_list $fsname.$poolname
3183 error "destroy pool failed $1"
3189 local fsname=${1:-$FSNAME}
3191 local listvar=${fsname}_CREATED_POOLS
3195 [ x${!listvar} = x ] && return 0
3197 echo "destroying the created pools: ${!listvar}"
3198 for poolname in ${!listvar//,/ }; do
3199 destroy_pool $fsname.$poolname
3204 local fsname=${1:-$FSNAME}
3206 destroy_pools $fsname
3212 local ts=$(date +%s)
3214 # bug 20237, comment 11
3215 # It would also be useful to provide the option
3216 # of writing the file to an NFS directory so it doesn't need to be copied.
3219 [ -d "$SHARED_DIR_LOGS" ] && tmp=$SHARED_DIR_LOGS && docp=false
3221 # dump lustre logs, dmesg
3222 do_nodes $list "log=$tmp/\\\$(hostname)-debug-$ts.log ;
3223 lctl dk \\\$log >/dev/null;
3224 log=$tmp/\\\$(hostname)-dmesg-$ts.log;
3227 # FIXME: does it make sense to collect the logs for $ts only, but all
3229 # rsync $TMP/*${TESTSUITE}* to gather the logs dumped by error fn
3230 local logs=$TMP/'*'${TESTSUITE}'*'
3232 logs=$logs' '$tmp/'*'$ts'*'
3234 for node in ${list//,/ }; do
3235 rsync -az $node:"$logs" $TMP
3238 local archive=$TMP/${TESTSUITE}-$ts.tar.bz2
3239 tar -jcf $archive $tmp/*$ts* $TMP/*${TESTSUITE}*
3245 local list=${1:-$(comma_list $(nodes_list))}
3247 [ -n "${TESTSUITE}" ] && do_nodes $list "rm -f $TMP/*${TESTSUITE}*" || true
3259 for i in $(seq 0 $num_mntpts); do
3260 cmd="ls -laf ${mntpt_root}$i/$dir"
3266 for pid in $pids; do
3273 # target_start_and_reset_recovery_timer()
3274 # service_time = at_est2timeout(service_time);
3275 # service_time += 2 * (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC +
3276 # INITIAL_CONNECT_TIMEOUT);
3277 # CONNECTION_SWITCH_MAX : min(25U, max(CONNECTION_SWITCH_MIN,obd_timeout))
3278 #define CONNECTION_SWITCH_INC 1
3279 #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/20)
3280 #define CONNECTION_SWITCH_MIN 5U
3282 max_recovery_time () {
3283 local init_connect_timeout=$(( TIMEOUT / 20 ))
3284 [[ $init_connect_timeout -gt 5 ]] || init_connect_timeout=5
3286 local service_time=$(( $(at_max_get client) + $(( 2 * $(( 25 + 1 + init_connect_timeout)) )) ))
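# e.g. (illustrative) with TIMEOUT=100: init_connect_timeout = 100/20 = 5, so
# service_time = at_max(client) + 2 * (25 + 1 + 5) = at_max + 62 seconds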
3291 remove_mdt_files() {
3296 local mntpt=${MOUNT%/*}/$facet
3298 echo "removing files from $mdtdev on $facet: $files"
3299 mount -t $FSTYPE $MDS_MOUNT_OPTS $mdtdev $mntpt || return $?
3302 rm $mntpt/ROOT/$f || { rc=$?; break; }
3304 umount -f $mntpt || return $?
3308 duplicate_mdt_files() {
3313 local mntpt=${MOUNT%/*}/$facet
3315 echo "duplicating files on $mdtdev on $facet: $files"
3316 mkdir -p $mntpt || return $?
3317 mount -t $FSTYPE $MDS_MOUNT_OPTS $mdtdev $mntpt || return $?
3327 tmp=$(mktemp $TMP/setfattr.XXXXXXXXXX)
3328 pushd $mntpt/ROOT > /dev/null || return $?
3331 touch $f.bad || return $?
3332 getfattr -n trusted.lov $f | sed "s#$f#&.bad#" > $tmp
3334 [ $rc -eq 0 ] || return $rc
3335 setfattr --restore $tmp || return $?