3 trap 'print_summary && touch $TF_FAIL && \
4 echo "test-framework exiting on error"' ERR
9 export EJOURNAL=${EJOURNAL:-""}
10 export REFORMAT=${REFORMAT:-""}
11 export WRITECONF=${WRITECONF:-""}
12 export VERBOSE=${VERBOSE:-false}
13 export CATASTROPHE=${CATASTROPHE:-/proc/sys/lnet/catastrophe}
16 export GSS_PIPEFS=false
17 export IDENTITY_UPCALL=default
19 # specify environment variable containing batch job name for server statistics
20 export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
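# Hedged examples (the scheduler variable below is an illustration, not a value
# this framework defines):
#   JOBID_VAR=procname_uid   # derive the job ID from process name + UID (default)
#   JOBID_VAR=SLURM_JOB_ID   # use a batch scheduler's job ID environment variable
#   JOBID_VAR=disable        # turn per-job statistics off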
22 # LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or
23 # LOAD_LLOOP is true. LOAD_LLOOP is false by default.
24 export LOAD_LLOOP=${LOAD_LLOOP:-false}
26 #export PDSH="pdsh -S -Rssh -w"
28 # function used by scripts run on remote nodes
29 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
30 . $LUSTRE/tests/functions.sh
31 . $LUSTRE/tests/yaml.sh
33 export LD_LIBRARY_PATH=${LUSTRE}/utils:${LD_LIBRARY_PATH}
35 LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg}
37 EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh}
39 if [ -f "$EXCEPT_LIST_FILE" ]; then
40 echo "Reading test skip list from $EXCEPT_LIST_FILE"
45 # check config files for options in decreasing order of preference
46 [ -z "$MODPROBECONF" -a -f /etc/modprobe.d/lustre.conf ] &&
47 MODPROBECONF=/etc/modprobe.d/lustre.conf
48 [ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] &&
49 MODPROBECONF=/etc/modprobe.d/Lustre
50 [ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
51 MODPROBECONF=/etc/modprobe.conf
55 [[ $DIR/ = $MOUNT/* ]] || \
56 { failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
57 [[ $DIR1/ = $MOUNT1/* ]] || \
58 { failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
59 [[ $DIR2/ = $MOUNT2/* ]] || \
60 { failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting"; }
62 [ -n "$failed" ] && exit 99 || true
66 echo "usage: $0 [-r] [-f cfgfile]"
74 [ "$TESTSUITE" == "lfsck" ] && return 0
75 [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
77 local form="%-13s %-17s %-9s %s %s\n"
78 printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
79 echo "------------------------------------------------------------------------------------"
80 for O in $DEFAULT_SUITES; do
81 O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
82 [ "${!O}" = "no" ] && continue || true
83 local o=$(echo $O | tr "[:upper:]_" "[:lower:]-")
84 local log=${TMP}/${o}.log
85 if is_sanity_benchmark $o; then
86 log=${TMP}/sanity-benchmark.log
91 local status=Unfinished
93 skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' | sed 's/test_//g')
94 slow=$(egrep "^PASS|^FAIL" $log | tr -d "("| sed s/s\)$//g | sort -nr -k 3 | head -5 | awk '{ print $2":"$3"s" }')
95 total=$(grep duration $log | awk '{ print $2}')
96 if [ "${!O}" = "done" ]; then
100 local durations=$(egrep "^PASS|^FAIL" $log | tr -d "("| sed s/s\)$//g | awk '{ print $2":"$3"|" }')
101 details=$(printf "%s\n%s %s %s\n" "$details" "DDETAILS" "$O" "$(echo $durations)")
104 printf "$form" $status "$O" "${total}" "E=$skipped"
105 printf "$form" "-" "-" "-" "S=$(echo $slow)"
108 for O in $DEFAULT_SUITES; do
109 O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
110 if [ "${!O}" = "no" ]; then
111 printf "$form" "Skipped" "$O" ""
115 # print the detailed tests durations if DDETAILS=true
122 export LUSTRE=$(absolute_path $LUSTRE)
123 export TESTSUITE=$(basename $0 .sh)
124 export TEST_FAILED=false
125 export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
126 export RPC_MODE=${RPC_MODE:-false}
128 export MKE2FS=$MKE2FS
129 if [ -z "$MKE2FS" ]; then
130 if which mkfs.ldiskfs >/dev/null 2>&1; then
131 export MKE2FS=mkfs.ldiskfs
137 export DEBUGFS=$DEBUGFS
138 if [ -z "$DEBUGFS" ]; then
139 if which debugfs.ldiskfs >/dev/null 2>&1; then
140 export DEBUGFS=debugfs.ldiskfs
142 export DEBUGFS=debugfs
146 export TUNE2FS=$TUNE2FS
147 if [ -z "$TUNE2FS" ]; then
148 if which tunefs.ldiskfs >/dev/null 2>&1; then
149 export TUNE2FS=tunefs.ldiskfs
151 export TUNE2FS=tune2fs
155 export E2LABEL=$E2LABEL
156 if [ -z "$E2LABEL" ]; then
157 if which label.ldiskfs >/dev/null 2>&1; then
158 export E2LABEL=label.ldiskfs
160 export E2LABEL=e2label
164 export DUMPE2FS=$DUMPE2FS
165 if [ -z "$DUMPE2FS" ]; then
166 if which dumpfs.ldiskfs >/dev/null 2>&1; then
167 export DUMPE2FS=dumpfs.ldiskfs
169 export DUMPE2FS=dumpe2fs
173 export E2FSCK=$E2FSCK
174 if [ -z "$E2FSCK" ]; then
175 if which fsck.ldiskfs >/dev/null 2>&1; then
176 export E2FSCK=fsck.ldiskfs
182 export LFSCK_BIN=${LFSCK_BIN:-lfsck}
183 export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite
184 export FSCK_MAX_ERR=4 # File system errors left uncorrected
186 export ZFS=${ZFS:-zfs}
187 export ZPOOL=${ZPOOL:-zpool}
188 export ZDB=${ZDB:-zdb}
189 export PARTPROBE=${PARTPROBE:-partprobe}
191 #[ -d /r ] && export ROOT=${ROOT:-/r}
192 export TMP=${TMP:-$ROOT/tmp}
193 export TESTSUITELOG=${TMP}/${TESTSUITE}.log
194 export LOGDIR=${LOGDIR:-${TMP}/test_logs/$(date +%s)}
195 export TESTLOG_PREFIX=$LOGDIR/$TESTSUITE
197 export HOSTNAME=${HOSTNAME:-$(hostname -s)}
198 if ! echo $PATH | grep -q $LUSTRE/utils; then
199 export PATH=$LUSTRE/utils:$PATH
201 if ! echo $PATH | grep -q $LUSTRE/utils/gss; then
202 export PATH=$LUSTRE/utils/gss:$PATH
204 if ! echo $PATH | grep -q $LUSTRE/tests; then
205 export PATH=$LUSTRE/tests:$PATH
207 if ! echo $PATH | grep -q $LUSTRE/../lustre-iokit/sgpdd-survey; then
208 export PATH=$LUSTRE/../lustre-iokit/sgpdd-survey:$PATH
210 export LST=${LST:-"$LUSTRE/../lnet/utils/lst"}
211 [ ! -f "$LST" ] && export LST=$(which lst)
212 export SGPDDSURVEY=${SGPDDSURVEY:-"$LUSTRE/../lustre-iokit/sgpdd-survey/sgpdd-survey"}
213 [ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey)
214 # Ubuntu, at least, has a truncate command in /usr/bin
215 # so fully path our truncate command.
216 export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate}
217 export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"}
218 [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
219 if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
220 export PATH=$LUSTRE/tests/racer:$PATH
222 if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then
223 export PATH=$LUSTRE/tests/mpi:$PATH
225 export RSYNC_RSH=${RSYNC_RSH:-rsh}
227 export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
228 [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
229 export LFS=${LFS:-"$LUSTRE/utils/lfs"}
230 [ ! -f "$LFS" ] && export LFS=$(which lfs)
231 SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"}
232 GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"}
234 export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
235 if [ ! -f "$L_GETIDENTITY" ]; then
236 if which l_getidentity > /dev/null 2>&1; then
237 export L_GETIDENTITY=$(which l_getidentity)
239 export L_GETIDENTITY=NONE
242 export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"}
243 [ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid"
244 export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
245 [ ! -f "$MKFS" ] && export MKFS="mkfs.lustre"
246 export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
247 [ ! -f "$TUNEFS" ] && export TUNEFS="tunefs.lustre"
248 export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
249 export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod}
250 [ ! -f "$LUSTRE_RMMOD" ] &&
251 export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null)
252 export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate}
253 [ ! -f "$LFS_MIGRATE" ] &&
254 export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null)
255 export NAME=${NAME:-local}
256 export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
257 [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \
258 export LGSSD=$(which lgssd)
259 export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"}
260 [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null)
261 export KRB5DIR=${KRB5DIR:-"/usr/kerberos"}
263 export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests}
266 if [ "$ACCEPTOR_PORT" ]; then
267 export PORT_OPT="--port $ACCEPTOR_PORT"
272 echo "Using GSS/krb5 ptlrpc security flavor"
273 which lgss_keyring > /dev/null 2>&1 || \
274 error_exit "built with gss disabled! SEC=$SEC"
285 IDENTITY_UPCALL=false
289 export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
291 # Paths on remote nodes, if different
292 export RLUSTRE=${RLUSTRE:-$LUSTRE}
293 export RPWD=${RPWD:-$PWD}
294 export I_MOUNTED=${I_MOUNTED:-"no"}
295 if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \
296 ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \
297 ! -f $LUSTRE/mdt/mdt.ko ]; then
298 export CLIENTMODSONLY=yes
301 export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
302 export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
306 while getopts "rvwf:" opt $*; do
309 r) REFORMAT=--reformat;;
311 w) WRITECONF=writeconf;;
316 shift $((OPTIND - 1))
319 # print the durations of each test if "true"
320 DDETAILS=${DDETAILS:-false}
321 [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
331 ncpts=$(do_facet $facet "lctl get_param -n " \
332 "cpu_partition_table 2>/dev/null| wc -l" || echo 1)
334 if [ $ncpts -eq 0 ]; then
342 # split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3"
343 eval set -- $(tr "[:punct:]" " " <<< $*)
345 echo -n "$((($1 << 16) | ($2 << 8) | $3))"
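# Hedged examples of the encoding above (values worked out by hand):
#   version_code 2.3.50     # prints $(((2 << 16) | (3 << 8) | 50)) == 131890
#   version_code 1.8.6-wc3  # fields beyond the third ("wc3") are ignored -> 67590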
348 export LINUX_VERSION=$(uname -r | sed -e "s/[-.]/ /3" -e "s/ .*//")
349 export LINUX_VERSION_CODE=$(version_code ${LINUX_VERSION//\./ })
352 /sbin/lsmod | grep -q "^\<$1\>"
355 # Load a module on the system where this is running.
357 # Synopsis: load_module module_name [module arguments for insmod/modprobe]
359 # If module arguments are not given but MODOPTS_<MODULE> is set, then its value
360 # will be used as the arguments. Otherwise arguments will be obtained from
361 # /etc/modprobe.conf, from /etc/modprobe.d/Lustre, or else none will be used.
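# Hedged usage sketch (the lnet option shown is only an example, not a default
# asserted by this framework):
#   MODOPTS_LNET="networks=tcp0(eth0)" load_module ../lnet/lnet/lnet
#   load_module ../lnet/lnet/lnet networks="tcp0(eth0)"   # same options, passed explicitly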
368 BASE=`basename $module $EXT`
370 module_loaded ${BASE} && return
372 # If no module arguments were passed, get them from $MODOPTS_<MODULE>, else from modprobe.conf.
374 if [ $# -eq 0 ]; then
375 # $MODOPTS_<MODULE>; we could use associative arrays, but that's not in
376 # Bash until 4.x, so we resort to eval.
377 optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
378 eval set -- \$$optvar
379 if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
380 # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
382 opt=$(awk -v var="^options $BASE" '$0 ~ var \
383 {gsub("'"options $BASE"'",""); print}' $MODPROBECONF)
384 set -- $(echo -n $opt)
386 # Ensure we have accept=all for lnet
387 if [ $(basename $module) = lnet ]; then
388 # OK, this is a bit wordy...
389 local arg accept_all_present=false
392 [ "$arg" = accept=all ] && \
393 accept_all_present=true
395 $accept_all_present || set -- "$@" accept=all
401 [ $# -gt 0 ] && echo "${module} options: '$*'"
403 # Note that insmod will ignore anything in modprobe.conf, which is why we're
404 # passing options on the command-line.
405 if [ "$BASE" == "lnet_selftest" ] && \
406 [ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]; then
407 insmod ${LUSTRE}/../lnet/selftest/${module}${EXT}
408 elif [ -f ${LUSTRE}/${module}${EXT} ]; then
409 insmod ${LUSTRE}/${module}${EXT} "$@"
411 # must be testing a "make install" or "rpm" installation
412 # note that failing to load ptlrpc_gss is not considered fatal
413 if [ "$BASE" == "ptlrpc_gss" ]; then
414 modprobe $BASE "$@" 2>/dev/null || echo "gss/krb5 is not supported"
421 llite_lloop_enabled() {
422 local n1=$(uname -r | cut -d. -f1)
423 local n2=$(uname -r | cut -d. -f2)
424 local n3=$(uname -r | cut -d- -f1 | cut -d. -f3)
426 # load the llite_lloop module for < 2.6.32 kernels
427 if [[ $n1 -lt 2 ]] || [[ $n1 -eq 2 && $n2 -lt 6 ]] || \
428 [[ $n1 -eq 2 && $n2 -eq 6 && $n3 -lt 32 ]] || \
435 load_modules_local() {
436 if [ -n "$MODPROBE" ]; then
438 echo "Using modprobe to load modules"
442 echo Loading modules from $LUSTRE
446 if [ -f /sys/devices/system/cpu/online ]; then
447 ncpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/online) + 1))
448 echo "detected $ncpus online CPUs by sysfs"
450 ncpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
452 if [ $rc -eq 0 ]; then
453 echo "detected $ncpus online CPUs by getconf"
455 echo "Can't detect number of CPUs"
460 # if there is only one CPU core, libcfs can only create one partition
461 # if there are more than 4 CPU cores, libcfs should create multiple CPU
462 # partitions. So we just force libcfs to create 2 partitions for
463 # systems with 2 to 4 cores
464 if [ $ncpus -le 4 ] && [ $ncpus -gt 1 ]; then
465 # force to enable multiple CPU partitions
466 echo "Force libcfs to create 2 CPU partitions"
467 MODOPTS_LIBCFS="cpu_npartitions=2 $MODOPTS_LIBCFS"
469 echo "libcfs will create CPU partition based on online CPUs"
472 load_module ../libcfs/libcfs/libcfs
474 [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG"
475 [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }"
476 load_module ../lnet/lnet/lnet
477 LNETLND=${LNETLND:-"socklnd/ksocklnd"}
478 load_module ../lnet/klnds/$LNETLND
479 load_module obdclass/obdclass
480 load_module ptlrpc/ptlrpc
481 load_module ptlrpc/gss/ptlrpc_gss
489 load_module obdecho/obdecho
490 if ! client_only; then
491 SYMLIST=/proc/kallsyms
492 grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; }
493 grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; }
494 grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; }
495 load_module lfsck/lfsck
496 [ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS
497 if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
499 load_module osd-zfs/osd_zfs
501 if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
502 grep -q exportfs_decode_fh $SYMLIST ||
503 { modprobe exportfs 2> /dev/null || true; }
504 load_module ../ldiskfs/ldiskfs
505 load_module osd-ldiskfs/osd_ldiskfs
518 load_module llite/lustre
519 llite_lloop_enabled && load_module llite/llite_lloop
520 [ -d /r ] && OGDB=${OGDB:-"/r/tmp"}
522 rm -f $OGDB/ogdb-$HOSTNAME
523 $LCTL modules > $OGDB/ogdb-$HOSTNAME
525 # 'mount' doesn't look in $PATH, just sbin
526 if [ -f $LUSTRE/utils/mount.lustre ] && \
527 ! grep -qe "/sbin/mount\.lustre " /proc/mounts; then
528 [ ! -f /sbin/mount.lustre ] && touch /sbin/mount.lustre
529 mount --bind $LUSTRE/utils/mount.lustre /sbin/mount.lustre || true
536 # load modules on remote nodes optionally
537 # lustre-tests have to be installed on these nodes
538 if $LOAD_MODULES_REMOTE; then
539 local list=$(comma_list $(remote_nodes_list))
540 if [ -n "$list" ]; then
541 echo "loading modules on: '$list'"
542 do_rpc_nodes "$list" load_modules_local
548 LEAK_LUSTRE=$(dmesg | tail -n 30 | grep "obd_memory.*leaked" || true)
549 LEAK_PORTALS=$(dmesg | tail -n 20 | grep "Portals memory leaked" || true)
550 if [ "$LEAK_LUSTRE" -o "$LEAK_PORTALS" ]; then
551 echo "$LEAK_LUSTRE" 1>&2
552 echo "$LEAK_PORTALS" 1>&2
553 mv $TMP/debug $TMP/debug-leak.`date +%s` || true
554 echo "Memory leaks detected"
555 [ -n "$IGNORE_LEAK" ] && { echo "ignoring leaks" && return 0; } || true
561 wait_exit_ST client # bug 12845
563 $LUSTRE_RMMOD ldiskfs || return 2
565 if $LOAD_MODULES_REMOTE; then
566 local list=$(comma_list $(remote_nodes_list))
567 if [ -n "$list" ]; then
568 echo "unloading modules on: '$list'"
569 do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs
570 do_rpc_nodes "$list" check_mem_leak
574 if grep -qe "/sbin/mount\.lustre" /proc/mounts; then
575 umount /sbin/mount.lustre || true
576 [ -w /sbin/mount.lustre -a ! -s /sbin/mount.lustre ] && \
577 rm -f /sbin/mount.lustre || true
580 check_mem_leak || return 254
582 echo "modules unloaded."
587 local facet=${1:-$SINGLEMDS}
588 local fstype=$(facet_fstype $facet)
591 ldiskfs) size=50;; # largest seen is 44, leave some headroom
592 zfs) size=400;; # largest seen is 384
598 check_gss_daemon_nodes() {
602 do_nodesv $list "num=\\\$(ps -o cmd -C $dname | grep $dname | wc -l);
603 if [ \\\"\\\$num\\\" -ne 1 ]; then
604 echo \\\$num instance of $dname;
609 check_gss_daemon_facet() {
613 num=`do_facet $facet ps -o cmd -C $dname | grep $dname | wc -l`
614 if [ $num -ne 1 ]; then
615 echo "$num instance of $dname on $facet"
624 echo Stopping $@ on $list
625 do_nodes $list "killall -2 $@ 2>/dev/null || true"
628 # start gss daemons on all nodes, or
629 # "daemon" on "list" if set
630 start_gss_daemons() {
634 if [ "$list" ] && [ "$daemon" ] ; then
635 echo "Starting gss daemon on nodes: $list"
636 do_nodes $list "$daemon" || return 8
640 local list=$(comma_list $(mdts_nodes))
642 echo "Starting gss daemon on mds: $list"
643 do_nodes $list "$LSVCGSSD -v" || return 1
645 do_nodes $list "$LGSSD -v" || return 2
648 list=$(comma_list $(osts_nodes))
649 echo "Starting gss daemon on ost: $list"
650 do_nodes $list "$LSVCGSSD -v" || return 3
651 # starting on clients
653 local clients=${CLIENTS:-`hostname`}
655 echo "Starting $LGSSD on clients $clients "
656 do_nodes $clients "$LGSSD -v" || return 4
659 # wait for daemons to enter "stable" status
663 # check daemons are running
665 list=$(comma_list $(mdts_nodes) $(osts_nodes))
666 check_gss_daemon_nodes $list lsvcgssd || return 5
668 list=$(comma_list $(mdts_nodes))
669 check_gss_daemon_nodes $list lgssd || return 6
672 check_gss_daemon_nodes $clients lgssd || return 7
677 local list=$(comma_list $(mdts_nodes))
679 send_sigint $list lsvcgssd lgssd
681 list=$(comma_list $(osts_nodes))
682 send_sigint $list lsvcgssd
684 list=${CLIENTS:-`hostname`}
685 send_sigint $list lgssd
690 if ! module_loaded ptlrpc_gss; then
691 load_module ptlrpc/gss/ptlrpc_gss
692 module_loaded ptlrpc_gss ||
693 error_exit "init_gss : GSS=$GSS, but gss/krb5 is not supported!"
695 start_gss_daemons || error_exit "start gss daemon failed! rc=$?"
697 if [ -n "$LGSS_KEYRING_DEBUG" ]; then
698 echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level
706 # maybe cleanup credential cache?
712 local var=${facet}_svc
720 echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9_]\+//' |
721 tr '[:lower:]' '[:upper:]'
727 if [ $facet == mgs ]; then
731 echo -n $facet | sed -e 's/^fs[0-9]\+//' | sed -e 's/^[a-z]\+//'
739 if [ -n "${!var}" ]; then
744 var=$(facet_type $facet)FSTYPE
745 if [ -n "${!var}" ]; then
750 if [ -n "$FSTYPE" ]; then
755 if [[ $facet == mgs ]] && combined_mgs_mds; then
767 local facets=$(get_facets)
770 for facet in ${facets//,/ }; do
771 if [ $node == $(facet_host $facet) ] ||
772 [ $node == "$(facet_failover_host $facet)" ]; then
773 fstype=$(facet_fstype $facet)
774 if [[ $fstypes != *$fstype* ]]; then
775 fstypes+="${fstypes:+,}$fstype"
786 local fstype=$(facet_fstype $facet)
790 label=$(do_facet ${facet} "$E2LABEL ${dev} 2>/dev/null");;
792 label=$(do_facet ${facet} "$ZFS get -H -o value lustre:svname \
793 ${dev} 2>/dev/null");;
795 error "unknown fstype!";;
803 local device=$(mdsdevname $num)
804 local label=$(devicelabel mds$num ${device} | grep -v "CMD: ")
810 local device=$(ostdevname $num)
811 local label=$(devicelabel ost$num ${device} | grep -v "CMD: ")
816 # Get the device of a facet.
823 mgs) device=$(mgsdevname) ;;
824 mds*) device=$(mdsdevname $(facet_number $facet)) ;;
825 ost*) device=$(ostdevname $(facet_number $facet)) ;;
826 fs2mds) device=$(mdsdevname 1_2) ;;
827 fs2ost) device=$(ostdevname 1_2) ;;
828 fs3ost) device=$(ostdevname 2_2) ;;
836 # Get the virtual device of a facet.
843 mgs) device=$(mgsvdevname) ;;
844 mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
845 ost*) device=$(ostvdevname $(facet_number $facet)) ;;
846 fs2mds) device=$(mdsvdevname 1_2) ;;
847 fs2ost) device=$(ostvdevname 1_2) ;;
848 fs3ost) device=$(ostvdevname 2_2) ;;
856 # Re-read the partition table on failover partner host.
857 # After a ZFS storage pool is created on a shared device, the partition table
858 # on the device may change. However, the operating system on the failover
859 # host may not notice the change automatically. Without the up-to-date partition
860 # block devices, 'zpool import ..' cannot find the labels, whose positions are
861 # relative to partition rather than disk beginnings.
863 # This function performs partprobe on the failover host to make it re-read the partition table.
866 refresh_partition_table() {
871 host=$(facet_passive_host $facet)
872 if [[ -n "$host" ]]; then
873 do_node $host "$PARTPROBE $device"
878 # Get ZFS storage pool name.
885 device=$(facet_device $facet)
886 # poolname is string before "/"
887 poolname="${device%%/*}"
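# Hedged example: if facet_device prints an assumed dataset name such as
# "lustre-ost1/ost1", zpool_name prints the pool component, "lustre-ost1".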
893 # Export ZFS storage pool.
894 # Before exporting the pool, all datasets within the pool should be unmounted.
902 poolname=$(zpool_name $facet)
904 if [[ -n "$poolname" ]]; then
905 do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
906 grep -q ^$poolname/ /proc/mounts ||
907 $ZPOOL export $opts $poolname"
912 # Import ZFS storage pool.
913 # Force importing, even if the pool appears to be potentially active.
918 local opts=${@:-"-o cachefile=none"}
921 poolname=$(zpool_name $facet)
923 if [[ -n "$poolname" ]]; then
924 opts+=" -d $(dirname $(facet_vdevice $facet))"
925 do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 ||
926 $ZPOOL import -f $opts $poolname"
931 # Set the "cachefile=none" property on ZFS storage pool so that the pool
932 # is not automatically imported on system startup.
934 # In a failover environment, this will provide resource level fencing which
935 # will ensure that the same ZFS storage pool will not be imported concurrently
936 # on different nodes.
938 disable_zpool_cache() {
942 poolname=$(zpool_name $facet)
944 if [[ -n "$poolname" ]]; then
945 do_facet $facet "$ZPOOL set cachefile=none $poolname"
950 # This and set_osd_param() shall be used to access OSD parameters
951 # that once existed under "obdfilter":
956 # writethrough_cache_enable
960 local device=${2:-$FSNAME-OST*}
963 do_nodes $nodes "$LCTL get_param -n obdfilter.$device.$name \
964 osd-*.$device.$name 2>&1" | grep -v 'Found no match'
969 local device=${2:-$FSNAME-OST*}
973 do_nodes $nodes "$LCTL set_param -n obdfilter.$device.$name=$value \
974 osd-*.$device.$name=$value 2>&1" | grep -v 'Found no match'
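# Hedged usage sketch (an empty device argument falls back to the $FSNAME-OST*
# default above; the parameter name is one of those listed in the comment):
#   get_osd_param $(comma_list $(osts_nodes)) "" writethrough_cache_enable
#   set_osd_param $(comma_list $(osts_nodes)) "" writethrough_cache_enable 1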
978 local dz=${1:-$DEBUG_SIZE}
980 if [ -f /sys/devices/system/cpu/possible ]; then
981 local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
983 local cpus=$(getconf _NPROCESSORS_CONF)
986 # bug 19944, adjust size to be -gt num_possible_cpus()
987 # promise at least 2MB for every CPU
988 if [ -n "$cpus" ] && [ $((cpus * 2)) -gt $dz ]; then
991 lctl set_param debug_mb=$dz
994 set_default_debug () {
995 local debug=${1:-"$PTLDEBUG"}
996 local subsys=${2:-"$SUBSYSTEM"}
997 local debug_size=${3:-$DEBUG_SIZE}
999 [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
1000 [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
1002 [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
1005 set_default_debug_nodes () {
1008 if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
1009 nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
1013 do_rpc_nodes "$nodes" set_default_debug \
1014 \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true
1017 set_default_debug_facet () {
1019 local node=$(facet_active_host $facet)
1020 [ -z "$node" ] && echo "No host defined for facet $facet" && exit 1
1022 set_default_debug_nodes $node
1027 local facets=${1:-$(get_facets)}
1030 for facet in ${facets//,/ }; do
1033 [ $RC -eq 0 ] && continue
1035 if [ "$TESTSUITE.$TESTNAME" = "replay-dual.test_0a" ]; then
1036 skip "Restart of $facet failed!." && touch $LU482_FAILED
1038 error "Restart of $facet failed!"
1045 # Add argument "arg" (e.g., "loop") to the comma-separated list
1046 # of arguments for option "opt" (e.g., "-o") on command
1047 # line "opts" (e.g., "-o flock").
1053 local opt_pattern="\([[:space:]]\+\|^\)$opt"
1055 if echo "$opts" | grep -q "$opt_pattern"; then
1056 opts=$(echo "$opts" | sed -e \
1057 "s/$opt_pattern[[:space:]]*[^[:space:]]\+/&,$arg/")
1059 opts+="${opts:+ }$opt $arg"
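# Hedged examples of the behavior above:
#   csa_add "-o flock" -o loop   # -> "-o flock,loop"  (argument appended to existing -o)
#   csa_add "-n" -o loop         # -> "-n -o loop"     (option added when absent)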
1067 local dev=$(facet_active $facet)_dev
1068 local opt=${facet}_opt
1069 local mntpt=$(facet_mntpt $facet)
1070 local opts="${!opt} $@"
1072 if [ $(facet_fstype $facet) == ldiskfs ] &&
1073 ! do_facet $facet test -b ${!dev}; then
1074 opts=$(csa_add "$opts" -o loop)
1077 if [[ $(facet_fstype $facet) == zfs ]]; then
1078 # import ZFS storage pool
1079 import_zpool $facet || return ${PIPESTATUS[0]}
1082 echo "Starting ${facet}: $opts ${!dev} $mntpt"
1083 # for testing LU-482 error handling in mount_facets() and test_0a()
1084 if [ -f $TMP/test-lu482-trigger ]; then
1087 do_facet ${facet} "mkdir -p $mntpt; mount -t lustre $opts \
1091 if [ $RC -ne 0 ]; then
1092 echo "Start of ${!dev} on ${facet} failed ${RC}"
1094 set_default_debug_facet $facet
1096 label=$(devicelabel ${facet} ${!dev})
1097 [ -z "$label" ] && echo no label for ${!dev} && exit 1
1098 eval export ${facet}_svc=${label}
1099 echo Started ${label}
1104 # start facet device options
1110 eval export ${facet}_dev=${device}
1111 eval export ${facet}_opt=\"$@\"
1113 local varname=${facet}failover_dev
1114 if [ -n "${!varname}" ] ; then
1115 eval export ${facet}failover_dev=${!varname}
1117 eval export ${facet}failover_dev=$device
1120 local mntpt=$(facet_mntpt $facet)
1121 do_facet ${facet} mkdir -p $mntpt
1122 eval export ${facet}_MOUNT=$mntpt
1123 mount_facet ${facet}
1132 local HOST=`facet_active_host $facet`
1133 [ -z $HOST ] && echo stop: no host for $facet && return 0
1135 local mntpt=$(facet_mntpt $facet)
1136 running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts") || true
1137 if [ ${running} -ne 0 ]; then
1138 echo "Stopping $mntpt (opts:$@) on $HOST"
1139 do_facet ${facet} umount -d $@ $mntpt
1142 # umount should block, but we should wait for unrelated obd's
1143 # like the MGS or MGC to also stop.
1144 wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
1146 if [[ $(facet_fstype $facet) == zfs ]]; then
1147 # export ZFS storage pool
1152 # save quota version (both administrative and operational quotas)
1153 # add an additional parameter if mountpoint is ever different from $MOUNT
1155 # XXX This function is kept for interoperability with old servers (< 2.3.50);
1156 # it should be removed whenever we drop interoperability with such servers.
1158 quota_save_version() {
1159 local fsname=${2:-$FSNAME}
1161 local ver=$(tr -c -d "123" <<< $spec)
1162 local type=$(tr -c -d "ug" <<< $spec)
1164 [ -n "$ver" -a "$ver" != "3" ] && error "wrong quota version specifier"
1166 [ -n "$type" ] && { $LFS quotacheck -$type $MOUNT || error "quotacheck has failed"; }
1168 do_facet mgs "lctl conf_param ${fsname}-MDT*.mdd.quota_type=$spec"
1170 local osts=$(get_facets OST)
1171 for ost in ${osts//,/ }; do
1173 do_facet mgs "lctl conf_param ${!varsvc}.ost.quota_type=$spec"
1177 # a client could mount several Lustre filesystems
1179 # XXX This function is kept for interoperability with old servers (< 2.3.50);
1180 # it should be removed whenever we drop interoperability with such servers.
1183 local fsname=${1:-$FSNAME}
1185 do_facet $SINGLEMDS lctl get_param mdd.${fsname}-MDT*.quota_type ||
1187 do_nodes $(comma_list $(osts_nodes)) \
1188 lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$?
1192 # XXX This function is kept for interoperability with old servers (< 2.3.50);
1193 # it should be removed whenever we drop interoperability with such servers.
1195 restore_quota_old() {
1196 local mntpt=${1:-$MOUNT}
1197 local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2)
1198 if [ ! "$old_QUOTA_TYPE" ] ||
1199 [ "$quota_type" = "$old_QUOTA_TYPE" ]; then
1202 quota_save_version $old_QUOTA_TYPE
1205 # XXX This function is kept for interoperability with old servers (< 2.3.50);
1206 # it should be removed whenever we drop interoperability with such servers.
1211 # no quota enforcement for now and accounting works out of the box
1214 # We need to save the original quota_type params and restore them after testing
1216 # Assume that the quota type is the same on the MDS and OST
1217 local quota_type=$(quota_type | grep MDT | cut -d "=" -f2)
1218 [ ${PIPESTATUS[0]} -eq 0 ] || error "quota_type failed!"
1219 echo "[HOST:$HOSTNAME] [old_quota_type:$quota_type] [new_quota_type:$QUOTA_TYPE]"
1220 if [ "$quota_type" != "$QUOTA_TYPE" ]; then
1221 export old_QUOTA_TYPE=$quota_type
1222 quota_save_version $QUOTA_TYPE
1224 qtype=$(tr -c -d "ug" <<< $QUOTA_TYPE)
1225 $LFS quotacheck -$qtype $mntpt || error "quotacheck has failed for $qtype"
1228 local quota_usrs=$QUOTA_USERS
1230 # get_filesystem_size
1231 local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
1232 local blk_soft=$((disksz + 1024))
1233 local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
1235 local Inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
1236 local i_soft=$Inodes
1237 local i_hard=$((i_soft + i_soft / 20))
1239 echo "Total disk size: $disksz block-softlimit: $blk_soft block-hardlimit:
1240 $blk_hard inode-softlimit: $i_soft inode-hardlimit: $i_hard"
1243 for usr in $quota_usrs; do
1244 echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
1246 cmd="$LFS setquota -$type $usr -b $blk_soft -B $blk_hard -i $i_soft -I $i_hard $mntpt"
1248 eval $cmd || error "$cmd FAILED!"
1250 # display the quota status
1251 echo "Quota settings for $usr : "
1252 $LFS quota -v -u $usr $mntpt || true
1256 # get mdt quota type
1258 local varsvc=${SINGLEMDS}_svc
1259 do_facet $SINGLEMDS $LCTL get_param -n \
1260 osd-$(facet_fstype $SINGLEMDS).${!varsvc}.quota_slave.enabled
1263 # get ost quota type
1265 # All OSTs should have same quota type
1266 local varsvc=ost1_svc
1267 do_facet ost1 $LCTL get_param -n \
1268 osd-$(facet_fstype ost1).${!varsvc}.quota_slave.enabled
1271 # restore old quota type settings
1273 if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
1278 if [ "$old_MDT_QUOTA_TYPE" ]; then
1279 do_facet mgs $LCTL conf_param \
1280 $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
1282 if [ "$old_OST_QUOTA_TYPE" ]; then
1283 do_facet mgs $LCTL conf_param \
1284 $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
1288 # Handle the case when there is a space in the lfs df
1289 # "filesystem summary" line the same as when there is no space.
1290 # This will allow fixing the "lfs df" summary line in the future.
1292 $LFS df $* | sed -e 's/filesystem /filesystem_/'
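# Hedged example: with the space squashed, callers can match the summary line the
# same way on old and new "lfs df" output, e.g.
#   lfs_df -i $MOUNT | grep "summary" | awk '{print $2}'   # total inode count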
1295 # Get free inodes on the MDT specified by mdt index; free inodes on
1296 # the whole filesystem will be returned when index == -1.
1302 if [ $index -eq -1 ]; then
1305 mdt_uuid=$(mdtuuid_from_index $index)
1308 free_inodes=$(lfs_df -i $MOUNT | grep $mdt_uuid | awk '{print $4}')
1313 if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then
1320 # save old quota type & set new quota type
1321 local mdt_qtype=$(mdt_quota_type)
1322 local ost_qtype=$(ost_quota_type)
1324 echo "[HOST:$HOSTNAME] [old_mdt_qtype:$mdt_qtype]" \
1325 "[old_ost_qtype:$ost_qtype] [new_qtype:$QUOTA_TYPE]"
1327 export old_MDT_QUOTA_TYPE=$mdt_qtype
1328 export old_OST_QUOTA_TYPE=$ost_qtype
1330 do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$QUOTA_TYPE ||
1331 error "set mdt quota type failed"
1332 do_facet mgs $LCTL conf_param $FSNAME.quota.ost=$QUOTA_TYPE ||
1333 error "set ost quota type failed"
1335 local quota_usrs=$QUOTA_USERS
1337 # get_filesystem_size
1338 local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
1339 local blk_soft=$((disksz + 1024))
1340 local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
1342 local inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
1343 local i_soft=$inodes
1344 local i_hard=$((i_soft + i_soft / 20))
1346 echo "Total disk size: $disksz block-softlimit: $blk_soft" \
1347 "block-hardlimit: $blk_hard inode-softlimit: $i_soft" \
1348 "inode-hardlimit: $i_hard"
1351 for usr in $quota_usrs; do
1352 echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
1354 cmd="$LFS setquota -$type $usr -b $blk_soft"
1355 cmd="$cmd -B $blk_hard -i $i_soft -I $i_hard $mntpt"
1357 eval $cmd || error "$cmd FAILED!"
1359 # display the quota status
1360 echo "Quota settings for $usr : "
1361 $LFS quota -v -u $usr $mntpt || true
1368 local OPTIONS=${3:-$MOUNTOPT}
1370 local device=$MGSNID:/$FSNAME
1371 if [ -z "$mnt" -o -z "$FSNAME" ]; then
1372 echo Bad zconf mount command: opt=$OPTIONS dev=$device mnt=$mnt
1376 echo "Starting client: $client: $OPTIONS $device $mnt"
1377 do_node $client mkdir -p $mnt
1378 do_node $client mount -t lustre $OPTIONS $device $mnt || return 1
1380 set_default_debug_nodes $client
1392 [ "$3" ] && force=-f
1393 local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
1394 if [ $running -ne 0 ]; then
1395 echo "Stopping client $client $mnt (opts:$force)"
1396 do_node $client lsof -t $mnt || need_kill=no
1397 if [ "x$force" != "x" -a "x$need_kill" != "xno" ]; then
1398 pids=$(do_node $client lsof -t $mnt | sort -u);
1399 if [ -n "$pids" ]; then
1400 do_node $client kill -9 $pids || true
1404 busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") || true
1405 if [ $busy -ne 0 ] ; then
1406 echo "$mnt is still busy, wait one second" && sleep 1
1407 do_node $client umount $force $mnt
1412 # nodes is comma list
1413 sanity_mount_check_nodes () {
1419 # FIXME: assume that all cluster nodes run the same os
1420 [ "$(uname)" = Linux ] || return 0
1423 for mnt in $mnts ; do
1424 do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts);
1425 mpts=\\\$(mount | grep -c $mnt' ');
1426 if [ \\\$running -ne \\\$mpts ]; then
1427 echo \\\$(hostname) env are INSANE!;
1430 [ $? -eq 0 ] || rc=1
1435 sanity_mount_check_servers () {
1436 [ "$CLIENTONLY" ] &&
1437 { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
1438 echo Checking servers environments
1440 # FIXME: modify get_facets to display all facets wo params
1441 local facets="$(get_facets OST),$(get_facets MDS),mgs"
1445 for facet in ${facets//,/ }; do
1446 node=$(facet_host ${facet})
1447 mntpt=$(facet_mntpt $facet)
1448 sanity_mount_check_nodes $node $mntpt ||
1449 { error "server $node environments are insane!"; return 1; }
1453 sanity_mount_check_clients () {
1454 local clients=${1:-$CLIENTS}
1455 local mntpt=${2:-$MOUNT}
1456 local mntpt2=${3:-$MOUNT2}
1458 [ -z $clients ] && clients=$(hostname)
1459 echo Checking clients $clients environments
1461 sanity_mount_check_nodes $clients $mntpt $mntpt2 ||
1462 error "clients environments are insane!"
1465 sanity_mount_check () {
1466 sanity_mount_check_servers || return 1
1467 sanity_mount_check_clients || return 2
1470 # mount clients if not mounted
1471 zconf_mount_clients() {
1474 local OPTIONS=${3:-$MOUNTOPT}
1476 local device=$MGSNID:/$FSNAME
1477 if [ -z "$mnt" -o -z "$FSNAME" ]; then
1478 echo Bad zconf mount command: opt=$OPTIONS dev=$device mnt=$mnt
1482 echo "Starting client $clients: $OPTIONS $device $mnt"
1485 running=\\\$(mount | grep -c $mnt' ');
1487 if [ \\\$running -eq 0 ] ; then
1489 mount -t lustre $OPTIONS $device $mnt;
1492 exit \\\$rc" || return ${PIPESTATUS[0]}
1494 echo "Started clients $clients: "
1495 do_nodes $clients "mount | grep $mnt' '"
1497 set_default_debug_nodes $clients
1502 zconf_umount_clients() {
1507 [ "$3" ] && force=-f
1509 echo "Stopping clients: $clients $mnt (opts:$force)"
1510 do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts);
1511 if [ \\\$running -ne 0 ] ; then
1512 echo Stopping client \\\$(hostname) $mnt opts:$force;
1513 lsof $mnt || need_kill=no;
1514 if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then
1515 pids=\\\$(lsof -t $mnt | sort -u);
1516 if [ -n \\\"\\\$pids\\\" ]; then
1520 while umount $force $mnt 2>&1 | grep -q "busy"; do
1521 echo "$mnt is still busy, wait one second" && sleep 1;
1528 echo + $POWER_DOWN $node
1532 shutdown_node_hard () {
1534 local attempts=$SHUTDOWN_ATTEMPTS
1536 for i in $(seq $attempts) ; do
1539 wait_for_function --quiet "! ping -w 3 -c 1 $host" 5 1 && return 0
1540 echo "waiting for $host to fail attempts=$attempts"
1541 [ $i -lt $attempts ] || \
1542 { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
1548 local mnt=${2:-$MOUNT}
1551 if [ "$FAILURE_MODE" = HARD ]; then
1552 shutdown_node_hard $client
1554 zconf_umount_clients $client $mnt -f
1560 local facets="$(get_facets OST),$(get_facets MDS)"
1563 combined_mgs_mds || facets="$facets,mgs"
1565 for facet in ${facets//,/ }; do
1566 if [ $(facet_active_host $facet) == $host ]; then
1567 affected="$affected $facet"
1571 echo $(comma_list $affected)
1576 local host=${2:-$(facet_host $facet)}
1578 local label=$(convert_facet2label $facet)
1579 do_node $host $LCTL dl | awk '{print $4}' | grep -q -x $label
1582 facets_up_on_host () {
1584 local facets=$(facets_on_host $host)
1587 for facet in ${facets//,/ }; do
1588 if $(facet_up $facet $host); then
1589 affected_up="$affected_up $facet"
1593 echo $(comma_list $affected_up)
1599 if [ "$FAILURE_MODE" = HARD ]; then
1600 shutdown_node_hard $(facet_active_host $facet)
1608 echo + $POWER_UP $node
1621 if [ "$FAILURE_MODE" = HARD ]; then
1622 reboot_node $(facet_active_host $facet)
1630 if [ "$FAILURE_MODE" = HARD ]; then
1640 for facet in ${facets//,/ }; do
1641 hosts=$(expand_list $hosts $(facet_host $facet) )
1647 _check_progs_installed () {
1651 for prog in $progs; do
1652 if ! [ "$(which $prog)" -o "${!prog}" ]; then
1653 echo $prog missing on $(hostname)
1660 check_progs_installed () {
1664 do_rpc_nodes "$nodes" _check_progs_installed $@
1667 # recovery-scale functions
1669 echo __$(echo $1 | tr '-' '_' | tr '.' '_')
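# Hedged example: node_var_name "client-15.lab" prints "__client_15_lab" (hostname is
# illustrative); callers build per-node variables such as "$(node_var_name $client)_load".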
1672 start_client_load() {
1675 local var=$(node_var_name $client)_load
1676 eval export ${var}=$load
1678 do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
1679 BREAK_ON_ERROR=$BREAK_ON_ERROR \
1680 END_RUN_FILE=$END_RUN_FILE \
1681 LOAD_PID_FILE=$LOAD_PID_FILE \
1682 TESTLOG_PREFIX=$TESTLOG_PREFIX \
1683 TESTNAME=$TESTNAME \
1684 DBENCH_LIB=$DBENCH_LIB \
1685 DBENCH_SRC=$DBENCH_SRC \
1686 CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
1690 log "Started client load: ${load} on $client"
1692 # get the children process IDs
1693 local pids=$(ps --ppid $ppid -o pid= | xargs)
1694 CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids"
1698 start_client_loads () {
1699 local -a clients=(${1//,/ })
1700 local numloads=${#CLIENT_LOADS[@]}
1703 for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
1704 testnum=$((nodenum % numloads))
1705 start_client_load ${clients[nodenum]} ${CLIENT_LOADS[testnum]}
1707 # bug 22169: wait for the background threads to start
1711 # only for remote client
1712 check_client_load () {
1714 local var=$(node_var_name $client)_load
1715 local TESTLOAD=run_${!var}.sh
1717 ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1
1719 # bug 18914: try to connect several times, not only when
1720 # checking ps, but also when running check_catastrophe
1723 while [ $RC = 254 -a $tries -gt 0 ]; do
1727 if ! check_catastrophe $client; then
1729 if [ $RC -eq 254 ]; then
1730 # FIXME: not sure how long we should sleep here
1734 echo "check catastrophe failed: RC=$RC "
1738 # We can continue trying to connect if RC=254;
1739 # just print a warning about it
1740 if [ $RC = 254 ]; then
1741 echo "got a return status of $RC from do_node while checking catastrophe on $client"
1744 # see if the load is still on the client
1747 while [ $RC = 254 -a $tries -gt 0 ]; do
1751 if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then
1756 if [ $RC = 254 ]; then
1757 echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client"
1758 # see if we can diagnose a bit why this is
1763 check_client_loads () {
1764 local clients=${1//,/ }
1768 for client in $clients; do
1769 check_client_load $client
1771 if [ "$rc" != 0 ]; then
1772 log "Client load failed on node $client, rc=$rc"
1778 restart_client_loads () {
1779 local clients=${1//,/ }
1780 local expectedfail=${2:-""}
1784 for client in $clients; do
1785 check_client_load $client
1787 if [ "$rc" != 0 -a "$expectedfail" ]; then
1788 local var=$(node_var_name $client)_load
1789 start_client_load $client ${!var}
1790 echo "Restarted client load ${!var}: on $client. Checking ..."
1791 check_client_load $client
1793 if [ "$rc" != 0 ]; then
1794 log "Client load failed to restart on node $client, rc=$rc"
1795 # failure of one client load means the test fails;
1796 # we do not need to check the others
1805 # Start vmstat and save its process ID in a file.
1810 [ -z "$nodes" -o -z "$pid_file" ] && return 0
1813 "vmstat 1 > $TESTLOG_PREFIX.$TESTNAME.vmstat.\\\$(hostname -s).log \
1814 2>/dev/null </dev/null & echo \\\$! > $pid_file"
1817 # Display the nodes on which client loads failed.
1818 print_end_run_file() {
1822 [ -s $file ] || return 0
1824 echo "Found the END_RUN_FILE file: $file"
1827 # A client load will stop if it finds the END_RUN_FILE file.
1828 # That does not mean the client load actually failed though.
1829 # The first node in END_RUN_FILE is the one we are interested in.
1832 if [ -n "$node" ]; then
1833 local var=$(node_var_name $node)_load
1835 local prefix=$TESTLOG_PREFIX
1836 [ -n "$TESTNAME" ] && prefix=$prefix.$TESTNAME
1837 local stdout_log=$prefix.run_${!var}_stdout.$node.log
1838 local debug_log=$(echo $stdout_log | sed 's/\(.*\)stdout/\1debug/')
1840 echo "Client load ${!var} failed on node $node:"
1846 # Stop the process which had its PID saved in a file.
1851 [ -z "$nodes" -o -z "$pid_file" ] && return 0
1853 do_nodes $nodes "test -f $pid_file &&
1854 { kill -s TERM \\\$(cat $pid_file); rm -f $pid_file; }" || true
1857 # Stop all client loads.
1858 stop_client_loads() {
1859 local nodes=${1:-$CLIENTS}
1862 # stop the client loads
1863 stop_process $nodes $pid_file
1865 # clean up the processes that started them
1866 [ -n "$CLIENT_LOAD_PIDS" ] && kill -9 $CLIENT_LOAD_PIDS 2>/dev/null || true
1868 # End recovery-scale functions
1870 # verify that lustre actually cleaned up properly
1872 [ -f $CATASTROPHE ] && [ `cat $CATASTROPHE` -ne 0 ] && \
1873 error "LBUG/LASSERT detected"
1874 BUSY=`dmesg | grep -i destruct || true`
1875 if [ "$BUSY" ]; then
1877 [ -e $TMP/debug ] && mv $TMP/debug $TMP/debug-busy.`date +%s`
1881 check_mem_leak || exit 204
1883 [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl &&
1884 echo "$TESTSUITE: lustre didn't clean up..." 1>&2 &&
1887 if module_loaded lnet || module_loaded libcfs; then
1888 echo "$TESTSUITE: modules still loaded..." 1>&2
1897 if [[ "$1" == "--verbose" ]]; then
1913 RESULT=$(do_node $node "$TEST")
1914 if [[ "$RESULT" == "$FINAL" ]]; then
1915 [[ -z "$RESULT" || $WAIT -le $sleep ]] ||
1916 echo "Updated after ${WAIT}s: wanted '$FINAL'"\
1920 if [[ $verbose && "$RESULT" != "$PREV_RESULT" ]]; then
1921 echo "Changed after ${WAIT}s: from '$PREV_RESULT'"\
1925 [[ $WAIT -ge $MAX ]] && break
1926 [[ $((WAIT % print)) -eq 0 ]] &&
1927 echo "Waiting $((MAX - WAIT)) secs for update"
1928 WAIT=$((WAIT + sleep))
1931 echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'"
1935 wait_update_facet() {
1938 wait_update $(facet_active_host $facet) "$@"
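# Hedged usage sketch (the parameter path and timeout are illustrative assumptions):
#   wait_update_facet ost1 \
#       "$LCTL get_param -n obdfilter.*.degraded 2>/dev/null | head -n1" "0" 60 ||
#       error "ost1 still degraded"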
1942 do_nodes $(comma_list $(mdts_nodes)) \
1943 "lctl set_param -n osd*.*MDT*.force_sync 1"
1944 do_nodes $(comma_list $(osts_nodes)) \
1945 "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 |
1946 grep -v 'Found no match'
1949 wait_delete_completed_mds() {
1950 local MAX_WAIT=${1:-20}
1952 local stime=`date +%s`
1957 # find MDS with pending deletions
1958 for node in $(mdts_nodes); do
1959 changes=$(do_node $node "lctl get_param -n osc.*MDT*.sync_*" \
1960 2>/dev/null | calc_sum)
1961 if [ -z "$changes" ] || [ $changes -eq 0 ]; then
1964 mds2sync="$mds2sync $node"
1966 if [ "$mds2sync" == "" ]; then
1969 mds2sync=$(comma_list $mds2sync)
1971 # sync MDS transactions
1972 do_nodes $mds2sync "lctl set_param -n osd*.*MD*.force_sync 1"
1974 # wait till all changes are sent and committed by OSTs;
1975 # for ldiskfs, space is released upon execution, but DMU
1976 # does this upon commit
1979 while [ "$WAIT" -ne "$MAX_WAIT" ]; do
1980 changes=$(do_nodes $mds2sync "lctl get_param -n osc.*MDT*.sync_*" \
1982 #echo "$node: $changes changes on all"
1983 if [ "$changes" -eq "0" ]; then
1985 #echo "delete took $((etime - stime)) seconds"
1993 echo "Delete is not completed in $((etime - stime)) seconds"
1994 do_nodes $mds2sync "lctl get_param osc.*MDT*.sync_*"
2000 # we can use "for" here because we are waiting for the slowest
2001 for host in ${hostlist//,/ }; do
2002 check_network "$host" 900
2004 while ! do_nodes $hostlist hostname > /dev/null; do sleep 5; done
2011 for facet in ${facetlist//,/ }; do
2012 hostlist=$(expand_list $hostlist $(facet_active_host $facet))
2014 wait_for_host $hostlist
2017 _wait_recovery_complete () {
2020 # Use default policy if $2 is not passed by caller.
2021 local MAX=${2:-$(max_recovery_time)}
2026 while [ $WAIT -lt $MAX ]; do
2027 STATUS=$(lctl get_param -n $param | grep status)
2029 [[ $STATUS = "status: COMPLETE" || $STATUS = "status: INACTIVE" ]] && return 0
2032 echo "Waiting $((MAX - WAIT)) secs for $param recovery done. $STATUS"
2034 echo "$param recovery not done in $MAX sec. $STATUS"
2038 wait_recovery_complete () {
2041 # with an assumption that at_max is the same on all nodes
2042 local MAX=${2:-$(max_recovery_time)}
2045 if [ "$FAILURE_MODE" = HARD ]; then
2046 facets=$(facets_on_host $(facet_active_host $facet))
2048 echo affected facets: $facets
2050 # we can use "for" here because we are waiting for the slowest
2051 for facet in ${facets//,/ }; do
2052 local var_svc=${facet}_svc
2053 local param="*.${!var_svc}.recovery_status"
2055 local host=$(facet_active_host $facet)
2056 do_rpc_nodes "$host" _wait_recovery_complete $param $MAX
2060 wait_mds_ost_sync () {
2061 # just because recovery is done doesn't mean we've finished
2062 # orphan cleanup. Wait for llogs to get synchronized.
2063 echo "Waiting for orphan cleanup..."
2064 # MAX value includes time needed for MDS-OST reconnection
2065 local MAX=$(( TIMEOUT * 2 ))
2066 local WAIT_TIMEOUT=${1:-$MAX}
2069 local list=$(comma_list $(mdts_nodes))
2070 local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed"
2071 if ! do_facet $SINGLEMDS \
2072 "$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null"
2074 # old way, use mds_sync
2076 list=$(comma_list $(osts_nodes))
2077 cmd="$LCTL get_param -n obdfilter.*.mds_sync"
2080 echo "wait $WAIT_TIMEOUT secs maximumly for $list mds-ost sync done."
2081 while [ $WAIT -lt $WAIT_TIMEOUT ]; do
2082 local -a sync=($(do_nodes $list "$cmd"))
2085 for ((i=0; i<${#sync[@]}; i++)); do
2087 [ ${sync[$i]} -eq 1 ] && continue
2089 [ ${sync[$i]} -eq 0 ] && continue
2091 # there is an unfinished MDS-OST synchronization
2095 sleep 2 # increase waiting time and cover statfs cache
2096 [ ${con} -eq 1 ] && return 0
2097 echo "Waiting $WAIT secs for $list $i mds-ost sync done."
2101 # show which nodes are not finished.
2102 do_nodes $list "$cmd"
2103 echo "$facet recovery node $i not done in $WAIT_TIMEOUT sec. $STATUS"
2107 wait_destroy_complete () {
2108 echo "Waiting for local destroys to complete"
2109 # MAX value shouldn't be big as this reflects server responsiveness;
2110 # never increase this just to make a test pass, but investigate
2111 # why it takes so long
2114 while [ $WAIT -lt $MAX ]; do
2115 local -a RPCs=($($LCTL get_param -n osc.*.destroys_in_flight))
2119 for ((i=0; i<${#RPCs[@]}; i++)); do
2120 [ ${RPCs[$i]} -eq 0 ] && continue
2121 # there are still some destroy RPCs in flight
2126 [ ${con} -eq 1 ] && return 0 # done waiting
2127 echo "Waiting ${WAIT}s for local destroys to complete"
2130 echo "Local destroys weren't done in $MAX sec."
2134 wait_delete_completed() {
2135 wait_delete_completed_mds $1 || return $?
2136 wait_destroy_complete
2145 # conf-sanity 31 takes a long time to clean up
2146 while [ $WAIT -lt 300 ]; do
2147 running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null && lctl dl | grep ' ST '") || true
2148 [ -z "${running}" ] && return 0
2149 echo "waited $WAIT for${running}"
2150 [ $INTERVAL -lt 64 ] && INTERVAL=$((INTERVAL + INTERVAL))
2152 WAIT=$((WAIT + INTERVAL))
2154 echo "service didn't stop after $WAIT seconds. Still running:"
2159 wait_remote_prog () {
2165 [ "$PDSH" = "no_dsh" ] && return 0
2167 while [ $WAIT -lt $2 ]; do
2168 running=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep) || true
2169 [ -z "${running}" ] && return 0 || true
2170 echo "waited $WAIT for: "
2172 [ $INTERVAL -lt 60 ] && INTERVAL=$((INTERVAL + INTERVAL))
2174 WAIT=$((WAIT + INTERVAL))
2176 local pids=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep | awk '{print $2}')
2177 [ -z "$pids" ] && return 0
2178 echo "$PDSH processes still exists after $WAIT seconds. Still running: $pids"
2179 # FIXME: not portable
2180 for pid in $pids; do
2181 cat /proc/${pid}/status || true
2182 cat /proc/${pid}/wchan || true
2184 kill -9 $pid || true
2193 # not every config has many clients
2195 if [ ! -z "$CLIENTS" ]; then
2196 $PDSH $CLIENTS "stat -f $MOUNT" > /dev/null
2198 stat -f $MOUNT > /dev/null
2204 # usually checked on a particular client or locally
2206 if [ ! -z "$client" ]; then
2207 $PDSH $client "stat -f $MOUNT" > /dev/null
2209 stat -f $MOUNT > /dev/null
2217 client_reconnect() {
2218 uname -n >> $MOUNT/recon
2219 if [ -z "$CLIENTS" ]; then
2220 df $MOUNT; uname -n >> $MOUNT/recon
2222 do_nodes $CLIENTS "df $MOUNT; uname -n >> $MOUNT/recon" > /dev/null
2224 echo Connected clients:
2226 ls -l $MOUNT/recon > /dev/null
2230 affected_facets () {
2233 local host=$(facet_active_host $facet)
2234 local affected=$facet
2236 if [ "$FAILURE_MODE" = HARD ]; then
2237 affected=$(facets_up_on_host $host)
2251 # Because it will only get up facets, we need to get the affected
2252 # facets before shutdown.
2253 # For HARD failure mode, we need to make sure facets on the same
2254 # host will only be shut down and rebooted once.
2255 for facet in ${facets//,/ }; do
2256 local affected_facet
2258 #check whether facet has been included in other affected facets
2259 for ((index=0; index<$total; index++)); do
2260 [[ ,${affecteds[index]}, == *,$facet,* ]] && skip=1
2263 if [ $skip -eq 0 ]; then
2264 affecteds[$total]=$(affected_facets $facet)
2269 for ((index=0; index<$total; index++)); do
2270 facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
2271 local host=$(facet_active_host $facet)
2272 echo "Failing ${affecteds[index]} on $host"
2273 shutdown_facet $facet
2276 for ((index=0; index<$total; index++)); do
2277 facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
2278 echo reboot facets: ${affecteds[index]}
2282 change_active ${affecteds[index]}
2284 wait_for_facet ${affecteds[index]}
2285 # start mgs first if it is affected
2286 if ! combined_mgs_mds &&
2287 list_member ${affecteds[index]} mgs; then
2288 mount_facet mgs || error "Restart of mgs failed"
2290 # FIXME; has to be changed to mount all facets concurrently
2291 affected=$(exclude_items_from_list ${affecteds[index]} mgs)
2292 echo mount facets: ${affecteds[index]}
2293 mount_facets ${affecteds[index]}
2303 do_facet $facet "sync; sync; sync"
2306 # make sure there will be no seq change
2307 local clients=${CLIENTS:-$HOSTNAME}
2308 local f=fsa-\\\$\(hostname\)
2309 do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
2310 do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi"
2312 local svc=${facet}_svc
2313 do_facet $facet $LCTL --device ${!svc} notransno
2315 # If a ZFS OSD is made read-only here, its pool is "frozen". This
2316 # in-memory state has to be cleared by either rebooting the host or
2317 # exporting and reimporting the pool.
2319 # Although the uberblocks are not updated when a pool is frozen,
2320 # transactions are still written to the disks. Modified blocks may be
2321 # cached in memory when tests try reading them back. The
2322 # export-and-reimport process also evicts any cached pool data from
2323 # memory to provide the correct "data loss" semantics.
2325 # In the test framework, the exporting and importing operations are
2326 # handled by stop() and mount_facet() separately, which are used
2327 # inside fail() and fail_abort().
2329 do_facet $facet $LCTL --device ${!svc} readonly
2330 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
2331 $LCTL mark "local REPLAY BARRIER on ${!svc}"
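# Hedged sketch of a typical replay-barrier test pattern (file name is illustrative):
#   replay_barrier $SINGLEMDS
#   touch $DIR/$tfile              # update is held back from stable storage on the MDS
#   fail $SINGLEMDS                # fail over; the client should replay the request
#   [ -f $DIR/$tfile ] || error "update was not replayed"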
2334 replay_barrier_nodf() {
2335 local facet=$1
2336 do_facet $facet "sync; sync; sync"
2337 local svc=${facet}_svc
2338 echo Replay barrier on ${!svc}
2339 do_facet $facet $LCTL --device ${!svc} notransno
2340 do_facet $facet $LCTL --device ${!svc} readonly
2341 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
2342 $LCTL mark "local REPLAY BARRIER on ${!svc}"
2345 replay_barrier_nosync() {
2346 local facet=$1
2347 local svc=${facet}_svc
2348 echo Replay barrier on ${!svc}
2349 do_facet $facet $LCTL --device ${!svc} notransno
2350 do_facet $facet $LCTL --device ${!svc} readonly
2351 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
2352 $LCTL mark "local REPLAY BARRIER on ${!svc}"
2356 # Get Lustre client uuid for a given Lustre mount point.
2359 local mntpnt=${1:-$MOUNT}
2361 local name=$($LFS getname $mntpnt | cut -d' ' -f1)
2362 local uuid=$($LCTL get_param -n llite.$name.uuid)
2367 mds_evict_client() {
2368 local mntpnt=${1:-$MOUNT}
2369 local uuid=$(get_client_uuid $mntpnt)
2371 do_facet $SINGLEMDS \
2372 "$LCTL set_param -n mdt.${mds1_svc}.evict_client $uuid"
2375 ost_evict_client() {
2376 local mntpnt=${1:-$MOUNT}
2377 local uuid=$(get_client_uuid $mntpnt)
2380 "$LCTL set_param -n obdfilter.${ost1_svc}.evict_client $uuid"
2385 local clients=${CLIENTS:-$HOSTNAME}
2387 facet_failover $* || error "failover: $?"
2388 wait_clients_import_state "$clients" "$facets" FULL
2389 clients_up || error "post-failover df: $?"
2394 facet_failover $facet
2400 change_active $facet
2401 wait_for_facet $facet
2402 mount_facet $facet -o abort_recovery
2403 clients_up || echo "first df failed: $?"
2404 clients_up || error "post-failover df: $?"
2408 echo There is no lmc. This is mountconf, baby.
2412 host_nids_address() {
2416 if [ -n "$kind" ]; then
2417 nids=$(do_nodes $nodes "$LCTL list_nids | grep $kind | cut -f 1 -d '@'")
2419 nids=$(do_nodes $nodes "$LCTL list_nids all | cut -f 1 -d '@'")
2425 if [ "$1" = "'*'" ]; then echo \'*\'; else
2431 if [ "$1" = "'*'" ]; then echo \'*\'; else
2432 ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | \
2434 if [ -z "$ID" ]; then
2435 echo "Could not get a ptl id for $1..."
2444 h2name_or_ip "$1" "tcp"
2449 if [ "$1" = "'*'" ]; then echo \'*\'; else
2450 if type __h2elan >/dev/null 2>&1; then
2453 ID=`echo $1 | sed 's/[^0-9]*//g'`
2461 h2name_or_ip "$1" "o2ib"
2465 # This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST
2466 # expressions format. As a bonus we can then just pass in those variables
2467 # to pdsh. What this function does is take a HOSTLIST type string and
2468 # expand it into a space-delimited list for us.
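# Hedged examples (host names are illustrative):
#   hostlist_expand "oss[1-3],mds1"   # -> "oss1 oss2 oss3 mds1"
#   hostlist_expand "oss[1-3]" 2      # optional offset argument selects one entry ("oss2")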
2476 [ -z "$hostlist" ] && return
2478 # Translate the case of [..],..,[..] to [..] .. [..]
2479 list="${hostlist/],/] }"
2481 [[ "$front" == *,* ]] && {
2484 list=${list/${old}/${new}}
2487 for item in $list; do
2488 # Test if we have any []'s at all
2489 if [ "$item" != "${item/\[/}" ]; then {
2490 # Expand the [*] into list
2494 if [ "$name" != "$item" ]; then
2495 group=${item#$name[*}
2498 for range in ${group//,/ }; do
2502 # Number of leading zeros
2505 end=$(echo $end | sed 's/0*//')
2506 [[ -z "$end" ]] && end=0
2507 [[ $padlen2 -gt $padlen ]] && {
2508 [[ $padlen2 -eq ${#end} ]] && padlen2=0
2511 begin=$(echo $begin | sed 's/0*//')
2512 [ -z $begin ] && begin=0
2514 for num in $(seq -f "%0${padlen}g" $begin $end); do
2515 value="${name#*,}${num}${back}"
2516 [ "$value" != "${value/\[/}" ] && {
2517 value=$(hostlist_expand "$value")
2519 myList="$myList $value"
2524 myList="$myList $item"
2527 myList=${myList//,/ }
2528 myList=${myList:1} # Remove first character which is a space
2530 # Filter any duplicates without sorting
2532 myList="${list%% *}"
2534 while [[ "$list" != ${myList##* } ]]; do
2535 list=${list//${list%% *} /}
2536 myList="$myList ${list%% *}"
2538 myList="${myList%* }";
2540 # We can select an object at an offset in the list
2543 for item in $myList; do
2545 [ $cnt -eq $offset ] && {
2549 [ $(get_node_count $myList) -ne 1 ] && myList=""
2558 [ "$facet" == client ] && echo -n $HOSTNAME && return
2559 varname=${facet}_HOST
2560 if [ -z "${!varname}" ]; then
2561 if [ "${facet:0:3}" == "ost" ]; then
2562 eval export ${facet}_HOST=${ost_HOST}
2563 elif [ "${facet:0:3}" == "mdt" -o \
2564 "${facet:0:3}" == "mds" -o \
2565 "${facet:0:3}" == "mgs" ]; then
2566 eval export ${facet}_HOST=${mds_HOST}
2572 facet_failover_host() {
2576 var=${facet}failover_HOST
2577 if [ -n "${!var}" ]; then
2582 if [ "${facet:0:3}" == "mdt" -o "${facet:0:3}" == "mds" -o \
2583 "${facet:0:3}" == "mgs" ]; then
2585 eval export ${facet}failover_host=${mds_HOST}
2590 if [[ $facet == ost* ]]; then
2591 eval export ${facet}failover_host=${ost_HOST}
2599 local activevar=${facet}active
2601 if [ -f $TMP/${facet}active ] ; then
2602 source $TMP/${facet}active
2605 active=${!activevar}
2606 if [ -z "$active" ] ; then
2613 facet_active_host() {
2615 local active=`facet_active $facet`
2616 if [ "$facet" == client ]; then
2619 echo `facet_host $active`
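# Illustrative example (hypothetical state): if mds1 has failed over,
# $TMP/mds1active contains "mds1active=mds1failover", so facet_active mds1
# echoes "mds1failover" and facet_active_host mds1 resolves the host from
# the mds1failover_HOST variable.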
2623 # Get the passive failover partner host of facet.
2624 facet_passive_host() {
2626 [[ $facet = client ]] && return
2628 local host=${facet}_HOST
2629 local failover_host=${facet}failover_HOST
2630 local active_host=$(facet_active_host $facet)
2632 [[ -z ${!failover_host} || ${!failover_host} = ${!host} ]] && return
2634 if [[ $active_host = ${!host} ]]; then
2635 echo -n ${!failover_host}
2645 facetlist=$(exclude_items_from_list $facetlist mgs)
2647 for facet in ${facetlist//,/ }; do
2648 local failover=${facet}failover
2649 local host=`facet_host $failover`
2650 [ -z "$host" ] && return
2652 local curactive=`facet_active $facet`
2653 if [ -z "${curactive}" -o "$curactive" == "$failover" ] ; then
2654 eval export ${facet}active=$facet
2656 eval export ${facet}active=$failover
2658 # save the active host for this facet
2659 local activevar=${facet}active
2660 echo "$activevar=${!activevar}" > $TMP/$activevar
2661 [[ $facet = mds1 ]] && combined_mgs_mds && \
2662 echo "mgsactive=${!activevar}" > $TMP/mgsactive
2663 local TO=`facet_active_host $facet`
2664 echo "Failover $facet to $TO"
2670 # do not strip off the hostname if verbose, bug 19215
2671 if [ x$1 = x--verbose ]; then
2679 if [ "$HOST" = "$HOSTNAME" ]; then
2681 elif [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" ]; then
2682 echo "cannot run remote command on $HOST with $myPDSH"
2686 echo "CMD: $HOST $@" >&2
2687 $myPDSH $HOST "$LCTL mark \"$@\"" > /dev/null 2>&1 || :
2690 if [ "$myPDSH" = "rsh" ]; then
2691 # we need this because rsh does not return the exit code of the executed command
2692 local command_status="$TMP/cs"
2693 rsh $HOST ":> $command_status"
2694 rsh $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin;
2695 cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\") ||
2696 echo command failed >$command_status"
2697 [ -n "$($myPDSH $HOST cat $command_status)" ] && return 1 || true
2702 # print HOSTNAME for myPDSH="no_dsh"
2703 if [[ $myPDSH = no_dsh ]]; then
2704 $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed -e "s/^/${HOSTNAME}: /"
2706 $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")"
2709 $myPDSH $HOST "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" sh -c \"$@\")" | sed "s/^${HOST}: //"
2711 return ${PIPESTATUS[0]}
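# Typical usage (illustrative command): run an lctl query on the active MDS
# host and fail the caller if the command cannot be executed there.
#   do_node $(facet_active_host mds1) "$LCTL get_param -n version" ||
#	error "lctl get_param failed"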
2715 do_node --verbose "$@"
2718 single_local_node () {
2719 [ "$1" = "$HOSTNAME" ]
2722 # Outputs environment variable assignments that should be passed to remote nodes
2726 local facets=$(get_facets)
2729 for var in ${!MODOPTS_*}; do
2731 echo -n " ${var}=\"$value\""
2734 for facet in ${facets//,/ }; do
2736 if [ -n "${!var}" ]; then
2737 echo -n " $var=${!var}"
2741 for var in MGSFSTYPE MDSFSTYPE OSTFSTYPE; do
2742 if [ -n "${!var}" ]; then
2743 echo -n " $var=${!var}"
2747 if [ -n "$FSTYPE" ]; then
2748 echo -n " FSTYPE=$FSTYPE"
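# For example (illustrative values): with MODOPTS_LNET="networks=tcp0" and
# FSTYPE=ldiskfs set locally, get_env_vars emits
#   MODOPTS_LNET="networks=tcp0" FSTYPE=ldiskfs
# which do_nodes splices into the remote shell invocation below.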
2754 # do not strip off the hostname if verbose, bug 19215
2755 if [ x$1 = x--verbose ]; then
2763 if single_local_node $rnodes; then
2765 do_nodev $rnodes "$@"
2767 do_node $rnodes "$@"
2772 # This block is adapted from do_node
2775 [ -z "$myPDSH" -o "$myPDSH" = "no_dsh" -o "$myPDSH" = "rsh" ] && \
2776 echo "cannot run remote command on $rnodes with $myPDSH" && return 128
2778 export FANOUT=$(get_node_count "${rnodes//,/ }")
2780 echo "CMD: $rnodes $@" >&2
2781 $myPDSH $rnodes "$LCTL mark \"$@\"" > /dev/null 2>&1 || :
2784 # do not replace anything from pdsh output if -N is used
2785 # -N Disable hostname: prefix on lines of output.
2786 if $verbose || [[ $myPDSH = *-N* ]]; then
2787 $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")"
2789 $myPDSH $rnodes "(PATH=\$PATH:$RLUSTRE/utils:$RLUSTRE/tests:/sbin:/usr/sbin; cd $RPWD; LUSTRE=\"$RLUSTRE\" $(get_env_vars) sh -c \"$@\")" | sed -re "s/^[^:]*: //g"
2791 return ${PIPESTATUS[0]}
2797 local HOST=`facet_active_host $facet`
2798 [ -z "$HOST" ] && echo "No host defined for facet ${facet}" && exit 1
2802 # Function: do_facet_random_file $FACET $FILE $SIZE
2803 # Creates FILE with random content of the given SIZE on the given FACET
2805 do_facet_random_file() {
2809 local cmd="dd if=/dev/urandom of='$fpath' bs=$fsize count=1"
2810 do_facet $facet "$cmd 2>/dev/null"
2813 do_facet_create_file() {
2817 local cmd="dd if=/dev/zero of='$fpath' bs=$fsize count=1"
2818 do_facet $facet "$cmd 2>/dev/null"
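# Example usage (hypothetical paths and sizes): both helpers run dd on the
# facet, so SIZE accepts dd block-size suffixes.
#   do_facet_random_file mds1 /tmp/tf-rand.bin 1M	# 1 MiB of urandom data
#   do_facet_create_file ost1 /tmp/tf-zero.bin 4K	# 4 KiB of zeros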
2822 do_nodes --verbose "$@"
2828 # make sure it's not already running
2830 rm -f $TMP/${facet}active
2831 [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive
2832 do_facet ${facet} $MKFS $* || return ${PIPESTATUS[0]}
2834 if [[ $(facet_fstype $facet) == zfs ]]; then
2836 # After formatting a ZFS target, the "cachefile=none" property is
2837 # set on the ZFS storage pool so that the pool is not
2838 # automatically imported on system startup. The pool is then
2839 # exported so that the import and export operations are left
2840 # to mount_facet() and stop() respectively.
2842 refresh_partition_table $facet $(facet_vdevice $facet)
2843 disable_zpool_cache $facet
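# A hedged sketch of the property change involved (assuming
# disable_zpool_cache wraps a zpool invocation; the pool name is
# illustrative):
#   do_facet $facet "zpool set cachefile=none lustre-ost1"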
2852 local fstype=$(facet_fstype ost$num)
2856 #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num
2857 eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
2859 #dataset name is independent of vdev device names
2860 eval DEVPTR=${FSNAME}-ost${num}/ost${num};;
2862 error "unknown fstype!";;
2872 local fstype=$(facet_fstype ost$num)
2876 # vdevs are not supported by ldiskfs
2879 #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num
2880 eval VDEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
2882 error "unknown fstype!";;
2892 local fstype=$(facet_fstype mds$num)
2896 #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num
2897 eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
2899 #dataset name is independent of vdev device names
2900 eval DEVPTR=${FSNAME}-mdt${num}/mdt${num};;
2902 error "unknown fstype!";;
2912 local fstype=$(facet_fstype mds$num)
2916 # vdevs are not supported by ldiskfs
2919 #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num
2920 eval VDEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
2922 error "unknown fstype!";;
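# Worked examples (illustrative values: FSNAME=lustre,
# OSTDEVBASE=/tmp/lustre-ost, MDSDEVBASE=/tmp/lustre-mdt):
#   ldiskfs: ostdevname 2 -> /tmp/lustre-ost2   mdsdevname 1 -> /tmp/lustre-mdt1
#   zfs:     ostdevname 2 -> lustre-ost2/ost2   mdsdevname 1 -> lustre-mdt1/mdt1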
2930 local fstype=$(facet_fstype mgs)
2934 if [ $(facet_host mgs) = $(facet_host mds1) ] &&
2935 ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsdevname 1) ] ); then
2936 DEVPTR=$(mdsdevname 1)
2941 if [ $(facet_host mgs) = $(facet_host mds1) ] &&
2942 ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsvdevname 1) ] ); then
2943 DEVPTR=$(mdsdevname 1)
2945 DEVPTR=${FSNAME}-mgs/mgs
2948 error "unknown fstype!";;
2958 local fstype=$(facet_fstype mgs)
2962 # vdevs are not supported by ldiskfs
2965 if [ $(facet_host mgs) = $(facet_host mds1) ] &&
2966 ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsvdevname 1) ] ); then
2967 VDEVPTR=$(mdsvdevname 1)
2972 error "unknown fstype!";;
2980 [[ $facet = mgs ]] && combined_mgs_mds && facet="mds1"
2982 local var=${facet}_MOUNT
2983 eval mntpt=${!var:-${MOUNT%/*}/$facet}
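# Example (illustrative): with MOUNT=/mnt/lustre and no ost1_MOUNT override,
# facet_mntpt ost1 -> /mnt/ost1; exporting ost1_MOUNT=/srv/ost1 takes
# precedence instead.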
2992 # make sure we are using the primary server, so test-framework will
2993 # be able to clean up properly.
2994 activemds=`facet_active mds1`
2995 if [ $activemds != "mds1" ]; then
2999 local clients=$CLIENTS
3000 [ -z "$clients" ] && clients=$(hostname)
3002 zconf_umount_clients $clients $MOUNT "$*" || true
3003 [ -n "$MOUNT2" ] && zconf_umount_clients $clients $MOUNT2 "$*" || true
3005 [ "$CLIENTONLY" ] && return
3006 # The add function removes the ${facet}active file; that would be
3007 # enough if we used do_facet <facet> only after the facet was added,
3008 # but currently we use do_facet mds in local.sh
3009 for num in `seq $MDSCOUNT`; do
3011 rm -f ${TMP}/mds${num}active
3013 combined_mgs_mds && rm -f $TMP/mgsactive
3015 for num in `seq $OSTCOUNT`; do
3017 rm -f $TMP/ost${num}active
3020 if ! combined_mgs_mds ; then
3027 cleanup_echo_devs () {
3028 local devs=$($LCTL dl | grep echo | awk '{print $4}')
3030 for dev in $devs; do
3031 $LCTL --device $dev cleanup
3032 $LCTL --device $dev detach
3037 nfs_client_mode && return
3046 combined_mgs_mds () {
3047 [[ "$(mdsdevname 1)" = "$(mgsdevname)" ]] &&
3048 [[ "$(facet_host mds1)" = "$(facet_host mgs)" ]]
3052 echo -n "$1" | tr '[:upper:]' '[:lower:]'
3056 echo -n "$1" | tr '[:lower:]' '[:upper:]'
3062 local fsname=${3:-"$FSNAME"}
3063 local type=$(facet_type $facet)
3064 local index=$(($(facet_number $facet) - 1))
3065 local fstype=$(facet_fstype $facet)
3066 local host=$(facet_host $facet)
3071 if [ $type == MGS ] && combined_mgs_mds; then
3075 if [ $type == MGS ] || ( [ $type == MDS ] &&
3076 [ "$dev" == $(mgsdevname) ] &&
3077 [ "$host" == "$(facet_host mgs)" ] ); then
3080 opts="--mgsnode=$MGSNID"
3083 if [ $type != MGS ]; then
3084 opts+=" --fsname=$fsname --$(lower ${type/MDS/MDT}) \
3088 var=${facet}failover_HOST
3089 if [ -n "${!var}" ] && [ ${!var} != $(facet_host $facet) ]; then
3090 opts+=" --failnode=$(h2$NETTYPE ${!var})"
3093 opts+=${TIMEOUT:+" --param=sys.timeout=$TIMEOUT"}
3094 opts+=${LDLM_TIMEOUT:+" --param=sys.ldlm_timeout=$LDLM_TIMEOUT"}
3096 if [ $type == MDS ]; then
3097 opts+=${SECLEVEL:+" --param=mdt.sec_level"}
3098 opts+=${MDSCAPA:+" --param=mdt.capa=$MDSCAPA"}
3099 opts+=${STRIPE_BYTES:+" --param=lov.stripesize=$STRIPE_BYTES"}
3100 opts+=${STRIPES_PER_OBJ:+" --param=lov.stripecount=$STRIPES_PER_OBJ"}
3101 opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"}
3103 if [ $fstype == ldiskfs ]; then
3104 fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"}
3105 if [ -n "$EJOURNAL" ]; then
3106 fs_mkfs_opts+=${MDSJOURNALSIZE:+" device=$EJOURNAL"}
3108 fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"}
3112 if [ $type == OST ]; then
3113 opts+=${SECLEVEL:+" --param=ost.sec_level"}
3114 opts+=${OSSCAPA:+" --param=ost.capa=$OSSCAPA"}
3116 if [ $fstype == ldiskfs ]; then
3117 fs_mkfs_opts+=${OSTJOURNALSIZE:+" -J size=$OSTJOURNALSIZE"}
3121 opts+=" --backfstype=$fstype"
3124 if [ -n "${!var}" ]; then
3125 opts+=" --device-size=${!var}"
3128 var=$(upper $fstype)_MKFS_OPTS
3129 fs_mkfs_opts+=${!var:+" ${!var}"}
3131 var=${type}_FS_MKFS_OPTS
3132 fs_mkfs_opts+=${!var:+" ${!var}"}
3134 if [ -n "${fs_mkfs_opts## }" ]; then
3135 opts+=" --mkfsoptions=\\\"${fs_mkfs_opts## }\\\""
3139 opts+=${!var:+" ${!var}"}
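# An illustrative result (actual values depend on the cfg file): for an
# ldiskfs MDT, "mkfs_opts mds1 $(mdsdevname 1)" might expand to roughly
#   --mgsnode=$MGSNID --fsname=lustre --mdt --index=0 \
#   --param=sys.timeout=20 --backfstype=ldiskfs --mkfsoptions=\"...\"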
3152 # We need ldiskfs here; may as well load all the modules
3154 [ "$CLIENTONLY" ] && return
3155 echo Formatting mgs, mds, osts
3156 if ! combined_mgs_mds ; then
3157 echo "Format mgs: $(mgsdevname)"
3158 add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \
3159 $(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} ||
3163 for num in $(seq $MDSCOUNT); do
3164 echo "Format mds$num: $(mdsdevname $num)"
3165 add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \
3166 --reformat $(mdsdevname $num) $(mdsvdevname $num) \
3167 ${quiet:+>/dev/null} || exit 10
3170 for num in $(seq $OSTCOUNT); do
3171 echo "Format ost$num: $(ostdevname $num)"
3172 add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \
3173 --reformat $(ostdevname $num) $(ostvdevname ${num}) \
3174 ${quiet:+>/dev/null} || exit 10
3179 grep " $1 " /proc/mounts || zconf_mount $HOSTNAME $*
3183 grep " $1 " /proc/mounts && zconf_umount `hostname` $*
3187 # 0: success, the old identity was already set.
3188 # 1: success, the old identity was not set.
3193 local j=$((num - 1))
3194 local MDT="`(do_facet mds$num lctl get_param -N mdt.*MDT*$j 2>/dev/null | cut -d"." -f2 2>/dev/null) || true`"
3196 if [ -z "$MDT" ]; then
3200 local old="`do_facet mds$num "lctl get_param -n mdt.$MDT.identity_upcall"`"
3203 do_facet mds$num "lctl set_param -n mdt.$MDT.identity_upcall \"$L_GETIDENTITY\""
3205 do_facet mds$num "lctl set_param -n mdt.$MDT.identity_upcall \"NONE\""
3208 do_facet mds$num "lctl set_param -n mdt/$MDT/identity_flush=-1"
3210 if [ "$old" = "NONE" ]; then
3219 zconf_umount `hostname` $1 || error "umount failed"
3220 zconf_mount `hostname` $1 || error "mount failed"
3228 rm -f $TMP/${facet}active
3229 do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || return 1
3234 local mdt_count=${1:-$MDSCOUNT}
3235 local ost_count=${2:-$OSTCOUNT}
3238 for num in $(seq $mdt_count); do
3239 DEVNAME=$(mdsdevname $num)
3240 writeconf_facet mds$num $DEVNAME || rc=$?
3243 for num in $(seq $ost_count); do
3244 DEVNAME=$(ostdevname $num)
3245 writeconf_facet ost$num $DEVNAME || rc=$?
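# Usage sketch (counts are illustrative): regenerate the configuration logs
# of the first two MDTs and all OSTs.
#   writeconf_all 2		# mdt_count=2, ost_count defaults to $OSTCOUNT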
3251 nfs_client_mode && return
3253 sanity_mount_check ||
3254 error "environments are insane!"
3258 if [ -z "$CLIENTONLY" ]; then
3259 echo Setup mgs, mdt, osts
3260 echo $WRITECONF | grep -q "writeconf" && \
3262 if ! combined_mgs_mds ; then
3263 start mgs $(mgsdevname) $MGS_MOUNT_OPTS
3266 for num in `seq $MDSCOUNT`; do
3267 DEVNAME=$(mdsdevname $num)
3268 start mds$num $DEVNAME $MDS_MOUNT_OPTS
3270 # We started mds, now we should set failover variables properly.
3271 # Set mds${num}failover_HOST if it is not set (the default failnode).
3272 local varname=mds${num}failover_HOST
3273 if [ -z "${!varname}" ]; then
3274 eval mds${num}failover_HOST=$(facet_host mds$num)
3277 if [ $IDENTITY_UPCALL != "default" ]; then
3278 switch_identity $num $IDENTITY_UPCALL
3281 for num in `seq $OSTCOUNT`; do
3282 DEVNAME=$(ostdevname $num)
3283 start ost$num $DEVNAME $OST_MOUNT_OPTS
3285 # We started ost$num; now we should set the ost${num}failover variables properly.
3286 # Set ost${num}failover_HOST if it is not set (the default failnode).
3287 varname=ost${num}failover_HOST
3288 if [ -z "${!varname}" ]; then
3289 eval ost${num}failover_HOST=$(facet_host ost${num})
3297 # wait a while to allow the sptlrpc configuration to be propagated to targets;
3298 # this is only needed when mounting new target devices.
3303 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
3305 [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT
3308 if [ "$MOUNT_2" ]; then
3309 mount_client $MOUNT2
3310 [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2
3315 # by remounting the mdt before the ost, the initial connect from mdt to ost
3316 # might time out because the ost is not ready yet. wait some time for it to
3317 # fully recover. the initial obd_connect timeout is 5s; in the GSS case it is
3318 # preceded by a context negotiation rpc with $TIMEOUT.
3319 # FIXME: it would be better to monitor the import status.
3322 sleep $((TIMEOUT + 5))
3328 mounted_lustre_filesystems() {
3329 awk '($3 ~ "lustre" && $1 ~ ":") { print $2 }' /proc/mounts
3332 init_facet_vars () {
3333 [ "$CLIENTONLY" ] && return 0
3340 eval export ${facet}_dev=${device}
3341 eval export ${facet}_opt=\"$@\"
3343 local dev=${facet}_dev
3345 # We need to loop for the label
3346 # in case it's not initialized yet.
3347 for wait_time in {0,1,3,5,10}; do
3349 if [ $wait_time -gt 0 ]; then
3350 echo "${!dev} not yet initialized,"\
3351 "waiting ${wait_time} seconds."
3355 local label=$(devicelabel ${facet} ${!dev})
3357 # Check that the label does not
3358 # end in ffff, which indicates that
3359 # it has not been initialized yet.
3361 if [[ $label =~ [fF]{4}$ ]]; then
3362 # label is not initialized, unset the result
3363 # and either try again or fail
3370 [ -z "$label" ] && echo no label for ${!dev} && exit 1
3372 eval export ${facet}_svc=${label}
3374 local varname=${facet}failover_HOST
3375 if [ -z "${!varname}" ]; then
3376 eval export $varname=$(facet_host $facet)
3379 varname=${facet}_HOST
3380 if [ -z "${!varname}" ]; then
3381 eval export $varname=$(facet_host $facet)
3384 # ${facet}failover_dev is set in cfg file
3385 varname=${facet}failover_dev
3386 if [ -n "${!varname}" ] ; then
3387 eval export ${facet}failover_dev=${!varname}
3389 eval export ${facet}failover_dev=$device
3392 # get the mount point of an already mounted device:
3393 # if facet_dev is already mounted, then use the real
3394 # mount point of this facet; otherwise use $(facet_mntpt $facet),
3395 # i.e. ${facet}_MOUNT if specified by the user, or the default
3396 local mntpt=$(do_facet ${facet} cat /proc/mounts | \
3397 awk '"'${!dev}'" == $1 && $3 == "lustre" { print $2 }')
3398 if [ -z "$mntpt" ]; then
3399 mntpt=$(facet_mntpt $facet)
3401 eval export ${facet}_MOUNT=$mntpt
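# After, say, init_facet_vars ost1 /dev/sdb (device name illustrative), the
# exported variables are: ost1_dev, ost1_opt, ost1_svc (the on-disk label),
# ost1_HOST, ost1failover_HOST, ost1failover_dev and ost1_MOUNT.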
3404 init_facets_vars () {
3407 if ! remote_mds_nodsh; then
3408 for num in $(seq $MDSCOUNT); do
3409 DEVNAME=`mdsdevname $num`
3410 init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS
3414 combined_mgs_mds || init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS
3416 if ! remote_ost_nodsh; then
3417 for num in $(seq $OSTCOUNT); do
3418 DEVNAME=$(ostdevname $num)
3419 init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS
3424 osc_ensure_active () {
3429 while [ $period -lt $timeout ]; do
3430 count=$(do_facet $facet "lctl dl | grep ' IN osc ' 2>/dev/null | wc -l")
3431 if [ $count -eq 0 ]; then
3435 echo "$count OSTs are inactive; waiting $period seconds and trying again"
3437 period=$((period+3))
3440 [ $period -lt $timeout ] || log "$count OSTs are still inactive after $timeout seconds, giving up"
3443 set_conf_param_and_check() {
3447 local ORIG=$(do_facet $myfacet "$TEST")
3448 if [ $# -gt 3 ]; then