3 trap 'print_summary && print_stack_trace | tee $TF_FAIL && \
4 echo "$TESTSUITE: FAIL: test-framework exiting on error"' ERR
8 export REFORMAT=${REFORMAT:-""}
9 export WRITECONF=${WRITECONF:-""}
10 export VERBOSE=${VERBOSE:-false}
11 export GSS=${GSS:-false}
12 export GSS_SK=${GSS_SK:-false}
14 export GSS_PIPEFS=false
15 export SHARED_KEY=${SHARED_KEY:-false}
16 export SK_PATH=${SK_PATH:-/tmp/test-framework-keys}
17 export SK_OM_PATH=$SK_PATH'/tmp-request-mount'
18 export SK_MOUNTED=${SK_MOUNTED:-false}
19 export SK_FLAVOR=${SK_FLAVOR:-ski}
20 export SK_NO_KEY=${SK_NO_KEY:-true}
21 export SK_UNIQUE_NM=${SK_UNIQUE_NM:-false}
22 export SK_S2S=${SK_S2S:-false}
23 export SK_S2SNM=${SK_S2SNM:-TestFrameNM}
24 export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli}
25 export SK_SKIPFIRST=${SK_SKIPFIRST:-true}
26 export IDENTITY_UPCALL=default
28 export FLAKEY=${FLAKEY:-true}
29 # specify environment variable containing batch job name for server statistics
30 export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
32 #export PDSH="pdsh -S -Rssh -w"
33 export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
34 export UMOUNT=${UMOUNT:-"umount -d"}
36 export LSNAPSHOT_CONF="/etc/ldev.conf"
37 export LSNAPSHOT_LOG="/var/log/lsnapshot.log"
39 # sles12 umount has an issue with the -d option
40 [ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
41 export UMOUNT="umount"
44 # functions used by scripts run on remote nodes
45 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
46 . $LUSTRE/tests/functions.sh
47 . $LUSTRE/tests/yaml.sh
49 export LD_LIBRARY_PATH=${LUSTRE}/utils/.libs:${LUSTRE}/utils:${LD_LIBRARY_PATH}
51 LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg}
53 EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh}
55 if [ -f "$EXCEPT_LIST_FILE" ]; then
56 echo "Reading test skip list from $EXCEPT_LIST_FILE"
61 # check config files for options in decreasing order of preference
62 [ -z "$MODPROBECONF" -a -f /etc/modprobe.d/lustre.conf ] &&
63 MODPROBECONF=/etc/modprobe.d/lustre.conf
64 [ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] &&
65 MODPROBECONF=/etc/modprobe.d/Lustre
66 [ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
67 MODPROBECONF=/etc/modprobe.conf
69 sanitize_parameters() {
70 for i in DIR DIR1 DIR2 MOUNT MOUNT1 MOUNT2
73 if [ -d "$path" ]; then
74 eval export $i=$(echo $path | sed -r 's/\/+$//g')
80 [[ $DIR/ = $MOUNT/* ]] ||
81 { failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
82 [[ $DIR1/ = $MOUNT1/* ]] ||
83 { failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
84 [[ $DIR2/ = $MOUNT2/* ]] ||
85 { failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting."; }
87 [ -n "$failed" ] && exit 99 || true
91 echo "usage: $0 [-r] [-f cfgfile]"
99 [ -z "$DEFAULT_SUITES" ] && return 0
100 [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
102 local form="%-13s %-17s %-9s %s %s\n"
104 printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
105 echo "---------------------------------------------------------------"
106 for O in $DEFAULT_SUITES; do
107 O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
108 [ "${!O}" = "no" ] && continue || true
109 local o=$(echo $O | tr "[:upper:]_" "[:lower:]-")
110 local log=${TMP}/${o}.log
111 if is_sanity_benchmark $o; then
112 log=${TMP}/sanity-benchmark.log
117 local status=Unfinished
119 skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' |
121 slow=$(egrep "^PASS|^FAIL" $log | tr -d "("| sed s/s\)$//g |
122 sort -nr -k 3 | head -n5 | awk '{ print $2":"$3"s" }')
123 total=$(grep duration $log | awk '{ print $2 }')
124 if [ "${!O}" = "done" ]; then
128 local durations=$(egrep "^PASS|^FAIL" $log |
129 tr -d "("| sed s/s\)$//g |
130 awk '{ print $2":"$3"|" }')
131 details=$(printf "%s\n%s %s %s\n" "$details" \
132 "DDETAILS" "$O" "$(echo $durations)")
135 printf "$form" $status "$O" "${total}" "E=$skipped"
136 printf "$form" "-" "-" "-" "S=$(echo $slow)"
139 for O in $DEFAULT_SUITES; do
140 O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
141 if [ "${!O}" = "no" ]; then
142 printf "$form" "Skipped" "$O" ""
146 # print the detailed test durations if DDETAILS=true
152 # Get information about the Lustre environment. The information collected
153 # will be used in Lustre tests.
154 # usage: get_lustre_env
155 # input: No required or optional arguments
156 # output: No return values, environment variables are exported
160 export mds1_FSTYPE=${mds1_FSTYPE:-$(facet_fstype mds1)}
161 export ost1_FSTYPE=${ost1_FSTYPE:-$(facet_fstype ost1)}
163 export MGS_VERSION=$(lustre_version_code mgs)
164 export MDS1_VERSION=$(lustre_version_code mds1)
165 export OST1_VERSION=$(lustre_version_code ost1)
166 export CLIENT_VERSION=$(lustre_version_code client)
168 # Prefer using "mds1" directly instead of SINGLEMDS.
169 # Keep this for compat until it is removed from scripts.
170 export SINGLEMDS=${SINGLEMDS:-mds1}
174 export LUSTRE=$(absolute_path $LUSTRE)
175 export TESTSUITE=$(basename $0 .sh)
176 export TEST_FAILED=false
177 export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
178 export RPC_MODE=${RPC_MODE:-false}
179 export DO_CLEANUP=${DO_CLEANUP:-true}
180 export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
181 export CLEANUP_DM_DEV=false
182 export PAGE_SIZE=$(get_page_size client)
184 export MKE2FS=$MKE2FS
185 if [ -z "$MKE2FS" ]; then
186 if which mkfs.ldiskfs >/dev/null 2>&1; then
187 export MKE2FS=mkfs.ldiskfs
193 export DEBUGFS=$DEBUGFS
194 if [ -z "$DEBUGFS" ]; then
195 if which debugfs.ldiskfs >/dev/null 2>&1; then
196 export DEBUGFS=debugfs.ldiskfs
198 export DEBUGFS=debugfs
202 export TUNE2FS=$TUNE2FS
203 if [ -z "$TUNE2FS" ]; then
204 if which tunefs.ldiskfs >/dev/null 2>&1; then
205 export TUNE2FS=tunefs.ldiskfs
207 export TUNE2FS=tune2fs
211 export E2LABEL=$E2LABEL
212 if [ -z "$E2LABEL" ]; then
213 if which label.ldiskfs >/dev/null 2>&1; then
214 export E2LABEL=label.ldiskfs
216 export E2LABEL=e2label
220 export DUMPE2FS=$DUMPE2FS
221 if [ -z "$DUMPE2FS" ]; then
222 if which dumpfs.ldiskfs >/dev/null 2>&1; then
223 export DUMPE2FS=dumpfs.ldiskfs
225 export DUMPE2FS=dumpe2fs
229 export E2FSCK=$E2FSCK
230 if [ -z "$E2FSCK" ]; then
231 if which fsck.ldiskfs >/dev/null 2>&1; then
232 export E2FSCK=fsck.ldiskfs
238 export RESIZE2FS=$RESIZE2FS
239 if [ -z "$RESIZE2FS" ]; then
240 if which resizefs.ldiskfs >/dev/null 2>&1; then
241 export RESIZE2FS=resizefs.ldiskfs
243 export RESIZE2FS=resize2fs
247 export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after test suite
248 export FSCK_MAX_ERR=4 # File system errors left uncorrected
250 export ZFS=${ZFS:-zfs}
251 export ZPOOL=${ZPOOL:-zpool}
252 export ZDB=${ZDB:-zdb}
253 export PARTPROBE=${PARTPROBE:-partprobe}
255 #[ -d /r ] && export ROOT=${ROOT:-/r}
256 export TMP=${TMP:-$ROOT/tmp}
257 export TESTSUITELOG=${TMP}/${TESTSUITE}.log
258 export LOGDIR=${LOGDIR:-${TMP}/test_logs/$(date +%s)}
259 export TESTLOG_PREFIX=$LOGDIR/$TESTSUITE
261 export HOSTNAME=${HOSTNAME:-$(hostname -s)}
262 if ! echo $PATH | grep -q $LUSTRE/utils; then
263 export PATH=$LUSTRE/utils:$PATH
265 if ! echo $PATH | grep -q $LUSTRE/utils/gss; then
266 export PATH=$LUSTRE/utils/gss:$PATH
268 if ! echo $PATH | grep -q $LUSTRE/tests; then
269 export PATH=$LUSTRE/tests:$PATH
271 if ! echo $PATH | grep -q $LUSTRE/../lustre-iokit/sgpdd-survey; then
272 export PATH=$LUSTRE/../lustre-iokit/sgpdd-survey:$PATH
274 export LST=${LST:-"$LUSTRE/../lnet/utils/lst"}
275 [ ! -f "$LST" ] && export LST=$(which lst)
276 export SGPDDSURVEY=${SGPDDSURVEY:-"$LUSTRE/../lustre-iokit/sgpdd-survey/sgpdd-survey"}
277 [ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey)
278 export MCREATE=${MCREATE:-mcreate}
279 export MULTIOP=${MULTIOP:-multiop}
280 export MMAP_CAT=${MMAP_CAT:-mmap_cat}
281 export STATX=${STATX:-statx}
282 # Ubuntu, at least, has a truncate command in /usr/bin
283 # so use the full path to our own truncate command.
284 export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate}
285 export FSX=${FSX:-$LUSTRE/tests/fsx}
286 export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"}
287 [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
288 if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
289 export PATH=$LUSTRE/tests/racer:$PATH
291 if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then
292 export PATH=$LUSTRE/tests/mpi:$PATH
294 export RSYNC_RSH=${RSYNC_RSH:-rsh}
296 export LNETCTL=${LNETCTL:-"$LUSTRE/../lnet/utils/lnetctl"}
297 [ ! -f "$LNETCTL" ] && export LNETCTL=$(which lnetctl 2> /dev/null)
298 export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
299 [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
300 export LFS=${LFS:-"$LUSTRE/utils/lfs"}
301 [ ! -f "$LFS" ] && export LFS=$(which lfs)
303 export PERM_CMD=${PERM_CMD:-"$LCTL conf_param"}
305 export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
306 if [ ! -f "$L_GETIDENTITY" ]; then
307 if which l_getidentity > /dev/null 2>&1; then
308 export L_GETIDENTITY=$(which l_getidentity)
310 export L_GETIDENTITY=NONE
313 export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"}
314 [ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid"
315 export LL_DECODE_LINKEA=${LL_DECODE_LINKEA:-"$LUSTRE/utils/ll_decode_linkea"}
316 [ ! -f "$LL_DECODE_LINKEA" ] && export LL_DECODE_LINKEA="ll_decode_linkea"
317 export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
318 [ ! -f "$MKFS" ] && export MKFS="mkfs.lustre"
319 export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
320 [ ! -f "$TUNEFS" ] && export TUNEFS="tunefs.lustre"
321 export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
322 export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod}
323 [ ! -f "$LUSTRE_RMMOD" ] &&
324 export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null)
325 export LUSTRE_ROUTES_CONVERSION=${LUSTRE_ROUTES_CONVERSION:-$LUSTRE/scripts/lustre_routes_conversion}
326 [ ! -f "$LUSTRE_ROUTES_CONVERSION" ] &&
327 export LUSTRE_ROUTES_CONVERSION=$(which lustre_routes_conversion 2> /dev/null)
328 export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate}
329 [ ! -f "$LFS_MIGRATE" ] &&
330 export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null)
331 export LR_READER=${LR_READER:-"$LUSTRE/utils/lr_reader"}
332 [ ! -f "$LR_READER" ] &&
333 export LR_READER=$(which lr_reader 2> /dev/null)
334 [ -z "$LR_READER" ] && export LR_READER="/usr/sbin/lr_reader"
335 export LSOM_SYNC=${LSOM_SYNC:-"$LUSTRE/utils/llsom_sync"}
336 [ ! -f "$LSOM_SYNC" ] &&
337 export LSOM_SYNC=$(which llsom_sync 2> /dev/null)
338 [ -z "$LSOM_SYNC" ] && export LSOM_SYNC="/usr/sbin/llsom_sync"
339 export NAME=${NAME:-local}
340 export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
341 [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] &&
342 export LGSSD=$(which lgssd)
343 export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"}
344 [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null)
345 export KRB5DIR=${KRB5DIR:-"/usr/kerberos"}
347 export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests}
349 export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"}
350 [ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null)
352 export DMSETUP=${DMSETUP:-dmsetup}
353 export DM_DEV_PATH=${DM_DEV_PATH:-/dev/mapper}
354 export LOSETUP=${LOSETUP:-losetup}
356 if [ "$ACCEPTOR_PORT" ]; then
357 export PORT_OPT="--port $ACCEPTOR_PORT"
361 $RPC_MODE || echo "Using GSS shared-key feature"
362 which lgss_sk > /dev/null 2>&1 ||
363 error_exit "built with lgss_sk disabled! SEC=$SEC"
371 $RPC_MODE || echo "Using GSS/krb5 ptlrpc security flavor"
372 which lgss_keyring > /dev/null 2>&1 ||
373 error_exit "built with gss disabled! SEC=$SEC"
384 IDENTITY_UPCALL=false
388 export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
390 # Paths on remote nodes, if different
391 export RLUSTRE=${RLUSTRE:-$LUSTRE}
392 export RPWD=${RPWD:-$PWD}
393 export I_MOUNTED=${I_MOUNTED:-"no"}
394 export AUSTER_CLEANUP=${AUSTER_CLEANUP:-false}
395 if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \
396 ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \
397 ! -f /lib/modules/$(uname -r)/extra/kernel/fs/lustre/mdt.ko -a \
398 ! -f $LUSTRE/mdt/mdt.ko ]; then
399 export CLIENTMODSONLY=yes
402 export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
403 export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
407 while getopts "rvwf:" opt $*; do
412 w) WRITECONF=writeconf;;
417 shift $((OPTIND - 1))
420 # print the duration of each test if DDETAILS is "true"
421 DDETAILS=${DDETAILS:-false}
422 [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
427 export TF_FAIL=${TF_FAIL:-$TMP/tf.fail}
429 # Constants used in more than one test script
430 export LOV_MAX_STRIPE_COUNT=2000
431 export DELETE_OLD_POOLS=${DELETE_OLD_POOLS:-false}
432 export KEEP_POOLS=${KEEP_POOLS:-false}
434 export MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
435 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
438 # use localrecov to enable recovery for local clients, LU-12722
439 [[ $MDS1_VERSION -lt $(version_code 2.13.52) ]] || {
440 export MDS_MOUNT_OPTS=${MDS_MOUNT_OPTS:-"-o localrecov"}
441 export MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-"-o localrecov"}
444 [[ $OST1_VERSION -lt $(version_code 2.13.52) ]] ||
445 export OST_MOUNT_OPTS=${OST_MOUNT_OPTS:-"-o localrecov"}
452 ncpts=$(do_facet $facet "lctl get_param -n " \
453 "cpu_partition_table 2>/dev/null| wc -l" || echo 1)
455 if [ $ncpts -eq 0 ]; then
462 # Return a numeric version code based on a version string. The version
463 # code is useful for comparing two version strings to see which is newer.
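# Illustrative example (each dotted field is packed into one byte):
#   version_code 2.13.52 -> (2 << 24) | (13 << 16) | (52 << 8) = 34419712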
465 # split arguments like "1.8.6-wc3" into "1", "8", "6", "3"
466 eval set -- $(tr "[:punct:][a-z]" " " <<< $*)
468 echo -n $(((${1:-0}<<24) | (${2:-0}<<16) | (${3:-0}<<8) | (${4:-0})))
471 export LINUX_VERSION=$(uname -r | sed -e "s/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1/")
472 export LINUX_VERSION_CODE=$(version_code ${LINUX_VERSION//\./ })
474 # Report the Lustre build version string (e.g. 1.8.7.3 or 2.4.1).
476 # usage: lustre_build_version
478 # All Lustre versions support "lctl get_param" to report the version of the
479 # code running in the kernel (what our tests are interested in), but it
480 # doesn't work without modules loaded. After 2.9.53 and in upstream kernels
481 # the "version" parameter doesn't include "lustre: " at the beginning.
482 # If that fails, call "lctl lustre_build_version" which prints either (or both)
483 # the userspace and kernel build versions, but until 2.8.55 required root
484 # access to get the Lustre kernel version. If that also fails, fall back to
485 # using "lctl --version", which is easy to parse and works without the kernel
486 # modules, but was only added in 2.6.50 and only prints the lctl tool version,
487 # not the module version, though they are usually the same.
489 # Various commands and their output format for different Lustre versions:
490 # lctl get_param version: 2.9.55
491 # lctl get_param version: lustre: 2.8.53
492 # lctl get_param version: lustre: 2.6.52
493 # kernel: patchless_client
494 # build: v2_6_92_0-2.6.32-431.el6_lustre.x86_64
495 # lctl lustre_build_version: Lustre version: 2.8.53_27_gae67fc01
496 # lctl lustre_build_version: error: lustre_build_version: Permission denied
497 # (as non-root user) lctl version: v2_6_92_0-2.6.32-431.el6.x86_64
498 # lctl lustre_build_version: Lustre version: 2.5.3-2.6.32.26-175.fc12.x86_64
499 # lctl version: 2.5.3-2.6.32..26-175fc12.x86_64
500 # lctl --version: lctl 2.6.50
502 # output: prints version string to stdout in (up to 4) dotted-decimal values
503 lustre_build_version() {
504 local facet=${1:-client}
505 local facet_version=${facet}_VERSION
507 # if the global variable is already set, then use that
508 [ -n "${!facet_version}" ] && echo ${!facet_version} && return
510 # this is the currently-running version of the kernel modules
511 local ver=$(do_facet $facet "$LCTL get_param -n version 2>/dev/null")
512 # we mostly test 2.10+ systems, only try others if the above fails
513 if [ -z "$ver" ]; then
514 ver=$(do_facet $facet "$LCTL lustre_build_version 2>/dev/null")
516 if [ -z "$ver" ]; then
517 ver=$(do_facet $facet "$LCTL --version 2>/dev/null" |
520 local lver=$(egrep -i "lustre: |version: " <<<"$ver" | head -n 1)
521 [ -n "$lver" ] && ver="$lver"
523 lver=$(sed -e 's/[^:]*: //' -e 's/^v//' -e 's/[ -].*//' <<<$ver |
524 tr _ . | cut -d. -f1-4)
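	# e.g. "Lustre version: 2.8.53_27_gae67fc01" is reduced to "2.8.53.27"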
526 # save in global variable for the future
527 export $facet_version=$lver
532 # Report the Lustre numeric build version code for the supplied facet.
533 lustre_version_code() {
534 version_code $(lustre_build_version $1)
538 /sbin/lsmod | grep -q "^\<$1\>"
550 msg="$(insmod $module $args 2>&1)" && return 0 || rc=$?
553 # parallels can't load modules directly from prlfs, use /tmp instead
554 if $PRLFS || [[ "$(stat -f -c%t $module)" == "7c7c6673" ]]; then
555 local target="$(mktemp)"
557 cp "$module" "$target"
560 [[ $rc == 0 ]] && PRLFS=true
568 # Load a module on the system where this is running.
570 # usage: load_module module_name [module arguments for insmod/modprobe]
572 # If module arguments are not given but MODOPTS_<MODULE> is set, then its value
573 # will be used as the arguments. Otherwise arguments will be obtained from
574 # /etc/modprobe.conf, from /etc/modprobe.d/Lustre, or else none will be used.
577 local module=$1 # '../libcfs/libcfs/libcfs', 'obdclass/obdclass', ...
580 local base=$(basename $module $ext)
582 local -A module_is_loaded_aa
586 for mod in $(lsmod | awk '{ print $1; }'); do
587 module_is_loaded_aa[${mod//-/_}]=true
591 ${module_is_loaded_aa[${1//-/_}]:-false}
594 if module_is_loaded $base; then
598 if [[ -f $LUSTRE/$module$ext ]]; then
599 path=$LUSTRE/$module$ext
600 elif [[ "$base" == lnet_selftest ]] &&
601 [[ -f $LUSTRE/../lnet/selftest/$base$ext ]]; then
602 path=$LUSTRE/../lnet/selftest/$base$ext
607 if [[ -n "$path" ]]; then
608 # Try to load any non-Lustre modules that $module depends on.
609 for mod in $(modinfo --field=depends $path | tr ',' ' '); do
610 if ! module_is_loaded $mod; then
616 # If no module arguments were passed then get them from
617 # $MODOPTS_<MODULE>, otherwise from modprobe.conf.
618 if [ $# -eq 0 ]; then
619 # $MODOPTS_<MODULE>; we could use associative arrays, but that's
620 # not in Bash until 4.x, so we resort to eval.
621 optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
622 eval set -- \$$optvar
623 if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
624 # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
626 opt=$(awk -v var="^options $base" '$0 ~ var \
627 {gsub("'"options $base"'",""); print}' \
629 set -- $(echo -n $opt)
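		# illustrative example: a modprobe.conf line
		# "options lnet networks=tcp0(eth0)" yields the single
		# argument "networks=tcp0(eth0)"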
631 # Ensure we have accept=all for lnet
632 if [[ "$base" == lnet ]]; then
633 # OK, this is a bit wordy...
634 local arg accept_all_present=false
637 [[ "$arg" == accept=all ]] &&
638 accept_all_present=true
641 $accept_all_present || set -- "$@" accept=all
648 [ $# -gt 0 ] && echo "${module} options: '$*'"
650 # Note that insmod will ignore anything in modprobe.conf, which is why
651 # we're passing options on the command-line. If $path does not exist
652 # then we must be testing a "make install" or"rpm" installation. Also
653 # note that failing to load ptlrpc_gss is not considered fatal.
654 if [[ -n "$path" ]]; then
655 lustre_insmod $path "$@"
656 elif [[ "$base" == ptlrpc_gss ]]; then
657 if ! modprobe $base "$@" 2>/dev/null; then
658 echo "gss/krb5 is not supported"
665 load_modules_local() {
666 if [ -n "$MODPROBE" ]; then
668 echo "Using modprobe to load modules"
672 # Create special udev test rules on every node
673 if [ -f $LUSTRE/lustre/conf/99-lustre.rules ]; then {
674 sed -e 's|/usr/sbin/lctl|$LCTL|g' $LUSTRE/lustre/conf/99-lustre.rules > /etc/udev/rules.d/99-lustre-test.rules
676 echo "SUBSYSTEM==\"lustre\", ACTION==\"change\", ENV{PARAM}==\"?*\", RUN+=\"$LCTL set_param '\$env{PARAM}=\$env{SETTING}'\"" > /etc/udev/rules.d/99-lustre-test.rules
678 udevadm control --reload-rules
681 # For kmemleak-enabled kernels we need to clear all past state
682 # that obviously has nothing to do with this Lustre run.
683 # Disable automatic memory scanning to avoid a performance hit.
684 if [ -f /sys/kernel/debug/kmemleak ] ; then
685 echo scan=off > /sys/kernel/debug/kmemleak || true
686 echo scan > /sys/kernel/debug/kmemleak || true
687 echo clear > /sys/kernel/debug/kmemleak || true
690 echo Loading modules from $LUSTRE
694 if [ -f /sys/devices/system/cpu/online ]; then
695 ncpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/online) + 1))
696 echo "detected $ncpus online CPUs by sysfs"
698 ncpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
700 if [ $rc -eq 0 ]; then
701 echo "detected $ncpus online CPUs by getconf"
703 echo "Can't detect number of CPUs"
708 # if there is only one CPU core, libcfs can only create one partition
709 # if there is more than 4 CPU cores, libcfs should create multiple CPU
710 # partitions. So we just force libcfs to create 2 partitions for
711 # system with 2 or 4 cores
712 local saved_opts="$MODOPTS_LIBCFS"
713 if [ $ncpus -le 4 ] && [ $ncpus -gt 1 ]; then
714 # force to enable multiple CPU partitions
715 echo "Force libcfs to create 2 CPU partitions"
716 MODOPTS_LIBCFS="cpu_npartitions=2 $MODOPTS_LIBCFS"
718 echo "libcfs will create CPU partition based on online CPUs"
721 load_module ../libcfs/libcfs/libcfs
722 # Prevent local MODOPTS_LIBCFS being passed as part of environment
723 # variable to remote nodes
724 MODOPTS_LIBCFS=$saved_opts
727 load_module ../lnet/lnet/lnet
729 LNDPATH=${LNDPATH:-"../lnet/klnds"}
730 if [ -z "$LNETLND" ]; then
732 o2ib*) LNETLND="o2iblnd/ko2iblnd" ;;
733 tcp*) LNETLND="socklnd/ksocklnd" ;;
734 *) local lnd="${NETTYPE%%[0-9]}lnd"
735 [ -f "$LNDPATH/$lnd/k$lnd.ko" ] &&
736 LNETLND="$lnd/k$lnd" ||
737 LNETLND="socklnd/ksocklnd"
740 load_module ../lnet/klnds/$LNETLND
741 load_module obdclass/obdclass
742 load_module ptlrpc/ptlrpc
743 load_module ptlrpc/gss/ptlrpc_gss
751 load_module obdecho/obdecho
752 if ! client_only; then
753 load_module lfsck/lfsck
754 [ "$LQUOTA" != "no" ] &&
755 load_module quota/lquota $LQUOTAOPTS
756 if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
757 load_module osd-zfs/osd_zfs
758 elif [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
759 load_module ../ldiskfs/ldiskfs
760 load_module osd-ldiskfs/osd_ldiskfs
772 load_module llite/lustre
773 [ -d /r ] && OGDB=${OGDB:-"/r/tmp"}
775 rm -f $OGDB/ogdb-$HOSTNAME
776 $LCTL modules > $OGDB/ogdb-$HOSTNAME
778 # 'mount' doesn't look in $PATH, just sbin
779 local mount_lustre=$LUSTRE/utils/mount.lustre
780 if [ -f $mount_lustre ]; then
781 local sbin_mount=$(readlink -f /sbin)/mount.lustre
782 if grep -qw "$sbin_mount" /proc/mounts; then
783 cmp -s $mount_lustre $sbin_mount || umount $sbin_mount
785 if ! grep -qw "$sbin_mount" /proc/mounts; then
786 [ ! -f "$sbin_mount" ] && touch "$sbin_mount"
787 if [ ! -s "$sbin_mount" -a -w "$sbin_mount" ]; then
788 cat <<- EOF > "$sbin_mount"
791 echo "This $sbin_mount just a mountpoint." 1>&2
792 echo "It is never supposed to be run." 1>&2
793 logger -p emerg -- "using stub $sbin_mount $@"
796 chmod a+x $sbin_mount
798 mount --bind $mount_lustre $sbin_mount ||
799 error "can't bind $mount_lustre to $sbin_mount"
807 # load modules on remote nodes optionally
808 # lustre-tests have to be installed on these nodes
809 if $LOAD_MODULES_REMOTE; then
810 local list=$(comma_list $(remote_nodes_list))
811 if [ -n "$list" ]; then
812 echo "loading modules on: '$list'"
813 do_rpc_nodes "$list" load_modules_local
819 LEAK_LUSTRE=$(dmesg | tail -n 30 | grep "obd_memory.*leaked" || true)
820 LEAK_PORTALS=$(dmesg | tail -n 20 | egrep -i "libcfs.*memory leaked" || true)
821 if [ "$LEAK_LUSTRE" -o "$LEAK_PORTALS" ]; then
822 echo "$LEAK_LUSTRE" 1>&2
823 echo "$LEAK_PORTALS" 1>&2
824 mv $TMP/debug $TMP/debug-leak.`date +%s` || true
825 echo "Memory leaks detected"
826 [ -n "$IGNORE_LEAK" ] && { echo "ignoring leaks" && return 0; } || true
831 unload_modules_local() {
832 $LUSTRE_RMMOD ldiskfs || return 2
834 [ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
835 rm /etc/udev/rules.d/99-lustre-test.rules
836 udevadm control --reload-rules
839 check_mem_leak || return 254
847 wait_exit_ST client # bug 12845
849 unload_modules_local || rc=$?
851 if $LOAD_MODULES_REMOTE; then
852 local list=$(comma_list $(remote_nodes_list))
853 if [ -n "$list" ]; then
854 echo "unloading modules on: '$list'"
855 do_rpc_nodes "$list" unload_modules_local
859 local sbin_mount=$(readlink -f /sbin)/mount.lustre
860 if grep -qe "$sbin_mount " /proc/mounts; then
861 umount $sbin_mount || true
862 [ -s $sbin_mount ] && ! grep -q "STUB MARK" $sbin_mount ||
866 [[ $rc -eq 0 ]] && echo "modules unloaded."
872 local facet=${1:-$SINGLEMDS}
875 case $(facet_fstype $facet) in
876 ldiskfs) size=50;; # largest seen is 44, leave some headroom
877 # grant_block_size is in bytes, allow at least 2x max blocksize
878 zfs) size=$(lctl get_param osc.$FSNAME*.import |
879 awk '/grant_block_size:/ {print $2/512; exit;}')
883 echo -n $((size * MDSCOUNT))
887 local facet=${1:-$SINGLEMDS}
888 local fstype=$(facet_fstype $facet)
891 ldiskfs) size=4;; # ~4KB per inode
892 zfs) size=11;; # 10 to 11KB per inode
898 check_gss_daemon_nodes() {
902 do_nodesv $list "num=\\\$(ps -o cmd -C $dname | grep $dname | wc -l);
903 if [ \\\"\\\$num\\\" -ne 1 ]; then
904 echo \\\$num instance of $dname;
909 check_gss_daemon_facet() {
913 num=$(do_facet $facet ps -o cmd -C $dname | grep $dname | wc -l)
914 if [ $num -ne 1 ]; then
915 echo "$num instance of $dname on $facet"
924 echo Stopping $@ on $list
925 do_nodes $list "killall -2 $@ 2>/dev/null || true"
928 # start gss daemons on all nodes, or "daemon" on "nodes" if set
929 start_gss_daemons() {
933 if [ "$nodes" ] && [ "$daemon" ] ; then
934 echo "Starting gss daemon on nodes: $nodes"
935 do_nodes $nodes "$daemon" || return 8
939 nodes=$(comma_list $(mdts_nodes))
940 echo "Starting gss daemon on mds: $nodes"
942 # Start all versions, in case of switching
943 do_nodes $nodes "$LSVCGSSD -vvv -s -m -o -z" || return 1
945 do_nodes $nodes "$LSVCGSSD -v" || return 1
948 do_nodes $nodes "$LGSSD -v" || return 2
951 nodes=$(comma_list $(osts_nodes))
952 echo "Starting gss daemon on ost: $nodes"
954 # Start all versions, in case of switching
955 do_nodes $nodes "$LSVCGSSD -vvv -s -m -o -z" || return 3
957 do_nodes $nodes "$LSVCGSSD -v" || return 3
959 # starting on clients
961 local clients=${CLIENTS:-$HOSTNAME}
963 echo "Starting $LGSSD on clients $clients "
964 do_nodes $clients "$LGSSD -v" || return 4
967 # wait for the daemons to enter a "stable" state
971 # check daemons are running
973 nodes=$(comma_list $(mdts_nodes) $(osts_nodes))
974 check_gss_daemon_nodes $nodes lsvcgssd || return 5
976 nodes=$(comma_list $(mdts_nodes))
977 check_gss_daemon_nodes $nodes lgssd || return 6
980 check_gss_daemon_nodes $clients lgssd || return 7
985 local nodes=$(comma_list $(mdts_nodes))
987 send_sigint $nodes lsvcgssd lgssd
989 nodes=$(comma_list $(osts_nodes))
990 send_sigint $nodes lsvcgssd
992 nodes=${CLIENTS:-$HOSTNAME}
993 send_sigint $nodes lgssd
997 # Add mount flags for shared key
999 if grep -q skpath <<< "$mt_opts" ; then
1000 mt_opts=$(echo $mt_opts |
1001 sed -e "s#skpath=[^ ,]*#skpath=$SK_PATH#")
1003 if [ -z "$mt_opts" ]; then
1004 mt_opts="-o skpath=$SK_PATH"
1006 mt_opts="$mt_opts,skpath=$SK_PATH"
1016 /usr/lib/lustre/* | /usr/lib64/lustre/* | /usr/lib/lustre | \
1025 [ $from_tree = true ]
1029 if $SHARED_KEY; then
1038 if ! module_loaded ptlrpc_gss; then
1039 load_module ptlrpc/gss/ptlrpc_gss
1040 module_loaded ptlrpc_gss ||
1041 error_exit "init_gss: GSS=$GSS, but gss/krb5 missing"
1044 if $GSS_KRB5 || $GSS_SK; then
1045 start_gss_daemons || error_exit "start gss daemon failed! rc=$?"
1048 if $GSS_SK && ! $SK_NO_KEY; then
1049 echo "Loading basic SSK keys on all servers"
1050 do_nodes $(comma_list $(all_server_nodes)) \
1051 "lgss_sk -t server -l $SK_PATH/$FSNAME.key || true"
1052 do_nodes $(comma_list $(all_server_nodes)) \
1053 "keyctl show | grep lustre | cut -c1-11 |
1055 xargs -IX keyctl setperm X 0x3f3f3f3f"
1058 if $GSS_SK && $SK_NO_KEY; then
1059 local numclients=${1:-$CLIENTCOUNT}
1060 local clients=${CLIENTS:-$HOSTNAME}
1062 # security ctx config for keyring
1064 local lgssc_conf_file="/etc/request-key.d/lgssc.conf"
1066 if from_build_tree; then
1067 mkdir -p $SK_OM_PATH
1068 if grep -q request-key /proc/mounts > /dev/null; then
1069 echo "SSK: Request key already mounted."
1071 mount -o bind $SK_OM_PATH /etc/request-key.d/
1073 local lgssc_conf_line='create lgssc * * '
1074 lgssc_conf_line+=$(which lgss_keyring)
1075 lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
1076 echo "$lgssc_conf_line" > $lgssc_conf_file
1079 [ -e $lgssc_conf_file ] ||
1080 error_exit "Could not find key options in $lgssc_conf_file"
1081 echo "$lgssc_conf_file content is:"
1082 cat $lgssc_conf_file
1084 if ! local_mode; then
1085 if from_build_tree; then
1086 do_nodes $(comma_list $(all_nodes)) "mkdir -p \
1088 do_nodes $(comma_list $(all_nodes)) "mount \
1089 -o bind $SK_OM_PATH \
1090 /etc/request-key.d/"
1091 do_nodes $(comma_list $(all_nodes)) "rsync \
1092 -aqv $HOSTNAME:$lgssc_conf_file \
1093 $lgssc_conf_file >/dev/null 2>&1"
1095 do_nodes $(comma_list $(all_nodes)) \
1096 "echo $lgssc_conf_file: ; \
1097 cat $lgssc_conf_file"
1101 # create shared key on all nodes
1102 mkdir -p $SK_PATH/nodemap
1103 rm -f $SK_PATH/$FSNAME.key $SK_PATH/nodemap/c*.key \
1104 $SK_PATH/$FSNAME-*.key
1105 # for nodemap testing each client may need own key,
1106 # and S2S now requires keys as well, both for "client"
1109 lgss_sk -t server -f$FSNAME -n $SK_S2SNMCLI \
1110 -w $SK_PATH/$FSNAME-nmclient.key \
1111 -d /dev/urandom >/dev/null 2>&1
1112 lgss_sk -t mgs,server -f$FSNAME -n $SK_S2SNM \
1113 -w $SK_PATH/$FSNAME-s2s-server.key \
1114 -d /dev/urandom >/dev/null 2>&1
1117 lgss_sk -t server -f$FSNAME -w $SK_PATH/$FSNAME.key \
1118 -d /dev/urandom >/dev/null 2>&1
1120 for i in $(seq 0 $((numclients - 1))); do
1121 lgss_sk -t server -f$FSNAME -n c$i \
1122 -w $SK_PATH/nodemap/c$i.key -d /dev/urandom \
1126 if ! local_mode; then
1127 for lnode in $(all_nodes); do
1128 scp -r $SK_PATH ${lnode}:$(dirname $SK_PATH)/
1131 # Set client keys to client type to generate prime P
1133 do_nodes $(all_nodes) "lgss_sk -t client,server -m \
1134 $SK_PATH/$FSNAME.key >/dev/null 2>&1"
1136 do_nodes $clients "lgss_sk -t client -m \
1137 $SK_PATH/$FSNAME.key >/dev/null 2>&1"
1138 do_nodes $clients "find $SK_PATH/nodemap -name \*.key | \
1139 xargs -IX lgss_sk -t client -m X >/dev/null 2>&1"
1141 # This is required for servers as well, if S2S in use
1143 do_nodes $(comma_list $(mdts_nodes)) \
1144 "cp $SK_PATH/$FSNAME-s2s-server.key \
1145 $SK_PATH/$FSNAME-s2s-client.key; lgss_sk \
1146 -t client -m $SK_PATH/$FSNAME-s2s-client.key \
1148 do_nodes $(comma_list $(osts_nodes)) \
1149 "cp $SK_PATH/$FSNAME-s2s-server.key \
1150 $SK_PATH/$FSNAME-s2s-client.key; lgss_sk \
1151 -t client -m $SK_PATH/$FSNAME-s2s-client.key \
1153 do_nodes $clients "lgss_sk -t client \
1154 -m $SK_PATH/$FSNAME-nmclient.key \
1159 # mount options for servers and clients
1160 MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS)
1161 MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS)
1162 OST_MOUNT_OPTS=$(add_sk_mntflag $OST_MOUNT_OPTS)
1163 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
1165 if [ -z "$LGSS_KEYRING_DEBUG" ]; then
1166 LGSS_KEYRING_DEBUG=4
1170 if [ -n "$LGSS_KEYRING_DEBUG" ] && \
1171 ( local_mode || from_build_tree ); then
1173 sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG
1174 elif [ -n "$LGSS_KEYRING_DEBUG" ]; then
1175 do_nodes $(comma_list $(all_nodes)) "modprobe ptlrpc_gss && \
1177 sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG"
1184 # maybe cleanup credential cache?
1191 do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNM"
1192 do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNMCLI"
1193 $RPC_MODE || echo "Sleeping for 10 sec for Nodemap.."
1197 $RPC_MODE || echo "Cleaning up Shared Key.."
1198 do_nodes $(comma_list $(all_nodes)) "rm -f \
1199 $SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key"
1200 do_nodes $(comma_list $(all_nodes)) "keyctl show | \
1201 awk '/lustre/ { print \\\$1 }' | xargs -IX keyctl unlink X"
1202 if from_build_tree; then
1203 # Remove the mount and clean up the files we added to
1205 do_nodes $(comma_list $(all_nodes)) "while grep -q \
1206 request-key.d /proc/mounts; do umount \
1207 /etc/request-key.d/; done"
1208 do_nodes $(comma_list $(all_nodes)) "rm -f \
1209 $SK_OM_PATH/lgssc.conf"
1210 do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
1218 local var=${facet}_svc
1226 echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9_]\+//' |
1227 tr '[:lower:]' '[:upper:]'
1233 if [ $facet == mgs ] || [ $facet == client ]; then
1237 echo -n $facet | sed -e 's/^fs[0-9]\+//' | sed -e 's/^[a-z]\+//'
1245 if [ -n "${!var}" ]; then
1250 var=$(facet_type $facet)FSTYPE
1251 if [ -n "${!var}" ]; then
1256 if [ -n "$FSTYPE" ]; then
1261 if [[ $facet == mgs ]] && combined_mgs_mds; then
1273 local facets=$(get_facets)
1276 for facet in ${facets//,/ }; do
1277 if [ $node == $(facet_host $facet) ] ||
1278 [ $node == "$(facet_failover_host $facet)" ]; then
1279 fstype=$(facet_fstype $facet)
1280 if [[ $fstypes != *$fstype* ]]; then
1281 fstypes+="${fstypes:+,}$fstype"
1290 local num=$(facet_number $facet)
1293 if [[ $(facet_type $facet) = OST ]]; then
1294 index=OSTINDEX${num}
1295 if [[ -n "${!index}" ]]; then
1300 index=${OST_INDICES[num - 1]}
1303 [[ -n "$index" ]] || index=$((num - 1))
1311 local fstype=$(facet_fstype $facet)
1315 label=$(do_facet ${facet} "$E2LABEL ${dev} 2>/dev/null");;
1317 label=$(do_facet ${facet} "$ZFS get -H -o value lustre:svname \
1318 ${dev} 2>/dev/null");;
1320 error "unknown fstype!";;
1327 # Get the device of a facet.
1334 mgs) device=$(mgsdevname) ;;
1335 mds*) device=$(mdsdevname $(facet_number $facet)) ;;
1336 ost*) device=$(ostdevname $(facet_number $facet)) ;;
1337 fs2mds) device=$(mdsdevname 1_2) ;;
1338 fs2ost) device=$(ostdevname 1_2) ;;
1339 fs3ost) device=$(ostdevname 2_2) ;;
1347 # Get the virtual device of a facet.
1354 mgs) device=$(mgsvdevname) ;;
1355 mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
1356 ost*) device=$(ostvdevname $(facet_number $facet)) ;;
1357 fs2mds) device=$(mdsvdevname 1_2) ;;
1358 fs2ost) device=$(ostvdevname 1_2) ;;
1359 fs3ost) device=$(ostvdevname 2_2) ;;
1367 local virt=$(virt-what 2> /dev/null)
1369 [ -n "$virt" ] && { echo $virt; return; } # 'local' masks virt-what's status
1371 virt=$(dmidecode -s system-product-name | awk '{print $1}')
1374 VMware|KVM|VirtualBox|Parallels|Bochs)
1375 echo $virt | tr '[A-Z]' '[a-z]' ;;
1381 # Re-read the partition table on the failover partner host.
1382 # After a ZFS storage pool is created on a shared device, the partition
1383 # table on the device may change. However, the operating system on the
1384 # failover host may not notice the change automatically. Without an
1385 # up-to-date partition table, 'zpool import ..' cannot find the labels,
1386 # whose positions are relative to the partition start rather than the disk start.
1388 # This function performs partprobe on the failover host to make it re-read the
1391 refresh_partition_table() {
1396 host=$(facet_passive_host $facet)
1397 if [[ -n "$host" ]]; then
1398 do_node $host "$PARTPROBE $device"
1403 # Get ZFS storage pool name.
1410 device=$(facet_device $facet)
1411 # poolname is the string before "/"
1412 poolname="${device%%/*}"
1419 # Get ZFS local fsname.
1421 zfs_local_fsname() {
1423 local lfsname=$(basename $(facet_device $facet))
1429 # Create ZFS storage pool.
1436 local opts=${@:-"-o cachefile=none"}
1438 do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
1439 $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1440 $ZPOOL create -f $opts $poolname $vdev"
1444 # Create ZFS file system.
1450 local opts=${@:-"-o mountpoint=legacy"}
1452 do_facet $facet "$ZFS list -H $dataset >/dev/null 2>&1 ||
1453 $ZFS create $opts $dataset"
1457 # Export ZFS storage pool.
1458 # Before exporting the pool, all datasets within the pool should be unmounted.
1466 poolname=$(zpool_name $facet)
1468 if [[ -n "$poolname" ]]; then
1469 do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1470 grep -q ^$poolname/ /proc/mounts ||
1471 $ZPOOL export $opts $poolname"
1476 # Destroy ZFS storage pool.
1477 # Destroy the given pool and free up any devices for other use. This command
1478 # tries to unmount any active datasets before destroying the pool.
1479 # -f Force any active datasets contained within the pool to be unmounted.
1483 local poolname=${2:-$(zpool_name $facet)}
1485 if [[ -n "$poolname" ]]; then
1486 do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1487 $ZPOOL destroy -f $poolname"
1492 # Import ZFS storage pool.
1493 # Force importing, even if the pool appears to be potentially active.
1498 local opts=${@:-"-o cachefile=none -o failmode=panic"}
1501 poolname=$(zpool_name $facet)
1503 if [[ -n "$poolname" ]]; then
1504 opts+=" -d $(dirname $(facet_vdevice $facet))"
1505 do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
1506 $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1507 $ZPOOL import -f $opts $poolname"
1512 # Reimport ZFS storage pool with new name
1517 local opts="-o cachefile=none"
1518 local poolname=$(zpool_name $facet)
1520 opts+=" -d $(dirname $(facet_vdevice $facet))"
1521 do_facet $facet "$ZPOOL export $poolname;
1522 $ZPOOL import $opts $poolname $newpool"
1526 # Set the "cachefile=none" property on ZFS storage pool so that the pool
1527 # is not automatically imported on system startup.
1529 # In a failover environment, this will provide resource level fencing which
1530 # will ensure that the same ZFS storage pool will not be imported concurrently
1531 # on different nodes.
1533 disable_zpool_cache() {
1537 poolname=$(zpool_name $facet)
1539 if [[ -n "$poolname" ]]; then
1540 do_facet $facet "$ZPOOL set cachefile=none $poolname"
1545 # This and set_osd_param() shall be used to access OSD parameters
1546 # that once existed under "obdfilter":
1551 # writethrough_cache_enable
1555 local device=${2:-$FSNAME-OST*}
1558 do_nodes $nodes "$LCTL get_param -n obdfilter.$device.$name \
1559 osd-*.$device.$name 2>&1" | grep -v 'error:'
1564 local device=${2:-$FSNAME-OST*}
1568 do_nodes $nodes "$LCTL set_param -n obdfilter.$device.$name=$value \
1569 osd-*.$device.$name=$value 2>&1" | grep -v 'error:'
1573 local dz=${1:-$DEBUG_SIZE}
1575 if [ -f /sys/devices/system/cpu/possible ]; then
1576 local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
1578 local cpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
1581 # bug 19944, adjust size to be -gt num_possible_cpus()
1582 # and guarantee at least 2MB for every CPU
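# e.g. with 16 possible CPUs, debug_mb is raised to at least 32 (MB)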
1583 if [ -n "$cpus" ] && [ $((cpus * 2)) -gt $dz ]; then
1586 lctl set_param debug_mb=$dz
1589 set_default_debug () {
1590 local debug=${1:-"$PTLDEBUG"}
1591 local subsys=${2:-"$SUBSYSTEM"}
1592 local debug_size=${3:-$DEBUG_SIZE}
1594 [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
1595 [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
1597 [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
1600 set_default_debug_nodes () {
1602 local debug="${2:-"$PTLDEBUG"}"
1603 local subsys="${3:-"$SUBSYSTEM"}"
1604 local debug_size="${4:-$DEBUG_SIZE}"
1606 if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
1607 nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
1611 [[ -z "$nodes" ]] ||
1612 do_rpc_nodes "$nodes" set_default_debug \
1613 \\\"$debug\\\" \\\"$subsys\\\" $debug_size || true
1616 set_default_debug_facet () {
1618 local debug="${2:-"$PTLDEBUG"}"
1619 local subsys="${3:-"$SUBSYSTEM"}"
1620 local debug_size="${4:-$DEBUG_SIZE}"
1621 local node=$(facet_active_host $facet)
1623 [ -n "$node" ] || error "No host defined for facet $facet"
1625 set_default_debug_nodes $node "$debug" "$subsys" $debug_size
1628 set_params_nodes () {
1629 [[ $# -ge 2 ]] || return 0
1633 do_nodes $nodes $LCTL set_param $@
1636 set_params_clients () {
1637 local clients=${1:-$CLIENTS}
1638 local params=${2:-$CLIENT_LCTL_SETPARAM_PARAM}
1640 [[ -n $params ]] || return 0
1641 set_params_nodes $clients $params
1645 local hostid=${1:-$(hostid)}
1647 if [ ! -s /etc/hostid ]; then
1648 printf $(echo -n $hostid |
1649 sed 's/\(..\)\(..\)\(..\)\(..\)/\\x\4\\x\3\\x\2\\x\1/') >/etc/hostid
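		# e.g. a hostid string "0a0b0c0d" is stored as the
		# little-endian byte sequence \x0d\x0c\x0b\x0a in /etc/hostid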
1655 local facets=${1:-$(get_facets)}
1658 for facet in ${facets//,/ }; do
1661 [ $RC -eq 0 ] && continue
1663 if [ "$TESTSUITE.$TESTNAME" = "replay-dual.test_0a" ]; then
1664 skip_noexit "Restart of $facet failed!" &&
1667 error "Restart of $facet failed!"
1674 # Add argument "arg" (e.g., "loop") to the comma-separated list
1675 # of arguments for option "opt" (e.g., "-o") on command
1676 # line "opts" (e.g., "-o flock").
1682 local opt_pattern="\([[:space:]]\+\|^\)$opt"
1684 if echo "$opts" | grep -q $opt_pattern; then
1685 opts=$(echo "$opts" | sed -e \
1686 "s/$opt_pattern[[:space:]]*[^[:space:]]\+/&,$arg/")
1688 opts+="${opts:+ }$opt $arg"
1694 # Associate loop device with a given regular file.
1695 # Return the loop device.
1697 setup_loop_device() {
1701 do_facet $facet "loop_dev=\\\$($LOSETUP -j $file | cut -d : -f 1);
1702 if [[ -z \\\$loop_dev ]]; then
1703 loop_dev=\\\$($LOSETUP -f);
1704 $LOSETUP \\\$loop_dev $file || loop_dev=;
1706 echo -n \\\$loop_dev"
1710 # Detach a loop device.
1712 cleanup_loop_device() {
1716 do_facet $facet "! $LOSETUP $loop_dev >/dev/null 2>&1 ||
1717 $LOSETUP -d $loop_dev"
1721 # Check if a given device is a block device.
1728 [[ -n "$dev" ]] || return 1
1729 do_facet $facet "test -b $dev" || return 1
1730 if [[ -n "$size" ]]; then
1731 local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k \
1732 count=1 skip=$size 2>&1" |
1733 awk '($3 == "in") { print $1 }')
1734 [[ "$in" = "1+0" ]] || return 1
1739 # Check if a given device is a device-mapper device.
1745 [[ -n "$dev" ]] || return 1
1746 do_facet $facet "$DMSETUP status $dev >/dev/null 2>&1"
1750 # Check if a given device is a device-mapper flakey device.
1752 is_dm_flakey_dev() {
1757 [[ -n "$dev" ]] || return 1
1759 type=$(do_facet $facet "$DMSETUP status $dev 2>&1" |
1761 [[ $type = flakey ]] && return 0 || return 1
1765 # Check if the device-mapper flakey target is supported by the kernel
1766 # of the $facet node.
1768 dm_flakey_supported() {
1772 do_facet $facet "modprobe dm-flakey;
1773 $DMSETUP targets | grep -q flakey" &> /dev/null
1777 # Get the device-mapper flakey device name of a given facet.
1779 dm_facet_devname() {
1781 [[ $facet = mgs ]] && combined_mgs_mds && facet=mds1
1783 echo -n ${facet}_flakey
1787 # Get the device-mapper flakey device of a given facet.
1788 # A device created by dmsetup will appear as /dev/mapper/<device-name>.
1790 dm_facet_devpath() {
1793 echo -n $DM_DEV_PATH/$(dm_facet_devname $facet)
1797 # Set a device-mapper device with a new table.
1799 # The table has the following format:
1800 # <logical_start_sector> <num_sectors> <target_type> <target_args>
1802 # flakey <target_args> includes:
1803 # <destination_device> <offset> <up_interval> <down_interval> \
1804 # [<num_features> [<feature_arguments>]]
1806 # linear <target_args> includes:
1807 # <destination_device> <start_sector>
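# e.g. the flakey table built below is
#   "0 <num_sectors> flakey <real_dev> 0 0 1800 1 drop_writes"
# (up_interval=0, down_interval=1800, so writes are silently dropped)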
1809 dm_set_dev_table() {
1812 local target_type=$3
1818 read tmp num_sectors tmp real_dev tmp \
1819 <<< $(do_facet $facet "$DMSETUP table $dm_dev")
1821 case $target_type in
1823 table="0 $num_sectors flakey $real_dev 0 0 1800 1 drop_writes"
1826 table="0 $num_sectors linear $real_dev 0"
1828 *) error "invalid target type $target_type" ;;
1831 do_facet $facet "$DMSETUP suspend --nolockfs --noflush $dm_dev" ||
1832 error "failed to suspend $dm_dev"
1833 do_facet $facet "$DMSETUP load $dm_dev --table \\\"$table\\\"" ||
1834 error "failed to load $target_type table into $dm_dev"
1835 do_facet $facet "$DMSETUP resume $dm_dev" ||
1836 error "failed to resume $dm_dev"
1840 # Set a device-mapper flakey device as "read-only" by using the "drop_writes"
1841 # feature parameter.
1844 # All write I/O is silently ignored.
1845 # Read I/O is handled correctly.
1847 dm_set_dev_readonly() {
1849 local dm_dev=${2:-$(dm_facet_devpath $facet)}
1851 dm_set_dev_table $facet $dm_dev flakey
1855 # Set a device-mapper device to traditional linear mapping mode.
1857 dm_clear_dev_readonly() {
1859 local dm_dev=${2:-$(dm_facet_devpath $facet)}
1861 dm_set_dev_table $facet $dm_dev linear
1865 # Set the device of a given facet as "read-only".
1867 set_dev_readonly() {
1869 local svc=${facet}_svc
1871 if [[ $(facet_fstype $facet) = zfs ]] ||
1872 ! dm_flakey_supported $facet; then
1873 do_facet $facet $LCTL --device ${!svc} readonly
1875 dm_set_dev_readonly $facet
1880 # Get size in 512-byte sectors (BLKGETSIZE64 / 512) of a given device.
1887 num_sectors=$(do_facet $facet "blockdev --getsz $dev 2>/dev/null")
1888 [[ ${PIPESTATUS[0]} = 0 && -n "$num_sectors" ]] || num_sectors=0
1889 echo -n $num_sectors
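# e.g. a 1 GiB device reports 1073741824 / 512 = 2097152 sectors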
1893 # Create a device-mapper device with a given block device or regular file (will
1894 # be associated with loop device).
1895 # Return the full path of the device-mapper device.
1899 local real_dev=$2 # destination device
1900 local dm_dev_name=${3:-$(dm_facet_devname $facet)} # device name
1901 local dm_dev=$DM_DEV_PATH/$dm_dev_name # device-mapper device
1903 # check if the device-mapper device to be created already exists
1904 if is_dm_dev $facet $dm_dev; then
1905 # if the existing device was set to "read-only", then clear it
1906 ! is_dm_flakey_dev $facet $dm_dev ||
1907 dm_clear_dev_readonly $facet $dm_dev
1913 # check if the destination device is a block device, and if not,
1914 # associate it with a loop device
1915 is_blkdev $facet $real_dev ||
1916 real_dev=$(setup_loop_device $facet $real_dev)
1917 [[ -n "$real_dev" ]] || { echo -n $real_dev; return 2; }
1919 # now create the device-mapper device
1920 local num_sectors=$(get_num_sectors $facet $real_dev)
1921 local table="0 $num_sectors linear $real_dev 0"
1924 do_facet $facet "$DMSETUP create $dm_dev_name --table \\\"$table\\\"" ||
1925 { rc=${PIPESTATUS[0]}; dm_dev=; }
1926 do_facet $facet "$DMSETUP mknodes >/dev/null 2>&1"
1933 # Map the facet name to its device variable name.
1935 facet_device_alias() {
1937 local dev_alias=$facet
1940 fs2mds) dev_alias=mds1_2 ;;
1941 fs2ost) dev_alias=ost1_2 ;;
1942 fs3ost) dev_alias=ost2_2 ;;
1950 # Save the original value of the facet device and export the new value.
1956 local active_facet=$(facet_active $facet)
1957 local dev_alias=$(facet_device_alias $active_facet)
1958 local dev_name=${dev_alias}_dev
1959 local dev=${!dev_name}
1961 if [[ $active_facet = $facet ]]; then
1962 local failover_dev=${dev_alias}failover_dev
1963 if [[ ${!failover_dev} = $dev ]]; then
1964 eval export ${failover_dev}_saved=$dev
1965 eval export ${failover_dev}=$dm_dev
1968 dev_alias=$(facet_device_alias $facet)
1969 local facet_dev=${dev_alias}_dev
1970 if [[ ${!facet_dev} = $dev ]]; then
1971 eval export ${facet_dev}_saved=$dev
1972 eval export ${facet_dev}=$dm_dev
1976 eval export ${dev_name}_saved=$dev
1977 eval export ${dev_name}=$dm_dev
1981 # Restore the saved value of the facet device.
1986 [[ $facet = mgs ]] && combined_mgs_mds && facet=mds1
1987 local dev_alias=$(facet_device_alias $facet)
1989 local saved_dev=${dev_alias}_dev_saved
1990 [[ -z ${!saved_dev} ]] ||
1991 eval export ${dev_alias}_dev=${!saved_dev}
1993 saved_dev=${dev_alias}failover_dev_saved
1994 [[ -z ${!saved_dev} ]] ||
1995 eval export ${dev_alias}failover_dev=${!saved_dev}
1999 # Remove a device-mapper device.
2000 # If the destination device is a loop device, then also detach it.
2004 local dm_dev=${2:-$(dm_facet_devpath $facet)}
2008 is_dm_dev $facet $dm_dev || return 0
2010 read major minor <<< $(do_facet $facet "$DMSETUP table $dm_dev" |
2011 awk '{ print $4 }' | awk -F: '{ print $1" "$2 }')
2013 do_facet $facet "$DMSETUP remove $dm_dev"
2014 do_facet $facet "$DMSETUP mknodes >/dev/null 2>&1"
2016 unexport_dm_dev $facet
2018 # detach a loop device (block major 7 is the Linux loop driver)
2019 [[ $major -ne 7 ]] || cleanup_loop_device $facet /dev/loop$minor
2021 # unload dm-flakey module
2022 do_facet $facet "modprobe -r dm-flakey" || true
2028 local active_facet=$(facet_active $facet)
2029 local dev_alias=$(facet_device_alias $active_facet)
2030 local dev=${dev_alias}_dev
2031 local opt=${facet}_opt
2032 local mntpt=$(facet_mntpt $facet)
2033 local opts="${!opt} $@"
2034 local fstype=$(facet_fstype $facet)
2036 local dm_dev=${!dev}
2038 module_loaded lustre || load_modules
2042 if dm_flakey_supported $facet; then
2043 dm_dev=$(dm_create_dev $facet ${!dev})
2044 [[ -n "$dm_dev" ]] || dm_dev=${!dev}
2047 is_blkdev $facet $dm_dev || opts=$(csa_add "$opts" -o loop)
2049 devicelabel=$(do_facet ${facet} "$E2LABEL $dm_dev");;
2051 # import ZFS storage pool
2052 import_zpool $facet || return ${PIPESTATUS[0]}
2054 devicelabel=$(do_facet ${facet} "$ZFS get -H -o value \
2055 lustre:svname $dm_dev");;
2057 error "unknown fstype!";;
2060 echo "Starting ${facet}: $opts $dm_dev $mntpt"
2061 # for testing LU-482 error handling in mount_facets() and test_0a()
2062 if [ -f $TMP/test-lu482-trigger ]; then
2066 "mkdir -p $mntpt; $MOUNT_CMD $opts $dm_dev $mntpt"
2070 if [ $RC -ne 0 ]; then
2071 echo "Start of $dm_dev on ${facet} failed ${RC}"
2075 health=$(do_facet ${facet} "$LCTL get_param -n health_check")
2076 if [[ "$health" != "healthy" ]]; then
2077 error "$facet is in a unhealthy state"
2080 set_default_debug_facet $facet
2082 if [[ $opts =~ .*nosvc.* ]]; then
2083 echo "Start $dm_dev without service"
2088 wait_update_facet ${facet} "$E2LABEL $dm_dev \
2089 2>/dev/null | grep -E ':[a-zA-Z]{3}[0-9]{4}'" \
2090 "" || error "$dm_dev failed to initialize!";;
2092 wait_update_facet ${facet} "$ZFS get -H -o value \
2093 lustre:svname $dm_dev 2>/dev/null | \
2094 grep -E ':[a-zA-Z]{3}[0-9]{4}'" "" ||
2095 error "$dm_dev failed to initialize!";;
2098 error "unknown fstype!";;
2102 # commit the device label change to disk
2103 if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then
2104 echo "Commit the device label on ${!dev}"
2105 do_facet $facet "sync; sleep 1; sync"
2109 label=$(devicelabel ${facet} $dm_dev)
2110 [ -z "$label" ] && echo no label for $dm_dev && exit 1
2111 eval export ${facet}_svc=${label}
2112 echo Started ${label}
2114 export_dm_dev $facet $dm_dev
2119 # start facet device options
2125 local dev_alias=$(facet_device_alias $facet)
2127 eval export ${dev_alias}_dev=${device}
2128 eval export ${facet}_opt=\"$@\"
2130 local varname=${dev_alias}failover_dev
2131 if [ -n "${!varname}" ] ; then
2132 eval export ${dev_alias}failover_dev=${!varname}
2134 eval export ${dev_alias}failover_dev=$device
2137 local mntpt=$(facet_mntpt $facet)
2138 do_facet ${facet} mkdir -p $mntpt
2139 eval export ${facet}_MOUNT=$mntpt
2140 mount_facet ${facet}
2150 local HOST=$(facet_active_host $facet)
2151 [[ -z $HOST ]] && echo stop: no host for $facet && return 0
2153 local mntpt=$(facet_mntpt $facet)
2154 running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts || true")
2155 if [ ${running} -ne 0 ]; then
2156 echo "Stopping $mntpt (opts:$@) on $HOST"
2157 do_facet ${facet} $UMOUNT $@ $mntpt
2160 # umount should block, but we should wait for unrelated obd's
2161 # like the MGS or MGC to also stop.
2162 wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
2164 if [[ $(facet_fstype $facet) == zfs ]]; then
2165 # export ZFS storage pool
2166 [ "$KEEP_ZPOOL" = "true" ] || export_zpool $facet
2167 elif dm_flakey_supported $facet; then
2168 local host=${facet}_HOST
2169 local failover_host=${facet}failover_HOST
2170 if [[ -n ${!failover_host} && ${!failover_host} != ${!host} ]]||
2171 $CLEANUP_DM_DEV || [[ $facet = fs* ]]; then
2172 dm_cleanup_dev $facet
2177 # get mdt quota type
2179 local varsvc=${SINGLEMDS}_svc
2180 do_facet $SINGLEMDS $LCTL get_param -n \
2181 osd-$(facet_fstype $SINGLEMDS).${!varsvc}.quota_slave.enabled
2184 # get ost quota type
2186 # All OSTs should have same quota type
2187 local varsvc=ost1_svc
2188 do_facet ost1 $LCTL get_param -n \
2189 osd-$(facet_fstype ost1).${!varsvc}.quota_slave.enabled
2192 # restore old quota type settings
2194 if [ "$old_MDT_QUOTA_TYPE" ]; then
2195 if [[ $PERM_CMD == *"set_param -P"* ]]; then
2196 do_facet mgs $PERM_CMD \
2197 osd-*.$FSNAME-MDT*.quota_slave.enable = \
2200 do_facet mgs $PERM_CMD \
2201 $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
2204 if [ "$old_OST_QUOTA_TYPE" ]; then
2205 if [[ $PERM_CMD == *"set_param -P"* ]]; then
2206 do_facet mgs $PERM_CMD \
2207 osd-*.$FSNAME-OST*.quota_slave.enable = \
2210 do_facet mgs $LCTL conf_param \
2211 $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
2216 # Normalize "lfs df" output so the "filesystem summary" line is parsed
2217 # the same way whether or not it contains a space.
2218 # This will allow fixing the "lfs df" summary line in the future.
2220 $LFS df $* | sed -e 's/filesystem /filesystem_/'
2223 # Get free inodes on the MDT specified by mdt index; free inodes on
2224 # the whole filesystem will be returned when index == -1.
2230 if [ $index -eq -1 ]; then
2233 mdt_uuid=$(mdtuuid_from_index $index)
2236 free_inodes=$(lfs_df -i $MOUNT | grep $mdt_uuid | awk '{print $4}')
2241 # Get the OST device status from 'lfs df' with a given OST index.
2245 local mnt_pnt=${2:-$MOUNT}
2249 ost_uuid=$(ostuuid_from_index $ost_idx $mnt_pnt)
2250 lfs_df $opts $mnt_pnt | awk '/'$ost_uuid'/ { print $7 }'
2256 # save old quota type & set new quota type
2257 local mdt_qtype=$(mdt_quota_type)
2258 local ost_qtype=$(ost_quota_type)
2260 echo "[HOST:$HOSTNAME] [old_mdt_qtype:$mdt_qtype]" \
2261 "[old_ost_qtype:$ost_qtype] [new_qtype:$QUOTA_TYPE]"
2263 export old_MDT_QUOTA_TYPE=$mdt_qtype
2264 export old_OST_QUOTA_TYPE=$ost_qtype
2266 if [[ $PERM_CMD == *"set_param -P"* ]]; then
2267 do_facet mgs $PERM_CMD \
2268 osd-*.$FSNAME-MDT*.quota_slave.enable=$QUOTA_TYPE
2269 do_facet mgs $PERM_CMD \
2270 osd-*.$FSNAME-OST*.quota_slave.enable=$QUOTA_TYPE
2272 do_facet mgs $PERM_CMD $FSNAME.quota.mdt=$QUOTA_TYPE ||
2273 error "set mdt quota type failed"
2274 do_facet mgs $PERM_CMD $FSNAME.quota.ost=$QUOTA_TYPE ||
2275 error "set ost quota type failed"
2278 local quota_usrs=$QUOTA_USERS
2280 # get_filesystem_size
2281 local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
2282 local blk_soft=$((disksz + 1024))
2283 local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
2285 local inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
2286 local i_soft=$inodes
2287 local i_hard=$((i_soft + i_soft / 20))
2289 echo "Total disk size: $disksz block-softlimit: $blk_soft" \
2290 "block-hardlimit: $blk_hard inode-softlimit: $i_soft" \
2291 "inode-hardlimit: $i_hard"
2294 for usr in $quota_usrs; do
2295 echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
2297 cmd="$LFS setquota -$type $usr -b $blk_soft"
2298 cmd="$cmd -B $blk_hard -i $i_soft -I $i_hard $mntpt"
2300 eval $cmd || error "$cmd FAILED!"
2302 # display the quota status
2303 echo "Quota settings for $usr : "
2304 $LFS quota -v -u $usr $mntpt || true
2311 local opts=${3:-$MOUNT_OPTS}
2312 opts=${opts:+-o $opts}
2313 local flags=${4:-$MOUNT_FLAGS}
2315 local device=$MGSNID:/$FSNAME$FILESET
2316 if [ -z "$mnt" -o -z "$FSNAME" ]; then
2317 echo "Bad mount command: opt=$flags $opts dev=$device " \
2323 # update mount options with skpath
2324 opts=$(add_sk_mntflag $opts)
2327 echo "Starting client: $client: $flags $opts $device $mnt"
2328 do_node $client mkdir -p $mnt
2329 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ];then
2330 do_node $client $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \
2332 # disable FILESET if not supported
2333 do_nodes $client lctl get_param -n \
2334 mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
2335 device=$MGSNID:/$FSNAME
2336 do_node $client mkdir -p $mnt/$FILESET
2337 do_node $client "! grep -q $mnt' ' /proc/mounts ||
2340 if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
2341 # Mount using nodemap key
2342 local mountkey=$SK_PATH/$FSNAME-nmclient.key
2343 if $SK_UNIQUE_NM; then
2344 mountkey=$SK_PATH/nodemap/c0.key
2346 local prunedopts=$(echo $opts |
2347 sed -e "s#skpath=[^,^ ]*#skpath=$mountkey#g")
2348 do_node $client $MOUNT_CMD $flags $prunedopts $device $mnt ||
2351 do_node $client $MOUNT_CMD $flags $opts $device $mnt ||
2355 set_default_debug_nodes $client
2356 set_params_clients $client
2367 local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
2369 [ "$3" ] && force=-f
2370 [ $running -eq 0 ] && return 0
2372 echo "Stopping client $client $mnt (opts:$force)"
2373 do_node $client lsof -t $mnt || need_kill=no
2374 if [ "x$force" != "x" ] && [ "x$need_kill" != "xno" ]; then
2375 pids=$(do_node $client lsof -t $mnt | sort -u);
2376 if [ -n "$pids" ]; then
2377 do_node $client kill -9 $pids || true
2381 busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") ||
2383 if [ $busy -ne 0 ] ; then
2384 echo "$mnt is still busy, wait one second" && sleep 1
2385 do_node $client umount $force $mnt
2389 # Mount the file system on the MDS
2390 mount_mds_client() {
2391 local mds_HOST=${SINGLEMDS}_HOST
2393 zconf_mount ${!mds_HOST} $MOUNT2 $MOUNT_OPTS ||
2394 error "unable to mount $MOUNT2 on MDS"
2397 # Unmount the file system on the MDS
2398 umount_mds_client() {
2399 local mds_HOST=${SINGLEMDS}_HOST
2400 zconf_umount ${!mds_HOST} $MOUNT2
2401 do_facet $SINGLEMDS "rmdir $MOUNT2"
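# note: ${!mds_HOST} above is bash indirect expansion; with SINGLEMDS=mds1,
# mds_HOST holds the string "mds1_HOST" and ${!mds_HOST} expands to the value
# of $mds1_HOST, i.e. the MDS hostname from the test configuration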
2404 # nodes is a comma-separated list
2405 sanity_mount_check_nodes () {
2411 # FIXME: assume that all cluster nodes run the same os
2412 [ "$(uname)" = Linux ] || return 0
2415 for mnt in $mnts ; do
2416 do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts);
2417 mpts=\\\$(mount | grep -c $mnt' ');
2418 if [ \\\$running -ne \\\$mpts ]; then
2419 echo \\\$(hostname) environment is INSANE!;
2422 [ $? -eq 0 ] || rc=1
2427 sanity_mount_check_servers () {
2428 [ -n "$CLIENTONLY" ] &&
2429 { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
2430 echo Checking servers environments
2432 # FIXME: modify get_facets to display all facets without params
2433 local facets="$(get_facets OST),$(get_facets MDS),mgs"
2437 for facet in ${facets//,/ }; do
2438 node=$(facet_host ${facet})
2439 mntpt=$(facet_mntpt $facet)
2440 sanity_mount_check_nodes $node $mntpt ||
2441 { error "server $node environments are insane!"; return 1; }
2445 sanity_mount_check_clients () {
2446 local clients=${1:-$CLIENTS}
2447 local mntpt=${2:-$MOUNT}
2448 local mntpt2=${3:-$MOUNT2}
2450 [ -z "$clients" ] && clients=$(hostname)
2451 echo Checking clients $clients environments
2453 sanity_mount_check_nodes $clients $mntpt $mntpt2 ||
2454 error "clients environments are insane!"
2457 sanity_mount_check () {
2458 sanity_mount_check_servers || return 1
2459 sanity_mount_check_clients || return 2
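# usage example (sketch): typically run during setup, before (re)mounting:
#   sanity_mount_check || error "insane mount environment, fix before testing"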
2462 # mount clients if not mounted
2463 zconf_mount_clients() {
2466 local opts=${3:-$MOUNT_OPTS}
2467 opts=${opts:+-o $opts}
2468 local flags=${4:-$MOUNT_FLAGS}
2469 local device=$MGSNID:/$FSNAME$FILESET
2470 if [ -z "$mnt" -o -z "$FSNAME" ]; then
2471 echo "Bad conf mount command: opt=$flags $opts dev=$device " \
2476 echo "Starting client $clients: $flags $opts $device $mnt"
2477 do_nodes $clients mkdir -p $mnt
2478 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
2479 if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
2480 # Mount with own nodemap key
2482 # Mount all server nodes first with per-NM keys
2483 for nmclient in ${clients//,/ }; do
2484 # do_nodes $(comma_list $(all_server_nodes)) "lgss_sk -t server -l $SK_PATH/nodemap/c$i.key -n c$i"
2485 do_nodes $(comma_list $(all_server_nodes)) "lgss_sk -t server -l $SK_PATH/nodemap/c$i.key"
2488 # set perms for per-nodemap keys, else permission denied
2489 do_nodes $(comma_list $(all_nodes)) \
2490 "keyctl show | grep lustre | cut -c1-11 |
2492 xargs -IX keyctl setperm X 0x3f3f3f3f"
2493 local mountkey=$SK_PATH/$FSNAME-nmclient.key
2495 for nmclient in ${clients//,/ }; do
2496 if $SK_UNIQUE_NM; then
2497 mountkey=$SK_PATH/nodemap/c$i.key
2499 do_node $nmclient "! grep -q $mnt' ' \
2500 /proc/mounts || umount $mnt"
2501 local prunedopts=$(add_sk_mntflag $opts);
2502 prunedopts=$(echo $prunedopts | sed -e \
2503 "s#skpath=[^ ^,]*#skpath=$mountkey#g")
2505 do_nodes $(comma_list $(all_server_nodes)) \
2508 do_node $nmclient $MOUNT_CMD $flags \
2509 $prunedopts $MGSNID:/$FSNAME $mnt ||
2514 do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
2516 do_nodes $clients $MOUNT_CMD $flags $opts \
2517 $MGSNID:/$FSNAME $mnt || return 1
2519 #disable FILESET if not supported
2520 do_nodes $clients lctl get_param -n \
2521 mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
2522 device=$MGSNID:/$FSNAME
2523 do_nodes $clients mkdir -p $mnt/$FILESET
2524 do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
2528 if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
2529 # Mount with nodemap key
2531 local mountkey=$SK_PATH/$FSNAME-nmclient.key
2532 for nmclient in ${clients//,/ }; do
2533 if $SK_UNIQUE_NM; then
2534 mountkey=$SK_PATH/nodemap/c$i.key
2536 local prunedopts=$(echo $opts | sed -e \
2537 "s#skpath=[^ ^,]*#skpath=$mountkey#g");
2538 do_node $nmclient "! grep -q $mnt' ' /proc/mounts ||
2541 running=\\\$(mount | grep -c $mnt' ');
2543 if [ \\\$running -eq 0 ] ; then
2545 $MOUNT_CMD $flags $prunedopts $device $mnt;
2548 lustre_mnt_count=\\\$(mount | grep $mnt' ' | \
2549 grep 'type lustre' | wc -l);
2550 if [ \\\$running -ne \\\$lustre_mnt_count ] ; then
2551 echo zconf_mount_clients FAILED: \
2552 mount count \\\$running, not matching \
2553 with mount count of 'type lustre' \
2554 \\\$lustre_mnt_count;
2558 exit \\\$rc" || return ${PIPESTATUS[0]}
2565 if $SHARED_KEY; then
2566 tmpopts=$(add_sk_mntflag $opts)
2569 running=\\\$(mount | grep -c $mnt' ');
2571 if [ \\\$running -eq 0 ] ; then
2573 $MOUNT_CMD $flags $tmpopts $device $mnt;
2576 exit \\\$rc" || return ${PIPESTATUS[0]}
2579 echo "Started clients $clients: "
2580 do_nodes $clients "mount | grep $mnt' '"
2582 set_default_debug_nodes $clients
2583 set_params_clients $clients
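# usage: zconf_mount_clients <clients> <mntpt> [opts] [flags], where <clients>
# is a comma-separated list; e.g. (sketch, hypothetical hostnames):
#   zconf_mount_clients client-01,client-02 $MOUNT ||
#       error "mounting $MOUNT on clients failed"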
2588 zconf_umount_clients() {
2593 [ "$3" ] && force=-f
2595 echo "Stopping clients: $clients $mnt (opts:$force)"
2596 do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts);
2597 if [ \\\$running -ne 0 ] ; then
2598 echo Stopping client \\\$(hostname) $mnt opts:$force;
2599 lsof $mnt || need_kill=no;
2600 if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then
2601 pids=\\\$(lsof -t $mnt | sort -u);
2602 if [ -n \\\"\\\$pids\\\" ]; then
2606 while umount $force $mnt 2>&1 | grep -q "busy"; do
2607 echo "$mnt is still busy, wait one second" && sleep 1;
2614 echo + $POWER_DOWN $node
2618 shutdown_node_hard () {
2620 local attempts=$SHUTDOWN_ATTEMPTS
2622 for i in $(seq $attempts) ; do
2625 wait_for_function --quiet "! ping -w 3 -c 1 $host" 5 1 && return 0
2626 echo "waiting for $host to fail attempts=$attempts"
2627 [ $i -lt $attempts ] || \
2628 { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
2634 local mnt=${2:-$MOUNT}
2637 if [ "$FAILURE_MODE" = HARD ]; then
2638 shutdown_node_hard $client
2640 zconf_umount_clients $client $mnt -f
2646 local facets="$(get_facets OST),$(get_facets MDS)"
2649 combined_mgs_mds || facets="$facets,mgs"
2651 for facet in ${facets//,/ }; do
2652 if [ $(facet_active_host $facet) == $host ]; then
2653 affected="$affected $facet"
2657 echo $(comma_list $affected)
2662 local host=${2:-$(facet_host $facet)}
2664 local label=$(convert_facet2label $facet)
2665 do_node $host $LCTL dl | awk '{ print $4 }' | grep -q "^$label\$"
2668 facets_up_on_host () {
2670 local facets=$(facets_on_host $host)
2673 for facet in ${facets//,/ }; do
2674 if $(facet_up $facet $host); then
2675 affected_up="$affected_up $facet"
2679 echo $(comma_list $affected_up)
2684 local affected_facet
2685 local affected_facets
2687 if [[ "$FAILURE_MODE" = HARD ]]; then
2688 if [[ $(facet_fstype $facet) = ldiskfs ]] &&
2689 dm_flakey_supported $facet; then
2690 affected_facets=$(affected_facets $facet)
2691 for affected_facet in ${affected_facets//,/ }; do
2692 unexport_dm_dev $affected_facet
2696 shutdown_node_hard $(facet_active_host $facet)
2704 echo + $POWER_UP $node
2717 local node=$(facet_active_host $facet)
2719 if [ "$FAILURE_MODE" = HARD ]; then
2729 if [ "$FAILURE_MODE" = HARD ]; then
2732 if $LOAD_MODULES_REMOTE; then
2733 echo "loading modules on $node: $facet"
2734 do_rpc_nodes $node load_modules_local
2743 for facet in ${facets//,/ }; do
2744 hosts=$(expand_list $hosts $(facet_host $facet) )
2750 _check_progs_installed () {
2754 for prog in $progs; do
2755 if ! [ "$(which $prog)" -o "${!prog}" ]; then
2756 echo $prog missing on $(hostname)
2763 check_progs_installed () {
2767 do_rpc_nodes "$nodes" _check_progs_installed $@
2770 # recovery-scale functions
2772 echo __$(echo $1 | tr '-' '_' | tr '.' '_')
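# example: node_var_name maps a hostname to a safe variable-name prefix,
# e.g. "node-1.example.com" -> "__node_1_example_com", so per-node state can
# live in variables such as __node_1_example_com_load (see start_client_load)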
2775 start_client_load() {
2778 local var=$(node_var_name $client)_load
2779 eval export ${var}=$load
2781 do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
2782 BREAK_ON_ERROR=$BREAK_ON_ERROR \
2783 END_RUN_FILE=$END_RUN_FILE \
2784 LOAD_PID_FILE=$LOAD_PID_FILE \
2785 TESTLOG_PREFIX=$TESTLOG_PREFIX \
2786 TESTNAME=$TESTNAME \
2787 DBENCH_LIB=$DBENCH_LIB \
2788 DBENCH_SRC=$DBENCH_SRC \
2789 CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
2794 MPIRUN_OPTIONS=\\\"$MPIRUN_OPTIONS\\\" \
2795 MACHINEFILE_OPTION=\\\"$MACHINEFILE_OPTION\\\" \
2796 num_clients=$(get_node_count ${CLIENTS//,/ }) \
2797 ior_THREADS=$ior_THREADS ior_iteration=$ior_iteration \
2798 ior_blockSize=$ior_blockSize \
2799 ior_blockUnit=$ior_blockUnit \
2800 ior_xferSize=$ior_xferSize ior_type=$ior_type \
2801 ior_DURATION=$ior_DURATION \
2802 ior_stripe_params=\\\"$ior_stripe_params\\\" \
2803 ior_custom_params=\\\"$ior_custom_param\\\" \
2804 mpi_ior_custom_threads=$mpi_ior_custom_threads \
2807 log "Started client load: ${load} on $client"
2809 # get the child process IDs
2810 local pids=$(ps --ppid $ppid -o pid= | xargs)
2811 CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids"
2815 start_client_loads () {
2816 local -a clients=(${1//,/ })
2817 local numloads=${#CLIENT_LOADS[@]}
2819 for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
2820 local load=$((nodenum % numloads))
2821 start_client_load ${clients[nodenum]} ${CLIENT_LOADS[load]}
2823 # bug 22169: wait for the background threads to start
2827 # only for remote client
2828 check_client_load () {
2830 local var=$(node_var_name $client)_load
2831 local testload=run_${!var}.sh
2833 ps auxww | grep -v grep | grep $client | grep -q $testload || return 1
2835 # bug 18914: try to connect several times, not only when
2836 # checking with ps but also while running check_node_health
2840 while [ $RC = 254 -a $tries -gt 0 ]; do
2844 if ! check_node_health $client; then
2846 if [ $RC -eq 254 ]; then
2847 # FIXME: not sure how long we should sleep here
2851 echo "check node health failed: RC=$RC "
2855 # We can continue trying to connect if RC=254;
2856 # just print a warning about it
2857 if [ $RC = 254 ]; then
2858 echo "got a return status of $RC from do_node while checking " \
2859 "node health on $client"
2862 # see if the load is still on the client
2865 while [ $RC = 254 -a $tries -gt 0 ]; do
2869 if ! do_node $client \
2870 "ps auxwww | grep -v grep | grep -q $testload"; then
2875 if [ $RC = 254 ]; then
2876 echo "got a return status of $RC from do_node while checking " \
2877 "(node health and 'ps') the client load on $client"
2878 # see if we can diagnose why this happened
2883 check_client_loads () {
2884 local clients=${1//,/ }
2888 for client in $clients; do
2889 check_client_load $client
2891 if [ "$rc" != 0 ]; then
2892 log "Client load failed on node $client, rc=$rc"
2898 restart_client_loads () {
2899 local clients=${1//,/ }
2900 local expectedfail=${2:-""}
2904 for client in $clients; do
2905 check_client_load $client
2907 if [ "$rc" != 0 -a "$expectedfail" ]; then
2908 local var=$(node_var_name $client)_load
2909 start_client_load $client ${!var}
2910 echo "Restarted client load ${!var}: on $client. Checking ..."
2911 check_client_load $client
2913 if [ "$rc" != 0 ]; then
2914 log "Client load failed to restart on node $client, rc=$rc"
2915 # failure of one client load means the test fails;
2916 # we do not need to check the others
2925 # Start vmstat and save its process ID in a file.
2930 [ -z "$nodes" -o -z "$pid_file" ] && return 0
2933 "vmstat 1 > $TESTLOG_PREFIX.$TESTNAME.vmstat.\\\$(hostname -s).log \
2934 2>/dev/null </dev/null & echo \\\$! > $pid_file"
2937 # Display the nodes on which client loads failed.
2938 print_end_run_file() {
2942 [ -s $file ] || return 0
2944 echo "Found the END_RUN_FILE file: $file"
2947 # A client load will stop if it finds the END_RUN_FILE file.
2948 # That does not mean the client load actually failed though.
2949 # The first node in END_RUN_FILE is the one we are interested in.
2952 if [ -n "$node" ]; then
2953 local var=$(node_var_name $node)_load
2955 local prefix=$TESTLOG_PREFIX
2956 [ -n "$TESTNAME" ] && prefix=$prefix.$TESTNAME
2957 local stdout_log=$prefix.run_${!var}_stdout.$node.log
2958 local debug_log=$(echo $stdout_log | sed 's/\(.*\)stdout/\1debug/')
2960 echo "Client load ${!var} failed on node $node:"
2966 # Stop the process which had its PID saved in a file.
2971 [ -z "$nodes" -o -z "$pid_file" ] && return 0
2973 do_nodes $nodes "test -f $pid_file &&
2974 { kill -s TERM \\\$(cat $pid_file); rm -f $pid_file; }" || true
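# example of the pid-file pattern (sketch; the vmstat starter name and the
# pid-file path are assumptions for illustration):
#   start_vmstat $CLIENTS $TMP/vmstat.pid   # sample vmstat on every client
#   ... run the workload under test ...
#   stop_process $CLIENTS $TMP/vmstat.pid   # TERM saved PIDs, remove the file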
2977 # Stop all client loads.
2978 stop_client_loads() {
2979 local nodes=${1:-$CLIENTS}
2982 # stop the client loads
2983 stop_process $nodes $pid_file
2985 # clean up the processes that started them
2986 [ -n "$CLIENT_LOAD_PIDS" ] && kill -9 $CLIENT_LOAD_PIDS 2>/dev/null || true
2988 # End recovery-scale functions
2991 # wait for a command to return the expected result
2993 # This will run @check on @node repeatedly until the output matches @expect
2994 # based on the supplied condition, or until @max_wait seconds have elapsed,
2995 # whichever comes first. @cond may be one of the normal bash operators,
2996 # "-gt", "-ge", "-eq", "-le", "-lt", "==", "!=", or "=~", and must be quoted
2997 # in the caller to avoid unintentional evaluation by the shell in the caller.
2999 # If @max_wait is not specified, the condition will be checked for up to 90s.
3001 # If --verbose is passed as the first argument, the result is printed on each
3002 # value change, otherwise it is only printed after every 10s interval.
3004 # If --quiet is passed as the first/second argument, the do_node() command
3005 # will not print the remote command before executing it each time.
3007 # Using wait_update_cond() or related helper function is preferable to adding
3008 # a "long enough" wait for some state to change in the background, since
3009 # "long enough" may be too short due to tunables, system config, or running in
3010 # a VM, and must by necessity wait too long for most cases or risk failure.
3012 # usage: wait_update_cond [--verbose] [--quiet] node check cond expect [max_wait]
3013 wait_update_cond() {
3017 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3018 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3024 local max_wait=${5:-90}
3028 local begin=$SECONDS
3032 while (( $waited <= $max_wait )); do
3033 result=$(do_node $quiet $node "$check")
3035 eval [[ "'$result'" $cond "'$expect'" ]]
3036 if [[ $? == 0 ]]; then
3037 [[ -z "$result" || $waited -le $sleep ]] ||
3038 echo "Updated after ${waited}s: want '$expect' got '$result'"
3041 if [[ -n "$verbose" && "$result" != "$prev_result" ]]; then
3042 [[ -n "$prev_result" ]] &&
3043 echo "Changed after ${waited}s: from '$prev_result' to '$result'"
3044 prev_result="$result"
3046 (( $waited % $print == 0 )) &&
3047 echo "Waiting $((max_wait - waited))s for '$expect'"
3049 waited=$((SECONDS - begin))
3051 echo "Update not seen after ${max_wait}s: want '$expect' got '$result'"
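# e.g. (sketch, hypothetical values): wait up to 60s until this client sees
# at least 3 ACTIVE targets:
#   wait_update_cond --verbose $HOSTNAME \
#       "$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd | grep -c ACTIVE" \
#       "-ge" "3" 60 || error "targets did not become active"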
3055 # usage: wait_update [--verbose] [--quiet] node check expect [max_wait]
3060 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3061 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3068 wait_update_cond $verbose $quiet $node "$check" "==" "$expect" $max_wait
3071 # usage: wait_update_facet_cond [--verbose] facet check cond expect [max_wait]
3072 wait_update_facet_cond() {
3076 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3077 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3079 local node=$(facet_active_host $1)
3085 wait_update_cond $verbose $quiet $node "$check" "$cond" "$expect" $max_wait
3088 # usage: wait_update_facet [--verbose] facet check expect [max_wait]
3089 wait_update_facet() {
3093 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3094 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3096 local node=$(facet_active_host $1)
3101 wait_update_cond $verbose $quiet $node "$check" "==" "$expect" $max_wait
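# e.g. (sketch): wait up to 30s until ost1 reports mds_sync of 0:
#   wait_update_facet ost1 "$LCTL get_param -n obdfilter.*.mds_sync" "0" 30 ||
#       error "mds-ost sync did not settle"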
3105 do_nodes $(comma_list $(mdts_nodes)) \
3106 "lctl set_param -n os[cd]*.*MDT*.force_sync=1"
3107 do_nodes $(comma_list $(osts_nodes)) \
3108 "lctl set_param -n osd*.*OS*.force_sync=1" 2>&1 |
3109 grep -v 'Found no match'
3113 local zfs_wait=${2:-5}
3115 # the occupied disk space will be released
3116 # only after TXGs are committed
3117 if [[ $(facet_fstype $1) == zfs ]]; then
3118 echo "sleep $zfs_wait for ZFS $(facet_fstype $1)"
3126 local lwm=$3 #low watermark
3127 local size_mb #how many MB should we write to pass watermark
3128 local ost_name=$(ostname_from_index $ost_idx)
3130 free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
3132 if (( $free_kb / 1024 > lwm )); then
3133 size_mb=$((free_kb / 1024 - lwm))
3135 # If 10% of free space crosses the low watermark, use it
3136 if (( $free_kb / 10240 > size_mb )); then
3137 size_mb=$((free_kb / 10240))
3139 # we need to store at least 1.1x the difference between
3140 # free space and the low watermark
3141 size_mb=$((size_mb + size_mb / 10))
3143 if (( lwm <= $free_kb / 1024 )) ||
3144 [ ! -f $DIR/${filename}.fill_ost$ost_idx ]; then
3145 $LFS setstripe -i $ost_idx -c1 $DIR/${filename}.fill_ost$ost_idx
3146 dd if=/dev/zero of=$DIR/${filename}.fill_ost$ost_idx bs=1M \
3147 count=$size_mb oflag=append conv=notrunc
3152 free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
3153 echo "OST still has $((free_kb / 1024)) MB free"
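# worked example of the sizing above: with 10000 MB free and lwm = 1000,
# size_mb = 10000 - 1000 = 9000; 10% of free space (1000 MB) does not exceed
# that, so the 1.1x rule applies instead: size_mb = 9000 + 900 = 9900 MB is
# appended with dd to push the OST below its low watermark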
3156 # This checks only the primary MDS
3157 ost_watermarks_get() {
3159 local ost_name=$(ostname_from_index $ost_idx)
3160 local mdtosc_proc=$(get_mdtosc_proc_path $SINGLEMDS $ost_name)
3162 local hwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
3163 osp.$mdtosc_proc.reserved_mb_high)
3164 local lwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
3165 osp.$mdtosc_proc.reserved_mb_low)
3170 # Note that we set watermarks on all MDSes (necessary for striped dirs)
3171 ost_watermarks_set() {
3175 local ost_name=$(ostname_from_index $ost_idx)
3176 local facets=$(get_facets MDS)
3178 do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
3179 osp.*$ost_name*.reserved_mb_low=$lwm \
3180 osp.*$ost_name*.reserved_mb_high=$hwm > /dev/null
3182 # sleep to ensure we see the change
3186 ost_watermarks_set_low_space() {
3188 local wms=$(ost_watermarks_get $ost_idx)
3189 local ost_name=$(ostname_from_index $ost_idx)
3191 local old_lwm=$(echo $wms | awk '{ print $1 }')
3192 local old_hwm=$(echo $wms | awk '{ print $2 }')
3194 local blocks=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
3195 # minimal extension size is 64M
3197 if (( $blocks / 1024 > 50 )); then
3198 new_lwm=$((blocks / 1024 - 50))
3200 local new_hwm=$((new_lwm + 5))
3202 ost_watermarks_set $ost_idx $new_lwm $new_hwm
3203 echo "watermarks: $old_lwm $old_hwm $new_lwm $new_hwm"
3206 # Set watermarks to ~current available space & then write data to fill it
3207 # Note OST is not *actually* full after this, it just reports ENOSPC in the
3208 # internal statfs used by the stripe allocator
3210 # first parameter is the filename prefix, which must fall under the t-f
3211 # cleanup requirements (rm -rf $DIR/[Rdfs][0-9]*), i.e. $tfile works fine
3212 ost_watermarks_set_enospc() {
3216 local ost_name=$(ostname_from_index $ost_idx)
3217 local facets=$(get_facets MDS)
3221 for MDS in ${facets//,/ }; do
3222 local mdtosc_proc=$(get_mdtosc_proc_path $MDS $ost_name)
3224 do_facet $MDS $LCTL get_param -n \
3225 osp.$mdtosc_proc.reserved_mb_high ||
3226 skip "remote MDS does not support reserved_mb_high"
3229 wms=$(ost_watermarks_set_low_space $ost_idx)
3230 local new_lwm=$(echo $wms | awk '{ print $4 }')
3231 fill_ost $filename $ost_idx $new_lwm
3232 # First ENOSPC could trigger orphan deletion, so repeat
3233 fill_ost $filename $ost_idx $new_lwm
3237 ost_watermarks_enospc_delete_files() {
3241 rm -f $DIR/${filename}.fill_ost$ost_idx
3243 wait_delete_completed
3247 # clean up from "ost_watermarks_set_enospc"
3248 ost_watermarks_clear_enospc() {
3254 ost_watermarks_enospc_delete_files $filename $ost_idx
3255 ost_watermarks_set $ost_idx $old_lwm $old_hwm
3256 echo "set OST$ost_idx lwm back to $old_lwm, hwm back to $old_hwm"
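# usage pair (sketch; assumes the setter echoes the "watermarks: ..." values
# and the cleaner takes filename, ost_idx, old_lwm, old_hwm positionally):
#   local wms=$(ost_watermarks_set_enospc $tfile 0)
#   ... exercise allocator behaviour while OST0000 reports ENOSPC ...
#   ost_watermarks_clear_enospc $tfile 0 $(echo $wms | awk '{print $1, $2}')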
3259 wait_delete_completed_mds() {
3260 local max_wait=${1:-20}
3262 local stime=$(date +%s)
3267 # find MDS with pending deletions
3268 for node in $(mdts_nodes); do
3269 changes=$(do_node $node "$LCTL get_param -n osc.*MDT*.sync_*" \
3270 2>/dev/null | calc_sum)
3271 if [[ $changes -eq 0 ]]; then
3274 mds2sync="$mds2sync $node"
3276 if [ -z "$mds2sync" ]; then
3277 wait_zfs_commit $SINGLEMDS
3280 mds2sync=$(comma_list $mds2sync)
3282 # sync MDS transactions
3283 do_nodes $mds2sync "$LCTL set_param -n os[cd]*.*MD*.force_sync 1"
3285 # wait till all changes are sent and committed by the OSTs;
3286 # for ldiskfs space is released upon execution, but the DMU
3287 # does this upon commit
3290 while [[ $WAIT -ne $max_wait ]]; do
3291 changes=$(do_nodes $mds2sync \
3292 "$LCTL get_param -n osc.*MDT*.sync_*" | calc_sum)
3293 #echo "$node: $changes changes on all"
3294 if [[ $changes -eq 0 ]]; then
3295 wait_zfs_commit $SINGLEMDS
3297 # the occupied disk space will be released
3298 # only after TXGs are committed
3299 wait_zfs_commit ost1
3307 echo "Delete was not completed in $((etime - stime)) seconds"
3308 do_nodes $mds2sync "$LCTL get_param osc.*MDT*.sync_*"
3315 # we can use "for" here because we are waiting for the slowest
3316 for host in ${hostlist//,/ }; do
3317 check_network "$host" 900
3319 while ! do_nodes $hostlist hostname > /dev/null; do sleep 5; done
3326 for facet in ${facetlist//,/ }; do
3327 hostlist=$(expand_list $hostlist $(facet_active_host $facet))
3329 wait_for_host $hostlist
3332 _wait_recovery_complete () {
3335 # Use default policy if $2 is not passed by caller.
3336 local MAX=${2:-$(max_recovery_time)}
3341 while [ $WAIT -lt $MAX ]; do
3342 STATUS=$(lctl get_param -n $param | grep status)
3344 [[ $STATUS = "status: COMPLETE" || $STATUS = "status: INACTIVE" ]] && return 0
3347 echo "Waiting $((MAX - WAIT)) secs for $param recovery done. $STATUS"
3349 echo "$param recovery not done in $MAX sec. $STATUS"
3353 wait_recovery_complete () {
3356 # with an assumption that at_max is the same on all nodes
3357 local MAX=${2:-$(max_recovery_time)}
3360 if [ "$FAILURE_MODE" = HARD ]; then
3361 facets=$(facets_on_host $(facet_active_host $facet))
3363 echo affected facets: $facets
3365 # we can use "for" here because we are waiting for the slowest
3366 for facet in ${facets//,/ }; do
3367 local var_svc=${facet}_svc
3368 local param="*.${!var_svc}.recovery_status"
3370 local host=$(facet_active_host $facet)
3371 do_rpc_nodes "$host" _wait_recovery_complete $param $MAX
3375 wait_mds_ost_sync () {
3376 # just because recovery is done doesn't mean we've finished
3377 # orphan cleanup. Wait for llogs to get synchronized.
3378 echo "Waiting for orphan cleanup..."
3379 # MAX value includes time needed for MDS-OST reconnection
3380 local MAX=$(( TIMEOUT * 2 ))
3381 local WAIT_TIMEOUT=${1:-$MAX}
3384 local list=$(comma_list $(mdts_nodes))
3385 local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed"
3386 if ! do_facet $SINGLEMDS \
3387 "$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null"
3389 # old way, use mds_sync
3391 list=$(comma_list $(osts_nodes))
3392 cmd="$LCTL get_param -n obdfilter.*.mds_sync"
3395 echo "wait at most $WAIT_TIMEOUT secs for $list mds-ost sync done."
3396 while [ $WAIT -lt $WAIT_TIMEOUT ]; do
3397 local -a sync=($(do_nodes $list "$cmd"))
3400 for ((i=0; i<${#sync[@]}; i++)); do
3402 [ ${sync[$i]} -eq 1 ] && continue
3404 [ ${sync[$i]} -eq 0 ] && continue
3406 # there is an unfinished MDS-OST synchronization
3410 sleep 2 # increase waiting time and cover statfs cache
3411 [ ${con} -eq 1 ] && return 0
3412 echo "Waiting $WAIT secs for $list $i mds-ost sync done."
3416 # show which nodes are not finished.
3417 cmd=$(echo $cmd | sed 's/-n//')
3418 do_nodes $list "$cmd"
3419 echo "$facet recovery node $i not done in $WAIT_TIMEOUT sec. $STATUS"
3423 # Wait for OSTs to be active on both the client and MDT side.
3425 local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
3426 awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
3427 wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
3428 error "wait_update OSTs up on client failed"
3430 cmd="$LCTL get_param osp.$FSNAME-OST*-MDT0000.prealloc_last_id |
3431 awk '/=[1-9][0-9]/ { c += 1 } END { printf \\\"%d\\\", c }'"
3432 wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
3433 error "wait_update OSTs up on MDT0000 failed"
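# the embedded awk above just counts matching lines, e.g. on the client side
#   $LCTL get_param -n lov.$FSNAME-clilov-*.target_obd
# is reduced to the number of ACTIVE entries, which must reach $OSTCOUNT
# before the wait_update succeeds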
3436 wait_destroy_complete () {
3437 echo "Waiting for MDT destroys to complete"
3438 # the MAX value shouldn't be big as it reflects server responsiveness;
3439 # never increase it just to make a test pass, but investigate
3440 # why it takes so long
3443 local list=$(comma_list $(mdts_nodes))
3444 while [ $WAIT -lt $MAX ]; do
3445 local -a RPCs=($(do_nodes $list $LCTL get_param -n osp.*.destroys_in_flight))
3449 for ((i=0; i<${#RPCs[@]}; i++)); do
3450 [ ${RPCs[$i]} -eq 0 ] && continue
3451 # there are still some destroy RPCs in flight
3456 [ ${con} -eq 1 ] && return 0 # done waiting
3457 echo "Waiting ${WAIT}s for local destroys to complete"
3460 echo "MDT destroys weren't done in $MAX sec."
3464 wait_delete_completed() {
3465 wait_delete_completed_mds $1 || return $?
3466 wait_destroy_complete || return $?
3475 # conf-sanity 31 takes a long time to clean up
3476 while [ $WAIT -lt 300 ]; do
3477 running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null &&
3478 lctl dl | grep ' ST ' || true")
3479 [ -z "${running}" ] && return 0
3480 echo "waited $WAIT for${running}"
3481 [ $INTERVAL -lt 64 ] && INTERVAL=$((INTERVAL + INTERVAL))
3483 WAIT=$((WAIT + INTERVAL))
3485 echo "service didn't stop after $WAIT seconds. Still running:"
3490 wait_remote_prog () {
3496 [ "$PDSH" = "no_dsh" ] && return 0
3498 while [ $WAIT -lt $2 ]; do
3499 running=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep) || true
3500 [ -z "${running}" ] && return 0 || true
3501 echo "waited $WAIT for: "
3503 [ $INTERVAL -lt 60 ] && INTERVAL=$((INTERVAL + INTERVAL))
3505 WAIT=$((WAIT + INTERVAL))
3507 local pids=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep | awk '{print $2}')
3508 [ -z "$pids" ] && return 0
3509 echo "$PDSH processes still exist after $WAIT seconds. Still running: $pids"
3510 # FIXME: not portable
3511 for pid in $pids; do
3512 cat /proc/${pid}/status || true
3513 cat /proc/${pid}/wchan || true
3515 kill -9 $pid || true
3524 local clients=${1:-$CLIENTS}
3526 if [ -z "$clients" ]; then
3529 $PDSH $clients "$LFS df $MOUNT" > /dev/null
3534 # not every config has many clients
3540 # usually checked on a particular client or locally
3549 client_reconnect_try() {
3550 local f=$MOUNT/recon
3553 if [ -z "$CLIENTS" ]; then
3554 $LFS df $MOUNT; uname -n >> $f
3556 do_nodes $CLIENTS "$LFS df $MOUNT; uname -n >> $f" > /dev/null
3558 echo "Connected clients: $(cat $f)"
3559 ls -l $f > /dev/null
3563 client_reconnect() {
3564 # one client_reconnect_try call does not always do the job...
3566 client_reconnect_try && break
3571 affected_facets () {
3574 local host=$(facet_active_host $facet)
3575 local affected=$facet
3577 if [ "$FAILURE_MODE" = HARD ]; then
3578 affected=$(facets_up_on_host $host)
3584 local E2FSCK_ON_MDT0=false
3585 if [ "$1" == "--fsck" ]; then
3587 [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
3599 # because it will only get up facets, we need to get the affected
3600 # facets before shutdown;
3601 # for HARD failure mode, we must make sure facets on the same
3602 # host are only shut down and rebooted once
3603 for facet in ${facets//,/ }; do
3604 local affected_facet
3606 # check whether the facet has been included in other affected facets
3607 for ((index=0; index<$total; index++)); do
3608 [[ ,${affecteds[index]}, == *,$facet,* ]] && skip=1
3611 if [ $skip -eq 0 ]; then
3612 affecteds[$total]=$(affected_facets $facet)
3617 for ((index=0; index<$total; index++)); do
3618 facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
3619 local host=$(facet_active_host $facet)
3620 echo "Failing ${affecteds[index]} on $host"
3621 shutdown_facet $facet
3624 $E2FSCK_ON_MDT0 && (run_e2fsck $(facet_active_host $SINGLEMDS) \
3625 $(mdsdevname 1) "-n" || error "Running e2fsck")
3627 for ((index=0; index<$total; index++)); do
3628 facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
3629 echo reboot facets: ${affecteds[index]}
3633 change_active ${affecteds[index]}
3635 wait_for_facet ${affecteds[index]}
3638 init_facets_vars_simple
3640 # start mgs first if it is affected
3641 if ! combined_mgs_mds &&
3642 list_member ${affecteds[index]} mgs; then
3643 mount_facet mgs || error "Restart of mgs failed"
3645 # FIXME: has to be changed to mount all facets concurrently
3646 affected=$(exclude_items_from_list ${affecteds[index]} mgs)
3647 echo mount facets: ${affecteds[index]}
3648 mount_facets ${affecteds[index]}
3650 do_nodes $(comma_list $(all_nodes)) \
3651 "keyctl show | grep lustre | cut -c1-11 |
3653 xargs -IX keyctl setperm X 0x3f3f3f3f"
3660 do_facet $facet "sync; sync; sync"
3663 # make sure there will be no seq change
3664 local clients=${CLIENTS:-$HOSTNAME}
3665 local f=fsa-\\\$\(hostname\)
3666 do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
3667 do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi"
3669 local svc=${facet}_svc
3670 do_facet $facet $LCTL --device ${!svc} notransno
3672 # If a ZFS OSD is made read-only here, its pool is "frozen". This
3673 # in-memory state has to be cleared by either rebooting the host or
3674 # exporting and reimporting the pool.
3676 # Although the uberblocks are not updated when a pool is frozen,
3677 # transactions are still written to the disks. Modified blocks may be
3678 # cached in memory when tests try reading them back. The
3679 # export-and-reimport process also evicts any cached pool data from
3680 # memory to provide the correct "data loss" semantics.
3682 # In the test framework, the exporting and importing operations are
3683 # handled by stop() and mount_facet() separately, which are used
3684 # inside fail() and fail_abort().
3686 set_dev_readonly $facet
3687 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
3688 $LCTL mark "local REPLAY BARRIER on ${!svc}"
3691 replay_barrier_nodf() {
3692 local facet=$1
3693 do_facet $facet "sync; sync; sync"
3694 local svc=${facet}_svc
3695 echo Replay barrier on ${!svc}
3696 do_facet $facet $LCTL --device ${!svc} notransno
3697 set_dev_readonly $facet
3698 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
3699 $LCTL mark "local REPLAY BARRIER on ${!svc}"
3702 replay_barrier_nosync() {
3703 local facet=$1
3704 local svc=${facet}_svc
3705 echo Replay barrier on ${!svc}
3706 do_facet $facet $LCTL --device ${!svc} notransno
3707 set_dev_readonly $facet
3708 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
3709 $LCTL mark "local REPLAY BARRIER on ${!svc}"
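# typical recovery-test usage of a replay barrier (sketch; createmany and
# unlinkmany are assumed helper utilities from the test suite):
#   replay_barrier_nodf mds1          # freeze committed state on the MDT
#   createmany -o $DIR/$tfile- 25     # these creates stay uncommitted
#   facet_failover mds1               # restart; clients replay the creates
#   clients_up || error "post-failover stat failed"
#   unlinkmany $DIR/$tfile- 25 || error "replay lost creates"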
3713 # Get Lustre client uuid for a given Lustre mount point.
3716 local mntpnt=${1:-$MOUNT}
3718 local name=$($LFS getname $mntpnt | cut -d' ' -f1)
3719 local uuid=$($LCTL get_param -n llite.$name.uuid)
3724 mds_evict_client() {
3725 local mntpnt=${1:-$MOUNT}
3726 local uuid=$(get_client_uuid $mntpnt)
3728 do_facet $SINGLEMDS \
3729 "$LCTL set_param -n mdt.${mds1_svc}.evict_client $uuid"
3732 ost_evict_client() {
3733 local mntpnt=${1:-$MOUNT}
3734 local uuid=$(get_client_uuid $mntpnt)
3737 "$LCTL set_param -n obdfilter.${ost1_svc}.evict_client $uuid"
3742 local clients=${CLIENTS:-$HOSTNAME}
3744 SK_NO_KEY_save=$SK_NO_KEY
3746 export SK_NO_KEY=false
3748 facet_failover $* || error "failover: $?"
3749 export SK_NO_KEY=$SK_NO_KEY_save
3750 # to initiate all OSC idling connections
3752 wait_clients_import_state "$clients" "$facets" "\(FULL\|IDLE\)"
3753 clients_up || error "post-failover stat: $?"
3759 facet_failover $facet
3764 local abort_type=${2:-"abort_recovery"}
3767 change_active $facet
3768 wait_for_facet $facet
3769 mount_facet $facet -o $abort_type
3770 clients_up || echo "first stat failed: $?"
3771 clients_up || error "post-failover stat: $?"
3774 host_nids_address() {
3778 do_nodes $nodes "$LCTL list_nids | grep -w $net | cut -f 1 -d @"
3782 if [ "$1" = "'*'" ]; then echo \'*\'; else
3788 if [[ -n "$NETTYPE" ]]; then
3789 h2name_or_ip "$1" "$NETTYPE"
3791 h2name_or_ip "$1" "$2"
3794 declare -fx h2nettype
3796 # Wrapper function to print the deprecation warning
3798 echo "h2tcp: deprecated, use h2nettype instead" 1>&2
3799 if [[ -n "$NETTYPE" ]]; then
3802 h2nettype "$1" "tcp"
3806 # Wrapper function to print the deprecation warning
3808 echo "h2o2ib: deprecated, use h2nettype instead" 1>&2
3809 if [[ -n "$NETTYPE" ]]; then
3812 h2nettype "$1" "o2ib"
3816 # This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST
3817 # expression format. As a bonus we can then just pass in those variables
3818 # to pdsh. What this function does is take a HOSTLIST type string and