3 trap 'print_summary && print_stack_trace | tee $TF_FAIL && \
4 echo "$TESTSUITE: FAIL: test-framework exiting on error"' ERR
8 export REFORMAT=${REFORMAT:-""}
9 export WRITECONF=${WRITECONF:-""}
10 export VERBOSE=${VERBOSE:-false}
11 export GSS=${GSS:-false}
12 export GSS_SK=${GSS_SK:-false}
14 export GSS_PIPEFS=false
15 export SHARED_KEY=${SHARED_KEY:-false}
16 export SK_PATH=${SK_PATH:-/tmp/test-framework-keys}
17 export SK_OM_PATH=$SK_PATH'/tmp-request-mount'
18 export SK_MOUNTED=${SK_MOUNTED:-false}
19 export SK_FLAVOR=${SK_FLAVOR:-ski}
20 export SK_NO_KEY=${SK_NO_KEY:-true}
21 export SK_UNIQUE_NM=${SK_UNIQUE_NM:-false}
22 export SK_S2S=${SK_S2S:-false}
23 export SK_S2SNM=${SK_S2SNM:-TestFrameNM}
24 export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli}
25 export SK_SKIPFIRST=${SK_SKIPFIRST:-true}
26 export IDENTITY_UPCALL=default
28 export FLAKEY=${FLAKEY:-true}
29 # specify environment variable containing batch job name for server statistics
30 export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable"
32 #export PDSH="pdsh -S -Rssh -w"
33 export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"}
34 export UMOUNT=${UMOUNT:-"umount -d"}
36 export LSNAPSHOT_CONF="/etc/ldev.conf"
37 export LSNAPSHOT_LOG="/var/log/lsnapshot.log"
39 # sles12 umount has an issue with the -d option
40 [ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && {
41 export UMOUNT="umount"
44 # function used by scripts run on remote nodes
45 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
46 . $LUSTRE/tests/functions.sh
47 . $LUSTRE/tests/yaml.sh
49 export LD_LIBRARY_PATH=${LUSTRE}/utils/.libs:${LUSTRE}/utils:${LD_LIBRARY_PATH}
51 LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg}
53 EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh}
55 if [ -f "$EXCEPT_LIST_FILE" ]; then
56 echo "Reading test skip list from $EXCEPT_LIST_FILE"
61 # check config files for options in decreasing order of preference
62 [ -z "$MODPROBECONF" -a -f /etc/modprobe.d/lustre.conf ] &&
63 MODPROBECONF=/etc/modprobe.d/lustre.conf
64 [ -z "$MODPROBECONF" -a -f /etc/modprobe.d/Lustre ] &&
65 MODPROBECONF=/etc/modprobe.d/Lustre
66 [ -z "$MODPROBECONF" -a -f /etc/modprobe.conf ] &&
67 MODPROBECONF=/etc/modprobe.conf
69 sanitize_parameters() {
70 for i in DIR DIR1 DIR2 MOUNT MOUNT1 MOUNT2
73 if [ -d "$path" ]; then
74 eval export $i=$(echo $path | sed -r 's/\/+$//g')
80 [[ $DIR/ = $MOUNT/* ]] ||
81 { failed=1 && echo "DIR=$DIR not in $MOUNT. Aborting."; }
82 [[ $DIR1/ = $MOUNT1/* ]] ||
83 { failed=1 && echo "DIR1=$DIR1 not in $MOUNT1. Aborting."; }
84 [[ $DIR2/ = $MOUNT2/* ]] ||
85 { failed=1 && echo "DIR2=$DIR2 not in $MOUNT2. Aborting"; }
87 [ -n "$failed" ] && exit 99 || true
91 echo "usage: $0 [-r] [-f cfgfile]"
99 [ -z "$DEFAULT_SUITES" ] && return 0
100 [ -n "$ONLY" ] && echo "WARNING: ONLY is set to $(echo $ONLY)"
102 local form="%-13s %-17s %-9s %s %s\n"
104 printf "$form" "status" "script" "Total(sec)" "E(xcluded) S(low)"
105 echo "---------------------------------------------------------------"
106 for O in $DEFAULT_SUITES; do
107 O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
108 [ "${!O}" = "no" ] && continue || true
109 local o=$(echo $O | tr "[:upper:]_" "[:lower:]-")
110 local log=${TMP}/${o}.log
111 if is_sanity_benchmark $o; then
112 log=${TMP}/sanity-benchmark.log
117 local status=Unfinished
119 skipped=$(grep excluded $log | awk '{ printf " %s", $3 }' |
121 slow=$(egrep "^PASS|^FAIL" $log | tr -d "("| sed s/s\)$//g |
122 sort -nr -k 3 | head -n5 | awk '{ print $2":"$3"s" }')
123 total=$(grep duration $log | awk '{ print $2 }')
124 if [ "${!O}" = "done" ]; then
128 local durations=$(egrep "^PASS|^FAIL" $log |
129 tr -d "("| sed s/s\)$//g |
130 awk '{ print $2":"$3"|" }')
131 details=$(printf "%s\n%s %s %s\n" "$details" \
132 "DDETAILS" "$O" "$(echo $durations)")
135 printf "$form" $status "$O" "${total}" "E=$skipped"
136 printf "$form" "-" "-" "-" "S=$(echo $slow)"
139 for O in $DEFAULT_SUITES; do
140 O=$(echo $O | tr "-" "_" | tr "[:lower:]" "[:upper:]")
141 if [ "${!O}" = "no" ]; then
142 printf "$form" "Skipped" "$O" ""
146 # print the detailed test durations if DDETAILS=true
152 # Get information about the Lustre environment. The information collected
153 # will be used in Lustre tests.
154 # usage: get_lustre_env
155 # input: No required or optional arguments
156 # output: No return values, environment variables are exported
160 export mds1_FSTYPE=${mds1_FSTYPE:-$(facet_fstype mds1)}
161 export ost1_FSTYPE=${ost1_FSTYPE:-$(facet_fstype ost1)}
163 export MGS_VERSION=$(lustre_version_code mgs)
164 export MDS1_VERSION=$(lustre_version_code mds1)
165 export OST1_VERSION=$(lustre_version_code ost1)
166 export CLIENT_VERSION=$(lustre_version_code client)
168 # Prefer using "mds1" directly instead of SINGLEMDS.
169 # Keep this for compat until it is removed from scripts.
170 export SINGLEMDS=${SINGLEMDS:-mds1}
174 export LUSTRE=$(absolute_path $LUSTRE)
175 export TESTSUITE=$(basename $0 .sh)
176 export TEST_FAILED=false
177 export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false}
178 export RPC_MODE=${RPC_MODE:-false}
179 export DO_CLEANUP=${DO_CLEANUP:-true}
180 export KEEP_ZPOOL=${KEEP_ZPOOL:-false}
181 export CLEANUP_DM_DEV=false
182 export PAGE_SIZE=$(get_page_size client)
184 export MKE2FS=$MKE2FS
185 if [ -z "$MKE2FS" ]; then
186 if which mkfs.ldiskfs >/dev/null 2>&1; then
187 export MKE2FS=mkfs.ldiskfs
193 export DEBUGFS=$DEBUGFS
194 if [ -z "$DEBUGFS" ]; then
195 if which debugfs.ldiskfs >/dev/null 2>&1; then
196 export DEBUGFS=debugfs.ldiskfs
198 export DEBUGFS=debugfs
202 export TUNE2FS=$TUNE2FS
203 if [ -z "$TUNE2FS" ]; then
204 if which tunefs.ldiskfs >/dev/null 2>&1; then
205 export TUNE2FS=tunefs.ldiskfs
207 export TUNE2FS=tune2fs
211 export E2LABEL=$E2LABEL
212 if [ -z "$E2LABEL" ]; then
213 if which label.ldiskfs >/dev/null 2>&1; then
214 export E2LABEL=label.ldiskfs
216 export E2LABEL=e2label
220 export DUMPE2FS=$DUMPE2FS
221 if [ -z "$DUMPE2FS" ]; then
222 if which dumpfs.ldiskfs >/dev/null 2>&1; then
223 export DUMPE2FS=dumpfs.ldiskfs
225 export DUMPE2FS=dumpe2fs
229 export E2FSCK=$E2FSCK
230 if [ -z "$E2FSCK" ]; then
231 if which fsck.ldiskfs >/dev/null 2>&1; then
232 export E2FSCK=fsck.ldiskfs
238 export RESIZE2FS=$RESIZE2FS
239 if [ -z "$RESIZE2FS" ]; then
240 if which resizefs.ldiskfs >/dev/null 2>&1; then
241 export RESIZE2FS=resizefs.ldiskfs
243 export RESIZE2FS=resize2fs
247 export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after test suite
248 export FSCK_MAX_ERR=4 # File system errors left uncorrected
250 export ZFS=${ZFS:-zfs}
251 export ZPOOL=${ZPOOL:-zpool}
252 export ZDB=${ZDB:-zdb}
253 export PARTPROBE=${PARTPROBE:-partprobe}
255 #[ -d /r ] && export ROOT=${ROOT:-/r}
256 export TMP=${TMP:-$ROOT/tmp}
257 export TESTSUITELOG=${TMP}/${TESTSUITE}.log
258 export LOGDIR=${LOGDIR:-${TMP}/test_logs/$(date +%s)}
259 export TESTLOG_PREFIX=$LOGDIR/$TESTSUITE
261 export HOSTNAME=${HOSTNAME:-$(hostname -s)}
262 if ! echo $PATH | grep -q $LUSTRE/utils; then
263 export PATH=$LUSTRE/utils:$PATH
265 if ! echo $PATH | grep -q $LUSTRE/utils/gss; then
266 export PATH=$LUSTRE/utils/gss:$PATH
268 if ! echo $PATH | grep -q $LUSTRE/tests; then
269 export PATH=$LUSTRE/tests:$PATH
271 if ! echo $PATH | grep -q $LUSTRE/../lustre-iokit/sgpdd-survey; then
272 export PATH=$LUSTRE/../lustre-iokit/sgpdd-survey:$PATH
274 export LST=${LST:-"$LUSTRE/../lnet/utils/lst"}
275 [ ! -f "$LST" ] && export LST=$(which lst)
276 export SGPDDSURVEY=${SGPDDSURVEY:-"$LUSTRE/../lustre-iokit/sgpdd-survey/sgpdd-survey"}
277 [ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey)
278 export MCREATE=${MCREATE:-mcreate}
279 export MULTIOP=${MULTIOP:-multiop}
280 export MMAP_CAT=${MMAP_CAT:-mmap_cat}
281 export STATX=${STATX:-statx}
282 # Ubuntu, at least, has a truncate command in /usr/bin
283 # so fully path our truncate command.
284 export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate}
285 export FSX=${FSX:-$LUSTRE/tests/fsx}
286 export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"}
287 [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null)
288 if ! echo $PATH | grep -q $LUSTRE/tests/racer; then
289 export PATH=$LUSTRE/tests/racer:$PATH
291 if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then
292 export PATH=$LUSTRE/tests/mpi:$PATH
294 export RSYNC_RSH=${RSYNC_RSH:-rsh}
296 export LNETCTL=${LNETCTL:-"$LUSTRE/../lnet/utils/lnetctl"}
297 [ ! -f "$LNETCTL" ] && export LNETCTL=$(which lnetctl 2> /dev/null)
298 export LCTL=${LCTL:-"$LUSTRE/utils/lctl"}
299 [ ! -f "$LCTL" ] && export LCTL=$(which lctl)
300 export LFS=${LFS:-"$LUSTRE/utils/lfs"}
301 [ ! -f "$LFS" ] && export LFS=$(which lfs)
303 export PERM_CMD=${PERM_CMD:-"$LCTL conf_param"}
305 export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"}
306 if [ ! -f "$L_GETIDENTITY" ]; then
307 if which l_getidentity > /dev/null 2>&1; then
308 export L_GETIDENTITY=$(which l_getidentity)
310 export L_GETIDENTITY=NONE
313 export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"}
314 [ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid"
315 export LL_DECODE_LINKEA=${LL_DECODE_LINKEA:-"$LUSTRE/utils/ll_decode_linkea"}
316 [ ! -f "$LL_DECODE_LINKEA" ] && export LL_DECODE_LINKEA="ll_decode_linkea"
317 export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"}
318 [ ! -f "$MKFS" ] && export MKFS="mkfs.lustre"
319 export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"}
320 [ ! -f "$TUNEFS" ] && export TUNEFS="tunefs.lustre"
321 export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} "
322 export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod}
323 [ ! -f "$LUSTRE_RMMOD" ] &&
324 export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null)
325 export LUSTRE_ROUTES_CONVERSION=${LUSTRE_ROUTES_CONVERSION:-$LUSTRE/scripts/lustre_routes_conversion}
326 [ ! -f "$LUSTRE_ROUTES_CONVERSION" ] &&
327 export LUSTRE_ROUTES_CONVERSION=$(which lustre_routes_conversion 2> /dev/null)
328 export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate}
329 [ ! -f "$LFS_MIGRATE" ] &&
330 export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null)
331 export LR_READER=${LR_READER:-"$LUSTRE/utils/lr_reader"}
332 [ ! -f "$LR_READER" ] &&
333 export LR_READER=$(which lr_reader 2> /dev/null)
334 [ -z "$LR_READER" ] && export LR_READER="/usr/sbin/lr_reader"
335 export LSOM_SYNC=${LSOM_SYNC:-"$LUSTRE/utils/llsom_sync"}
336 [ ! -f "$LSOM_SYNC" ] &&
337 export LSOM_SYNC=$(which llsom_sync 2> /dev/null)
338 [ -z "$LSOM_SYNC" ] && export LSOM_SYNC="/usr/sbin/llsom_sync"
339 export NAME=${NAME:-local}
340 export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"}
341 [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] &&
342 export LGSSD=$(which lgssd)
343 export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"}
344 [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null)
345 export KRB5DIR=${KRB5DIR:-"/usr/kerberos"}
347 export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests}
349 export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"}
350 [ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null)
352 export DMSETUP=${DMSETUP:-dmsetup}
353 export DM_DEV_PATH=${DM_DEV_PATH:-/dev/mapper}
354 export LOSETUP=${LOSETUP:-losetup}
356 if [ "$ACCEPTOR_PORT" ]; then
357 export PORT_OPT="--port $ACCEPTOR_PORT"
361 $RPC_MODE || echo "Using GSS shared-key feature"
362 which lgss_sk > /dev/null 2>&1 ||
363 error_exit "built with lgss_sk disabled! SEC=$SEC"
371 $RPC_MODE || echo "Using GSS/krb5 ptlrpc security flavor"
372 which lgss_keyring > /dev/null 2>&1 ||
373 error_exit "built with gss disabled! SEC=$SEC"
384 IDENTITY_UPCALL=false
388 export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
390 # Paths on remote nodes, if different
391 export RLUSTRE=${RLUSTRE:-$LUSTRE}
392 export RPWD=${RPWD:-$PWD}
393 export I_MOUNTED=${I_MOUNTED:-"no"}
394 export AUSTER_CLEANUP=${AUSTER_CLEANUP:-false}
395 if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \
396 ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \
397 ! -f /lib/modules/$(uname -r)/extra/kernel/fs/lustre/mdt.ko -a \
398 ! -f $LUSTRE/mdt/mdt.ko ]; then
399 export CLIENTMODSONLY=yes
402 export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3}
403 export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"}
407 while getopts "rvwf:" opt $*; do
412 w) WRITECONF=writeconf;;
417 shift $((OPTIND - 1))
420 # print the durations of each test if "true"
421 DDETAILS=${DDETAILS:-false}
422 [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true
427 export TF_FAIL=${TF_FAIL:-$TMP/tf.fail}
429 # Constants used in more than one test script
430 export LOV_MAX_STRIPE_COUNT=2000
431 export DELETE_OLD_POOLS=${DELETE_OLD_POOLS:-false}
432 export KEEP_POOLS=${KEEP_POOLS:-false}
434 export MACHINEFILE=${MACHINEFILE:-$TMP/$(basename $0 .sh).machines}
435 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
438 # use localrecov to enable recovery for local clients, LU-12722
439 [[ $MDS1_VERSION -lt $(version_code 2.13.52) ]] || {
440 export MDS_MOUNT_OPTS=${MDS_MOUNT_OPTS:-"-o localrecov"}
441 export MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-"-o localrecov"}
444 [[ $OST1_VERSION -lt $(version_code 2.13.52) ]] ||
445 export OST_MOUNT_OPTS=${OST_MOUNT_OPTS:-"-o localrecov"}
452 ncpts=$(do_facet $facet "lctl get_param -n " \
453 "cpu_partition_table 2>/dev/null| wc -l" || echo 1)
455 if [ $ncpts -eq 0 ]; then
462 # Return a numeric version code based on a version string. The version
463 # code is useful for comparing two version strings to see which is newer.
465 # split arguments like "1.8.6-wc3" into "1", "8", "6", "3"
466 eval set -- $(tr "[:punct:][a-z]" " " <<< $*)
468 echo -n $(((${1:-0}<<24) | (${2:-0}<<16) | (${3:-0}<<8) | (${4:-0})))
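# usage sketch (illustrative; mirrors the callers elsewhere in this file):
#   (( MDS1_VERSION >= $(version_code 2.13.52) )) &&
#           echo "MDS is 2.13.52 or newer"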
471 export LINUX_VERSION=$(uname -r | sed -e "s/\([0-9]*\.[0-9]*\.[0-9]*\).*/\1/")
472 export LINUX_VERSION_CODE=$(version_code ${LINUX_VERSION//\./ })
474 # Report the Lustre build version string (e.g. 1.8.7.3 or 2.4.1).
476 # usage: lustre_build_version
478 # All Lustre versions support "lctl get_param" to report the version of the
479 # code running in the kernel (what our tests are interested in), but it
480 # doesn't work without modules loaded. After 2.9.53 and in upstream kernels
481 # the "version" parameter doesn't include "lustre: " at the beginning.
482 # If that fails, call "lctl lustre_build_version" which prints either (or both)
483 # the userspace and kernel build versions, but until 2.8.55 required root
484 # access to get the Lustre kernel version. If that also fails, fall back to
485 # using "lctl --version", which is easy to parse and works without the kernel
486 # modules, but was only added in 2.6.50 and only prints the lctl tool version,
487 # not the module version, though they are usually the same.
489 # Various commands and their output format for different Lustre versions:
490 # lctl get_param version: 2.9.55
491 # lctl get_param version: lustre: 2.8.53
492 # lctl get_param version: lustre: 2.6.52
493 # kernel: patchless_client
494 # build: v2_6_92_0-2.6.32-431.el6_lustre.x86_64
495 # lctl lustre_build_version: Lustre version: 2.8.53_27_gae67fc01
496 # lctl lustre_build_version: error: lustre_build_version: Permission denied
497 # (as non-root user) lctl version: v2_6_92_0-2.6.32-431.el6.x86_64
498 # lctl lustre_build_version: Lustre version: 2.5.3-2.6.32.26-175.fc12.x86_64
499 # lctl version: 2.5.3-2.6.32..26-175fc12.x86_64
500 # lctl --version: lctl 2.6.50
502 # output: prints version string to stdout in (up to 4) dotted-decimal values
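# usage sketch (illustrative; "2.12.8" is only an example value):
#   ver=$(lustre_build_version mds1)    # dotted-decimal string, e.g. "2.12.8"
#   (( $(version_code $ver) >= $(version_code 2.10.0) )) && echo "2.10+"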
503 lustre_build_version() {
504 local facet=${1:-client}
505 local facet_version=${facet}_VERSION
507 # if the global variable is already set, then use that
508 [ -n "${!facet_version}" ] && echo ${!facet_version} && return
510 # this is the currently-running version of the kernel modules
511 local ver=$(do_facet $facet "$LCTL get_param -n version 2>/dev/null")
512 # we mostly test 2.10+ systems, only try others if the above fails
513 if [ -z "$ver" ]; then
514 ver=$(do_facet $facet "$LCTL lustre_build_version 2>/dev/null")
516 if [ -z "$ver" ]; then
517 ver=$(do_facet $facet "$LCTL --version 2>/dev/null" |
520 local lver=$(egrep -i "lustre: |version: " <<<"$ver" | head -n 1)
521 [ -n "$lver" ] && ver="$lver"
523 lver=$(sed -e 's/[^:]*: //' -e 's/^v//' -e 's/[ -].*//' <<<$ver |
524 tr _ . | cut -d. -f1-4)
526 # save in global variable for the future
527 export $facet_version=$lver
532 # Report the Lustre numeric build version code for the supplied facet.
533 lustre_version_code() {
534 version_code $(lustre_build_version $1)
538 /sbin/lsmod | grep -q "^\<$1\>"
541 check_lfs_df_ret_val() {
542 # Ignore only the EOPNOTSUPP (95, Operation not supported) error
543 # returned by 'lfs df' for a valid dentry that is not on a Lustre filesystem.
545 # 'lfs df' historically always returned success (0) instead of
546 # EOPNOTSUPP. For compatibility, this function ignores EOPNOTSUPP
547 # and masquerades it as success.
548 [[ $1 -eq 95 ]] && return 0
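# usage sketch (illustrative; lfs_df() below does exactly this):
#   $LFS df $MOUNT; check_lfs_df_ret_val $?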
561 msg="$(insmod $module $args 2>&1)" && return 0 || rc=$?
564 # parallels can't load modules directly from prlfs, use /tmp instead
565 if $PRLFS || [[ "$(stat -f -c%t $module)" == "7c7c6673" ]]; then
566 local target="$(mktemp)"
568 cp "$module" "$target"
571 [[ $rc == 0 ]] && PRLFS=true
579 # Load a module on the system where this is running.
581 # usage: load_module module_name [module arguments for insmod/modprobe]
583 # If module arguments are not given but MODOPTS_<MODULE> is set, then its value
584 # will be used as the arguments. Otherwise arguments will be obtained from
585 # /etc/modprobe.conf, from /etc/modprobe.d/Lustre, or else none will be used.
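# example (illustrative): pass module arguments explicitly instead of relying
# on $MODOPTS_LNET or modprobe.conf:
#   load_module ../lnet/lnet/lnet accept=all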
588 local module=$1 # '../libcfs/libcfs/libcfs', 'obdclass/obdclass', ...
591 local base=$(basename $module $ext)
593 local -A module_is_loaded_aa
597 for mod in $(lsmod | awk '{ print $1; }'); do
598 module_is_loaded_aa[${mod//-/_}]=true
602 ${module_is_loaded_aa[${1//-/_}]:-false}
605 if module_is_loaded $base; then
609 if [[ -f $LUSTRE/$module$ext ]]; then
610 path=$LUSTRE/$module$ext
611 elif [[ "$base" == lnet_selftest ]] &&
612 [[ -f $LUSTRE/../lnet/selftest/$base$ext ]]; then
613 path=$LUSTRE/../lnet/selftest/$base$ext
618 if [[ -n "$path" ]]; then
619 # Try to load any non-Lustre modules that $module depends on.
620 for mod in $(modinfo --field=depends $path | tr ',' ' '); do
621 if ! module_is_loaded $mod; then
627 # If no module arguments were passed then get them from
628 # $MODOPTS_<MODULE>, otherwise from modprobe.conf.
629 if [ $# -eq 0 ]; then
630 # $MODOPTS_<MODULE>; we could use associative arrays, but that's
631 # not in Bash until 4.x, so we resort to eval.
632 optvar="MODOPTS_$(basename $module | tr a-z A-Z)"
633 eval set -- \$$optvar
634 if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then
635 # Nothing in $MODOPTS_<MODULE>; try modprobe.conf
637 opt=$(awk -v var="^options $base" '$0 ~ var \
638 {gsub("'"options $base"'",""); print}' \
640 set -- $(echo -n $opt)
642 # Ensure we have accept=all for lnet
643 if [[ "$base" == lnet ]]; then
644 # OK, this is a bit wordy...
645 local arg accept_all_present=false
648 [[ "$arg" == accept=all ]] &&
649 accept_all_present=true
652 $accept_all_present || set -- "$@" accept=all
659 [ $# -gt 0 ] && echo "${module} options: '$*'"
661 # Note that insmod will ignore anything in modprobe.conf, which is why
662 # we're passing options on the command-line. If $path does not exist
663 # then we must be testing a "make install" or "rpm" installation. Also
664 # note that failing to load ptlrpc_gss is not considered fatal.
665 if [[ -n "$path" ]]; then
666 lustre_insmod $path "$@"
667 elif [[ "$base" == ptlrpc_gss ]]; then
668 if ! modprobe $base "$@" 2>/dev/null; then
669 echo "gss/krb5 is not supported"
676 load_modules_local() {
677 if [ -n "$MODPROBE" ]; then
679 echo "Using modprobe to load modules"
683 # Create special udev test rules on every node
684 if [ -f $LUSTRE/lustre/conf/99-lustre.rules ]; then {
685 sed -e 's|/usr/sbin/lctl|$LCTL|g' $LUSTRE/lustre/conf/99-lustre.rules > /etc/udev/rules.d/99-lustre-test.rules
687 echo "SUBSYSTEM==\"lustre\", ACTION==\"change\", ENV{PARAM}==\"?*\", RUN+=\"$LCTL set_param '\$env{PARAM}=\$env{SETTING}'\"" > /etc/udev/rules.d/99-lustre-test.rules
689 udevadm control --reload-rules
692 # For kmemleak-enabled kernels we need to clear all past state
693 # that obviously has nothing to do with this Lustre run
694 # Disable automatic memory scanning to avoid perf hit.
695 if [ -f /sys/kernel/debug/kmemleak ] ; then
696 echo scan=off > /sys/kernel/debug/kmemleak || true
697 echo scan > /sys/kernel/debug/kmemleak || true
698 echo clear > /sys/kernel/debug/kmemleak || true
701 echo Loading modules from $LUSTRE
705 if [ -f /sys/devices/system/cpu/online ]; then
706 ncpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/online) + 1))
707 echo "detected $ncpus online CPUs by sysfs"
709 ncpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
711 if [ $rc -eq 0 ]; then
712 echo "detected $ncpus online CPUs by getconf"
714 echo "Can't detect number of CPUs"
719 # if there is only one CPU core, libcfs can only create one partition
720 # if there are more than 4 CPU cores, libcfs should create multiple CPU
721 # partitions. So we just force libcfs to create 2 partitions for
722 # systems with 2 to 4 cores
723 local saved_opts="$MODOPTS_LIBCFS"
724 if [ $ncpus -le 4 ] && [ $ncpus -gt 1 ]; then
725 # force to enable multiple CPU partitions
726 echo "Force libcfs to create 2 CPU partitions"
727 MODOPTS_LIBCFS="cpu_npartitions=2 $MODOPTS_LIBCFS"
729 echo "libcfs will create CPU partition based on online CPUs"
732 load_module ../libcfs/libcfs/libcfs
733 # Prevent local MODOPTS_LIBCFS being passed as part of environment
734 # variable to remote nodes
735 MODOPTS_LIBCFS=$saved_opts
738 load_module ../lnet/lnet/lnet
740 LNDPATH=${LNDPATH:-"../lnet/klnds"}
741 if [ -z "$LNETLND" ]; then
743 o2ib*) LNETLND="o2iblnd/ko2iblnd" ;;
744 tcp*) LNETLND="socklnd/ksocklnd" ;;
745 *) local lnd="${NETTYPE%%[0-9]}lnd"
746 [ -f "$LNDPATH/$lnd/k$lnd.ko" ] &&
747 LNETLND="$lnd/k$lnd" ||
748 LNETLND="socklnd/ksocklnd"
751 load_module ../lnet/klnds/$LNETLND
752 load_module obdclass/obdclass
753 load_module ptlrpc/ptlrpc
754 load_module ptlrpc/gss/ptlrpc_gss
762 load_module obdecho/obdecho
763 if ! client_only; then
764 load_module lfsck/lfsck
765 [ "$LQUOTA" != "no" ] &&
766 load_module quota/lquota $LQUOTAOPTS
767 if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then
768 load_module osd-zfs/osd_zfs
769 elif [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then
770 load_module ../ldiskfs/ldiskfs
771 load_module osd-ldiskfs/osd_ldiskfs
783 load_module llite/lustre
784 [ -d /r ] && OGDB=${OGDB:-"/r/tmp"}
786 rm -f $OGDB/ogdb-$HOSTNAME
787 $LCTL modules > $OGDB/ogdb-$HOSTNAME
789 # 'mount' doesn't look in $PATH, just sbin
790 local mount_lustre=$LUSTRE/utils/mount.lustre
791 if [ -f $mount_lustre ]; then
792 local sbin_mount=$(readlink -f /sbin)/mount.lustre
793 if grep -qw "$sbin_mount" /proc/mounts; then
794 cmp -s $mount_lustre $sbin_mount || umount $sbin_mount
796 if ! grep -qw "$sbin_mount" /proc/mounts; then
797 [ ! -f "$sbin_mount" ] && touch "$sbin_mount"
798 if [ ! -s "$sbin_mount" -a -w "$sbin_mount" ]; then
799 cat <<- EOF > "$sbin_mount"
802 echo "This $sbin_mount just a mountpoint." 1>&2
803 echo "It is never supposed to be run." 1>&2
804 logger -p emerg -- "using stub $sbin_mount $@"
807 chmod a+x $sbin_mount
809 mount --bind $mount_lustre $sbin_mount ||
810 error "can't bind $mount_lustre to $sbin_mount"
818 # load modules on remote nodes optionally
819 # lustre-tests have to be installed on these nodes
820 if $LOAD_MODULES_REMOTE; then
821 local list=$(comma_list $(remote_nodes_list))
822 if [ -n "$list" ]; then
823 echo "loading modules on: '$list'"
824 do_rpc_nodes "$list" load_modules_local
830 LEAK_LUSTRE=$(dmesg | tail -n 30 | grep "obd_memory.*leaked" || true)
831 LEAK_PORTALS=$(dmesg | tail -n 20 | egrep -i "libcfs.*memory leaked" || true)
832 if [ "$LEAK_LUSTRE" -o "$LEAK_PORTALS" ]; then
833 echo "$LEAK_LUSTRE" 1>&2
834 echo "$LEAK_PORTALS" 1>&2
835 mv $TMP/debug $TMP/debug-leak.`date +%s` || true
836 echo "Memory leaks detected"
837 [ -n "$IGNORE_LEAK" ] && { echo "ignoring leaks" && return 0; } || true
842 unload_modules_local() {
843 $LUSTRE_RMMOD ldiskfs || return 2
845 [ -f /etc/udev/rules.d/99-lustre-test.rules ] &&
846 rm /etc/udev/rules.d/99-lustre-test.rules
847 udevadm control --reload-rules
850 check_mem_leak || return 254
858 wait_exit_ST client # bug 12845
860 unload_modules_local || rc=$?
862 if $LOAD_MODULES_REMOTE; then
863 local list=$(comma_list $(remote_nodes_list))
864 if [ -n "$list" ]; then
865 echo "unloading modules on: '$list'"
866 do_rpc_nodes "$list" unload_modules_local
870 local sbin_mount=$(readlink -f /sbin)/mount.lustre
871 if grep -qe "$sbin_mount " /proc/mounts; then
872 umount $sbin_mount || true
873 [ -s $sbin_mount ] && ! grep -q "STUB MARK" $sbin_mount ||
877 [[ $rc -eq 0 ]] && echo "modules unloaded."
883 local facet=${1:-$SINGLEMDS}
886 case $(facet_fstype $facet) in
887 ldiskfs) size=72;; # largest seen is 64, leave some headroom
888 # grant_block_size is in bytes, allow at least 2x max blocksize
889 zfs) size=$(lctl get_param osc.$FSNAME*.import |
890 awk '/grant_block_size:/ {print $2/512; exit;}')
894 echo -n $((size * MDSCOUNT))
898 local facet=${1:-$SINGLEMDS}
899 local fstype=$(facet_fstype $facet)
902 ldiskfs) size=4;; # ~4KB per inode
903 zfs) size=11;; # 10 to 11KB per inode
909 check_gss_daemon_nodes() {
913 do_nodesv $list "num=\\\$(ps -o cmd -C $dname | grep $dname | wc -l);
914 if [ \\\"\\\$num\\\" -ne 1 ]; then
915 echo \\\$num instance of $dname;
920 check_gss_daemon_facet() {
924 num=`do_facet $facet ps -o cmd -C $dname | grep $dname | wc -l`
925 if [ $num -ne 1 ]; then
926 echo "$num instance of $dname on $facet"
935 echo Stopping $@ on $list
936 do_nodes $list "killall -2 $@ 2>/dev/null || true"
939 # start gss daemons on all nodes, or "daemon" on "nodes" if set
940 start_gss_daemons() {
944 if [ "$nodes" ] && [ "$daemon" ] ; then
945 echo "Starting gss daemon on nodes: $nodes"
946 do_nodes $nodes "$daemon" || return 8
950 nodes=$(comma_list $(mdts_nodes))
951 echo "Starting gss daemon on mds: $nodes"
953 # Start all versions, in case of switching
954 do_nodes $nodes "$LSVCGSSD -vvv -s -m -o -z" || return 1
956 do_nodes $nodes "$LSVCGSSD -v" || return 1
959 do_nodes $nodes "$LGSSD -v" || return 2
962 nodes=$(comma_list $(osts_nodes))
963 echo "Starting gss daemon on ost: $nodes"
965 # Start all versions, in case of switching
966 do_nodes $nodes "$LSVCGSSD -vvv -s -m -o -z" || return 3
968 do_nodes $nodes "$LSVCGSSD -v" || return 3
970 # starting on clients
972 local clients=${CLIENTS:-$HOSTNAME}
974 echo "Starting $LGSSD on clients $clients "
975 do_nodes $clients "$LGSSD -v" || return 4
978 # wait for daemons to enter "stable" status
982 # check daemons are running
984 nodes=$(comma_list $(mdts_nodes) $(osts_nodes))
985 check_gss_daemon_nodes $nodes lsvcgssd || return 5
987 nodes=$(comma_list $(mdts_nodes))
988 check_gss_daemon_nodes $nodes lgssd || return 6
991 check_gss_daemon_nodes $clients lgssd || return 7
996 local nodes=$(comma_list $(mdts_nodes))
998 send_sigint $nodes lsvcgssd lgssd
1000 nodes=$(comma_list $(osts_nodes))
1001 send_sigint $nodes lsvcgssd
1003 nodes=${CLIENTS:-$HOSTNAME}
1004 send_sigint $nodes lgssd
1008 # Add mount flags for shared key
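# usage sketch (illustrative; init_gss() below applies this to the real
# mount option variables):
#   MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)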
1010 if grep -q skpath <<< "$mt_opts" ; then
1011 mt_opts=$(echo $mt_opts |
1012 sed -e "s#skpath=[^ ,]*#skpath=$SK_PATH#")
1014 if [ -z "$mt_opts" ]; then
1015 mt_opts="-o skpath=$SK_PATH"
1017 mt_opts="$mt_opts,skpath=$SK_PATH"
1027 /usr/lib/lustre/* | /usr/lib64/lustre/* | /usr/lib/lustre | \
1036 [ $from_tree = true ]
1040 if $SHARED_KEY; then
1049 if ! module_loaded ptlrpc_gss; then
1050 load_module ptlrpc/gss/ptlrpc_gss
1051 module_loaded ptlrpc_gss ||
1052 error_exit "init_gss: GSS=$GSS, but gss/krb5 missing"
1055 if $GSS_KRB5 || $GSS_SK; then
1056 start_gss_daemons || error_exit "start gss daemon failed! rc=$?"
1059 if $GSS_SK && ! $SK_NO_KEY; then
1060 echo "Loading basic SSK keys on all servers"
1061 do_nodes $(comma_list $(all_server_nodes)) \
1062 "lgss_sk -t server -l $SK_PATH/$FSNAME.key || true"
1063 do_nodes $(comma_list $(all_server_nodes)) \
1064 "keyctl show | grep lustre | cut -c1-11 |
1066 xargs -IX keyctl setperm X 0x3f3f3f3f"
1069 if $GSS_SK && $SK_NO_KEY; then
1070 local numclients=${1:-$CLIENTCOUNT}
1071 local clients=${CLIENTS:-$HOSTNAME}
1073 # security ctx config for keyring
1075 local lgssc_conf_file="/etc/request-key.d/lgssc.conf"
1077 if from_build_tree; then
1078 mkdir -p $SK_OM_PATH
1079 if grep -q request-key /proc/mounts > /dev/null; then
1080 echo "SSK: Request key already mounted."
1082 mount -o bind $SK_OM_PATH /etc/request-key.d/
1084 local lgssc_conf_line='create lgssc * * '
1085 lgssc_conf_line+=$(which lgss_keyring)
1086 lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S'
1087 echo "$lgssc_conf_line" > $lgssc_conf_file
1090 [ -e $lgssc_conf_file ] ||
1091 error_exit "Could not find key options in $lgssc_conf_file"
1092 echo "$lgssc_conf_file content is:"
1093 cat $lgssc_conf_file
1095 if ! local_mode; then
1096 if from_build_tree; then
1097 do_nodes $(comma_list $(all_nodes)) "mkdir -p \
1099 do_nodes $(comma_list $(all_nodes)) "mount \
1100 -o bind $SK_OM_PATH \
1101 /etc/request-key.d/"
1102 do_nodes $(comma_list $(all_nodes)) "rsync \
1103 -aqv $HOSTNAME:$lgssc_conf_file \
1104 $lgssc_conf_file >/dev/null 2>&1"
1106 do_nodes $(comma_list $(all_nodes)) \
1107 "echo $lgssc_conf_file: ; \
1108 cat $lgssc_conf_file"
1112 # create shared key on all nodes
1113 mkdir -p $SK_PATH/nodemap
1114 rm -f $SK_PATH/$FSNAME.key $SK_PATH/nodemap/c*.key \
1115 $SK_PATH/$FSNAME-*.key
1116 # for nodemap testing each client may need own key,
1117 # and S2S now requires keys as well, both for "client"
1120 lgss_sk -t server -f$FSNAME -n $SK_S2SNMCLI \
1121 -w $SK_PATH/$FSNAME-nmclient.key \
1122 -d /dev/urandom >/dev/null 2>&1
1123 lgss_sk -t mgs,server -f$FSNAME -n $SK_S2SNM \
1124 -w $SK_PATH/$FSNAME-s2s-server.key \
1125 -d /dev/urandom >/dev/null 2>&1
1128 lgss_sk -t server -f$FSNAME -w $SK_PATH/$FSNAME.key \
1129 -d /dev/urandom >/dev/null 2>&1
1131 for i in $(seq 0 $((numclients - 1))); do
1132 lgss_sk -t server -f$FSNAME -n c$i \
1133 -w $SK_PATH/nodemap/c$i.key -d /dev/urandom \
1137 if ! local_mode; then
1138 for lnode in $(all_nodes); do
1139 scp -r $SK_PATH ${lnode}:$(dirname $SK_PATH)/
1142 # Set client keys to client type to generate prime P
1144 do_nodes $(all_nodes) "lgss_sk -t client,server -m \
1145 $SK_PATH/$FSNAME.key >/dev/null 2>&1"
1147 do_nodes $clients "lgss_sk -t client -m \
1148 $SK_PATH/$FSNAME.key >/dev/null 2>&1"
1149 do_nodes $clients "find $SK_PATH/nodemap -name \*.key | \
1150 xargs -IX lgss_sk -t client -m X >/dev/null 2>&1"
1152 # This is required for servers as well, if S2S in use
1154 do_nodes $(comma_list $(mdts_nodes)) \
1155 "cp $SK_PATH/$FSNAME-s2s-server.key \
1156 $SK_PATH/$FSNAME-s2s-client.key; lgss_sk \
1157 -t client -m $SK_PATH/$FSNAME-s2s-client.key \
1159 do_nodes $(comma_list $(osts_nodes)) \
1160 "cp $SK_PATH/$FSNAME-s2s-server.key \
1161 $SK_PATH/$FSNAME-s2s-client.key; lgss_sk \
1162 -t client -m $SK_PATH/$FSNAME-s2s-client.key \
1164 do_nodes $clients "lgss_sk -t client \
1165 -m $SK_PATH/$FSNAME-nmclient.key \
1170 # mount options for servers and clients
1171 MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS)
1172 MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS)
1173 OST_MOUNT_OPTS=$(add_sk_mntflag $OST_MOUNT_OPTS)
1174 MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS)
1176 if [ -z "$LGSS_KEYRING_DEBUG" ]; then
1177 LGSS_KEYRING_DEBUG=4
1181 if [ -n "$LGSS_KEYRING_DEBUG" ] && \
1182 ( local_mode || from_build_tree ); then
1184 sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG
1185 elif [ -n "$LGSS_KEYRING_DEBUG" ]; then
1186 do_nodes $(comma_list $(all_nodes)) "modprobe ptlrpc_gss && \
1188 sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG"
1195 # maybe cleanup credential cache?
1202 do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNM"
1203 do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNMCLI"
1204 $RPC_MODE || echo "Sleeping for 10 sec for Nodemap.."
1208 $RPC_MODE || echo "Cleaning up Shared Key.."
1209 do_nodes $(comma_list $(all_nodes)) "rm -f \
1210 $SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key"
1211 do_nodes $(comma_list $(all_nodes)) "keyctl show | \
1212 awk '/lustre/ { print \\\$1 }' | xargs -IX keyctl unlink X"
1213 if from_build_tree; then
1214 # Remove the mount and clean up the files we added to
1216 do_nodes $(comma_list $(all_nodes)) "while grep -q \
1217 request-key.d /proc/mounts; do umount \
1218 /etc/request-key.d/; done"
1219 do_nodes $(comma_list $(all_nodes)) "rm -f \
1220 $SK_OM_PATH/lgssc.conf"
1221 do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH"
1229 local var=${facet}_svc
1237 echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9_]\+//' |
1238 tr '[:lower:]' '[:upper:]'
1244 if [ $facet == mgs ] || [ $facet == client ]; then
1248 echo -n $facet | sed -e 's/^fs[0-9]\+//' | sed -e 's/^[a-z]\+//'
1256 if [ -n "${!var}" ]; then
1261 var=$(facet_type $facet)FSTYPE
1262 if [ -n "${!var}" ]; then
1267 if [ -n "$FSTYPE" ]; then
1272 if [[ $facet == mgs ]] && combined_mgs_mds; then
1284 local facets=$(get_facets)
1287 for facet in ${facets//,/ }; do
1288 if [ $node == $(facet_host $facet) ] ||
1289 [ $node == "$(facet_failover_host $facet)" ]; then
1290 fstype=$(facet_fstype $facet)
1291 if [[ $fstypes != *$fstype* ]]; then
1292 fstypes+="${fstypes:+,}$fstype"
1301 local num=$(facet_number $facet)
1304 if [[ $(facet_type $facet) = OST ]]; then
1305 index=OSTINDEX${num}
1306 if [[ -n "${!index}" ]]; then
1311 index=${OST_INDICES[num - 1]}
1314 [[ -n "$index" ]] || index=$((num - 1))
1322 local fstype=$(facet_fstype $facet)
1326 label=$(do_facet ${facet} "$E2LABEL ${dev} 2>/dev/null");;
1328 label=$(do_facet ${facet} "$ZFS get -H -o value lustre:svname \
1329 ${dev} 2>/dev/null");;
1331 error "unknown fstype!";;
1338 # Get the device of a facet.
1345 mgs) device=$(mgsdevname) ;;
1346 mds*) device=$(mdsdevname $(facet_number $facet)) ;;
1347 ost*) device=$(ostdevname $(facet_number $facet)) ;;
1348 fs2mds) device=$(mdsdevname 1_2) ;;
1349 fs2ost) device=$(ostdevname 1_2) ;;
1350 fs3ost) device=$(ostdevname 2_2) ;;
1358 # Get the virtual device of a facet.
1365 mgs) device=$(mgsvdevname) ;;
1366 mds*) device=$(mdsvdevname $(facet_number $facet)) ;;
1367 ost*) device=$(ostvdevname $(facet_number $facet)) ;;
1368 fs2mds) device=$(mdsvdevname 1_2) ;;
1369 fs2ost) device=$(ostvdevname 1_2) ;;
1370 fs3ost) device=$(ostvdevname 2_2) ;;
1378 local virt=$(virt-what 2> /dev/null)
1380 [ $? -eq 0 ] && [ -n "$virt" ] && { echo $virt; return; }
1382 virt=$(dmidecode -s system-product-name | awk '{print $1}')
1385 VMware|KVM|VirtualBox|Parallels|Bochs)
1386 echo $virt | tr '[A-Z]' '[a-z]' ;;
1392 # Re-read the partition table on failover partner host.
1393 # After a ZFS storage pool is created on a shared device, the partition table
1394 # on the device may change. However, the operating system on the failover
1395 # host may not notice the change automatically. Without the up-to-date partition
1396 # block devices, 'zpool import ..' cannot find the labels, whose positions are
1397 # relative to the partition rather than the disk beginning.
1399 # This function performs partprobe on the failover host to make it re-read the
1402 refresh_partition_table() {
1407 host=$(facet_passive_host $facet)
1408 if [[ -n "$host" ]]; then
1409 do_node $host "$PARTPROBE $device"
1414 # Get ZFS storage pool name.
1421 device=$(facet_device $facet)
1422 # poolname is string before "/"
1423 poolname="${device%%/*}"
1430 # Get ZFS local fsname.
1432 zfs_local_fsname() {
1434 local lfsname=$(basename $(facet_device $facet))
1440 # Create ZFS storage pool.
1447 local opts=${@:-"-o cachefile=none"}
1449 do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
1450 $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1451 $ZPOOL create -f $opts $poolname $vdev"
1455 # Create ZFS file system.
1461 local opts=${@:-"-o mountpoint=legacy"}
1463 do_facet $facet "$ZFS list -H $dataset >/dev/null 2>&1 ||
1464 $ZFS create $opts $dataset"
1468 # Export ZFS storage pool.
1469 # Before exporting the pool, all datasets within the pool should be unmounted.
1477 poolname=$(zpool_name $facet)
1479 if [[ -n "$poolname" ]]; then
1480 do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1481 grep -q ^$poolname/ /proc/mounts ||
1482 $ZPOOL export $opts $poolname"
1487 # Destroy ZFS storage pool.
1488 # Destroy the given pool and free up any devices for other use. This command
1489 # tries to unmount any active datasets before destroying the pool.
1490 # -f Force any active datasets contained within the pool to be unmounted.
1494 local poolname=${2:-$(zpool_name $facet)}
1496 if [[ -n "$poolname" ]]; then
1497 do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1498 $ZPOOL destroy -f $poolname"
1503 # Import ZFS storage pool.
1504 # Force importing, even if the pool appears to be potentially active.
1509 local opts=${@:-"-o cachefile=none -o failmode=panic"}
1512 poolname=$(zpool_name $facet)
1514 if [[ -n "$poolname" ]]; then
1515 opts+=" -d $(dirname $(facet_vdevice $facet))"
1516 do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs;
1517 $ZPOOL list -H $poolname >/dev/null 2>&1 ||
1518 $ZPOOL import -f $opts $poolname"
1523 # Reimport ZFS storage pool with new name
1528 local opts="-o cachefile=none"
1529 local poolname=$(zpool_name $facet)
1531 opts+=" -d $(dirname $(facet_vdevice $facet))"
1532 do_facet $facet "$ZPOOL export $poolname;
1533 $ZPOOL import $opts $poolname $newpool"
1537 # Set the "cachefile=none" property on ZFS storage pool so that the pool
1538 # is not automatically imported on system startup.
1540 # In a failover environment, this will provide resource level fencing which
1541 # will ensure that the same ZFS storage pool will not be imported concurrently
1542 # on different nodes.
1544 disable_zpool_cache() {
1548 poolname=$(zpool_name $facet)
1550 if [[ -n "$poolname" ]]; then
1551 do_facet $facet "$ZPOOL set cachefile=none $poolname"
1556 # This and set_osd_param() shall be used to access OSD parameters
1557 # that once existed under "obdfilter":
1562 # writethrough_cache_enable
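# usage sketch (illustrative; an empty device argument falls back to the
# $FSNAME-OST* default):
#   get_osd_param $(comma_list $(osts_nodes)) "" writethrough_cache_enable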
1566 local device=${2:-$FSNAME-OST*}
1569 do_nodes $nodes "$LCTL get_param -n osd-*.$device.$name"
1574 local device=${2:-$FSNAME-OST*}
1578 do_nodes $nodes "$LCTL set_param -n osd-*.$device.$name=$value"
1582 local dz=${1:-$DEBUG_SIZE}
1584 if [ -f /sys/devices/system/cpu/possible ]; then
1585 local cpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/possible)+1))
1587 local cpus=$(getconf _NPROCESSORS_CONF 2>/dev/null)
1590 # bug 19944, adjust size to be -gt num_possible_cpus()
1591 # promise at least 2MB for every CPU
1592 if [ -n "$cpus" ] && [ $((cpus * 2)) -gt $dz ]; then
1595 lctl set_param debug_mb=$dz
1598 set_default_debug () {
1599 local debug=${1:-"$PTLDEBUG"}
1600 local subsys=${2:-"$SUBSYSTEM"}
1601 local debug_size=${3:-$DEBUG_SIZE}
1603 [ -n "$debug" ] && lctl set_param debug="$debug" >/dev/null
1604 [ -n "$subsys" ] && lctl set_param subsystem_debug="${subsys# }" >/dev/null
1606 [ -n "$debug_size" ] && set_debug_size $debug_size > /dev/null
1609 set_default_debug_nodes () {
1611 local debug="${2:-"$PTLDEBUG"}"
1612 local subsys="${3:-"$SUBSYSTEM"}"
1613 local debug_size="${4:-$DEBUG_SIZE}"
1615 if [[ ,$nodes, = *,$HOSTNAME,* ]]; then
1616 nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME")
1620 [[ -z "$nodes" ]] ||
1621 do_rpc_nodes "$nodes" set_default_debug \
1622 \\\"$debug\\\" \\\"$subsys\\\" $debug_size || true
1625 set_default_debug_facet () {
1627 local debug="${2:-"$PTLDEBUG"}"
1628 local subsys="${3:-"$SUBSYSTEM"}"
1629 local debug_size="${4:-$DEBUG_SIZE}"
1630 local node=$(facet_active_host $facet)
1632 [ -n "$node" ] || error "No host defined for facet $facet"
1634 set_default_debug_nodes $node "$debug" "$subsys" $debug_size
1637 set_params_nodes () {
1638 [[ $# -ge 2 ]] || return 0
1642 do_nodes $nodes $LCTL set_param $@
1645 set_params_clients () {
1646 local clients=${1:-$CLIENTS}
1647 local params=${2:-$CLIENT_LCTL_SETPARAM_PARAM}
1649 [[ -n $params ]] || return 0
1650 set_params_nodes $clients $params
1654 local hostid=${1:-$(hostid)}
1656 if [ ! -s /etc/hostid ]; then
1657 printf $(echo -n $hostid |
1658 sed 's/\(..\)\(..\)\(..\)\(..\)/\\x\4\\x\3\\x\2\\x\1/') >/etc/hostid
1664 local facets=${1:-$(get_facets)}
1667 for facet in ${facets//,/ }; do
1670 [ $RC -eq 0 ] && continue
1672 if [ "$TESTSUITE.$TESTNAME" = "replay-dual.test_0a" ]; then
1673 skip_noexit "Restart of $facet failed!." &&
1676 error "Restart of $facet failed!"
1683 # Add argument "arg" (e.g., "loop") to the comma-separated list
1684 # of arguments for option "opt" (e.g., "-o") on command
1685 # line "opts" (e.g., "-o flock").
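# e.g. (illustrative): csa_add "-o flock" -o loop   ->   "-o flock,loop"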
1691 local opt_pattern="\([[:space:]]\+\|^\)$opt"
1693 if echo "$opts" | grep -q $opt_pattern; then
1694 opts=$(echo "$opts" | sed -e \
1695 "s/$opt_pattern[[:space:]]*[^[:space:]]\+/&,$arg/")
1697 opts+="${opts:+ }$opt $arg"
1703 # Associate loop device with a given regular file.
1704 # Return the loop device.
1706 setup_loop_device() {
1710 do_facet $facet "loop_dev=\\\$($LOSETUP -j $file | cut -d : -f 1);
1711 if [[ -z \\\$loop_dev ]]; then
1712 loop_dev=\\\$($LOSETUP -f);
1713 $LOSETUP \\\$loop_dev $file || loop_dev=;
1715 echo -n \\\$loop_dev"
1719 # Detach a loop device.
1721 cleanup_loop_device() {
1725 do_facet $facet "! $LOSETUP $loop_dev >/dev/null 2>&1 ||
1726 $LOSETUP -d $loop_dev"
1730 # Check if a given device is a block device.
1737 [[ -n "$dev" ]] || return 1
1738 do_facet $facet "test -b $dev" || return 1
1739 if [[ -n "$size" ]]; then
1740 local in=$(do_facet $facet "dd if=$dev of=/dev/null bs=1k \
1741 count=1 skip=$size 2>&1" |
1742 awk '($3 == "in") { print $1 }')
1743 [[ "$in" = "1+0" ]] || return 1
1748 # Check if a given device is a device-mapper device.
1754 [[ -n "$dev" ]] || return 1
1755 do_facet $facet "$DMSETUP status $dev >/dev/null 2>&1"
1759 # Check if a given device is a device-mapper flakey device.
1761 is_dm_flakey_dev() {
1766 [[ -n "$dev" ]] || return 1
1768 type=$(do_facet $facet "$DMSETUP status $dev 2>&1" |
1770 [[ $type = flakey ]] && return 0 || return 1
1774 # Check if device-mapper flakey device is supported by the kernel
1775 # of $facet node or not.
1777 dm_flakey_supported() {
1781 do_facet $facet "modprobe dm-flakey;
1782 $DMSETUP targets | grep -q flakey" &> /dev/null
1786 # Get the device-mapper flakey device name of a given facet.
1788 dm_facet_devname() {
1790 [[ $facet = mgs ]] && combined_mgs_mds && facet=mds1
1792 echo -n ${facet}_flakey
1796 # Get the device-mapper flakey device of a given facet.
1797 # A device created by dmsetup will appear as /dev/mapper/<device-name>.
1799 dm_facet_devpath() {
1802 echo -n $DM_DEV_PATH/$(dm_facet_devname $facet)
1806 # Set a device-mapper device with a new table.
1808 # The table has the following format:
1809 # <logical_start_sector> <num_sectors> <target_type> <target_args>
1811 # flakey <target_args> includes:
1812 # <destination_device> <offset> <up_interval> <down_interval> \
1813 # [<num_features> [<feature_arguments>]]
1815 # linear <target_args> includes:
1816 # <destination_device> <start_sector>
1818 dm_set_dev_table() {
1821 local target_type=$3
1827 read tmp num_sectors tmp real_dev tmp \
1828 <<< $(do_facet $facet "$DMSETUP table $dm_dev")
1830 case $target_type in
1832 table="0 $num_sectors flakey $real_dev 0 0 1800 1 drop_writes"
1835 table="0 $num_sectors linear $real_dev 0"
1837 *) error "invalid target type $target_type" ;;
1840 do_facet $facet "$DMSETUP suspend --nolockfs --noflush $dm_dev" ||
1841 error "failed to suspend $dm_dev"
1842 do_facet $facet "$DMSETUP load $dm_dev --table \\\"$table\\\"" ||
1843 error "failed to load $target_type table into $dm_dev"
1844 do_facet $facet "$DMSETUP resume $dm_dev" ||
1845 error "failed to resume $dm_dev"
1849 # Set a device-mapper flakey device as "read-only" by using the "drop_writes"
1850 # feature parameter.
1853 # All write I/O is silently ignored.
1854 # Read I/O is handled correctly.
1856 dm_set_dev_readonly() {
1858 local dm_dev=${2:-$(dm_facet_devpath $facet)}
1860 dm_set_dev_table $facet $dm_dev flakey
1864 # Set a device-mapper device to traditional linear mapping mode.
1866 dm_clear_dev_readonly() {
1868 local dm_dev=${2:-$(dm_facet_devpath $facet)}
1870 dm_set_dev_table $facet $dm_dev linear
1874 # Set the device of a given facet as "read-only".
1876 set_dev_readonly() {
1878 local svc=${facet}_svc
1880 if [[ $(facet_fstype $facet) = zfs ]] ||
1881 ! dm_flakey_supported $facet; then
1882 do_facet $facet $LCTL --device ${!svc} readonly
1884 dm_set_dev_readonly $facet
1889 # Get size in 512-byte sectors (BLKGETSIZE64 / 512) of a given device.
1896 num_sectors=$(do_facet $facet "blockdev --getsz $dev 2>/dev/null")
1897 [[ ${PIPESTATUS[0]} = 0 && -n "$num_sectors" ]] || num_sectors=0
1898 echo -n $num_sectors
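# usage sketch (illustrative; see dm_create_dev() below):
#   local num_sectors=$(get_num_sectors $facet $real_dev)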
1902 # Create a device-mapper device with a given block device or regular file (will
1903 # be associated with loop device).
1904 # Return the full path of the device-mapper device.
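# usage sketch (illustrative; mount_facet() below does this when dm-flakey
# is supported):
#   dm_dev=$(dm_create_dev $facet ${!dev})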
1908 local real_dev=$2 # destination device
1909 local dm_dev_name=${3:-$(dm_facet_devname $facet)} # device name
1910 local dm_dev=$DM_DEV_PATH/$dm_dev_name # device-mapper device
1912 # check if the device-mapper device to be created already exists
1913 if is_dm_dev $facet $dm_dev; then
1914 # if the existing device was set to "read-only", then clear it
1915 ! is_dm_flakey_dev $facet $dm_dev ||
1916 dm_clear_dev_readonly $facet $dm_dev
1922 # check if the destination device is a block device, and if not,
1923 # associate it with a loop device
1924 is_blkdev $facet $real_dev ||
1925 real_dev=$(setup_loop_device $facet $real_dev)
1926 [[ -n "$real_dev" ]] || { echo -n $real_dev; return 2; }
1928 # now create the device-mapper device
1929 local num_sectors=$(get_num_sectors $facet $real_dev)
1930 local table="0 $num_sectors linear $real_dev 0"
1933 do_facet $facet "$DMSETUP create $dm_dev_name --table \\\"$table\\\"" ||
1934 { rc=${PIPESTATUS[0]}; dm_dev=; }
1935 do_facet $facet "$DMSETUP mknodes >/dev/null 2>&1"
1942 # Map the facet name to its device variable name.
1944 facet_device_alias() {
1946 local dev_alias=$facet
1949 fs2mds) dev_alias=mds1_2 ;;
1950 fs2ost) dev_alias=ost1_2 ;;
1951 fs3ost) dev_alias=ost2_2 ;;
1959 # Save the original value of the facet device and export the new value.
1965 local active_facet=$(facet_active $facet)
1966 local dev_alias=$(facet_device_alias $active_facet)
1967 local dev_name=${dev_alias}_dev
1968 local dev=${!dev_name}
1970 if [[ $active_facet = $facet ]]; then
1971 local failover_dev=${dev_alias}failover_dev
1972 if [[ ${!failover_dev} = $dev ]]; then
1973 eval export ${failover_dev}_saved=$dev
1974 eval export ${failover_dev}=$dm_dev
1977 dev_alias=$(facet_device_alias $facet)
1978 local facet_dev=${dev_alias}_dev
1979 if [[ ${!facet_dev} = $dev ]]; then
1980 eval export ${facet_dev}_saved=$dev
1981 eval export ${facet_dev}=$dm_dev
1985 eval export ${dev_name}_saved=$dev
1986 eval export ${dev_name}=$dm_dev
1990 # Restore the saved value of the facet device.
1995 [[ $facet = mgs ]] && combined_mgs_mds && facet=mds1
1996 local dev_alias=$(facet_device_alias $facet)
1998 local saved_dev=${dev_alias}_dev_saved
1999 [[ -z ${!saved_dev} ]] ||
2000 eval export ${dev_alias}_dev=${!saved_dev}
2002 saved_dev=${dev_alias}failover_dev_saved
2003 [[ -z ${!saved_dev} ]] ||
2004 eval export ${dev_alias}failover_dev=${!saved_dev}
2008 # Remove a device-mapper device.
2009 # If the destination device is a loop device, then also detach it.
2013 local dm_dev=${2:-$(dm_facet_devpath $facet)}
2017 is_dm_dev $facet $dm_dev || return 0
2019 read major minor <<< $(do_facet $facet "$DMSETUP table $dm_dev" |
2020 awk '{ print $4 }' | awk -F: '{ print $1" "$2 }')
2022 do_facet $facet "$DMSETUP remove $dm_dev"
2023 do_facet $facet "$DMSETUP mknodes >/dev/null 2>&1"
2025 unexport_dm_dev $facet
2027 # detach a loop device
2028 [[ $major -ne 7 ]] || cleanup_loop_device $facet /dev/loop$minor
2030 # unload dm-flakey module
2031 do_facet $facet "modprobe -r dm-flakey" || true
2037 local active_facet=$(facet_active $facet)
2038 local dev_alias=$(facet_device_alias $active_facet)
2039 local dev=${dev_alias}_dev
2040 local opt=${facet}_opt
2041 local mntpt=$(facet_mntpt $facet)
2042 local opts="${!opt} $@"
2043 local fstype=$(facet_fstype $facet)
2045 local dm_dev=${!dev}
2047 module_loaded lustre || load_modules
2051 if dm_flakey_supported $facet; then
2052 dm_dev=$(dm_create_dev $facet ${!dev})
2053 [[ -n "$dm_dev" ]] || dm_dev=${!dev}
2056 is_blkdev $facet $dm_dev || opts=$(csa_add "$opts" -o loop)
2058 devicelabel=$(do_facet ${facet} "$E2LABEL $dm_dev");;
2060 # import ZFS storage pool
2061 import_zpool $facet || return ${PIPESTATUS[0]}
2063 devicelabel=$(do_facet ${facet} "$ZFS get -H -o value \
2064 lustre:svname $dm_dev");;
2066 error "unknown fstype!";;
2069 echo "Starting ${facet}: $opts $dm_dev $mntpt"
2070 # for testing LU-482 error handling in mount_facets() and test_0a()
2071 if [ -f $TMP/test-lu482-trigger ]; then
2075 "mkdir -p $mntpt; $MOUNT_CMD $opts $dm_dev $mntpt"
2079 if [ $RC -ne 0 ]; then
2080 echo "Start of $dm_dev on ${facet} failed ${RC}"
2084 health=$(do_facet ${facet} "$LCTL get_param -n health_check")
2085 if [[ "$health" != "healthy" ]]; then
2086 error "$facet is in an unhealthy state"
2089 set_default_debug_facet $facet
2091 if [[ $opts =~ .*nosvc.* ]]; then
2092 echo "Start $dm_dev without service"
2097 wait_update_facet ${facet} "$E2LABEL $dm_dev \
2098 2>/dev/null | grep -E ':[a-zA-Z]{3}[0-9]{4}'" \
2099 "" || error "$dm_dev failed to initialize!";;
2101 wait_update_facet ${facet} "$ZFS get -H -o value \
2102 lustre:svname $dm_dev 2>/dev/null | \
2103 grep -E ':[a-zA-Z]{3}[0-9]{4}'" "" ||
2104 error "$dm_dev failed to initialize!";;
2107 error "unknown fstype!";;
2111 # commit the device label change to disk
2112 if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then
2113 echo "Commit the device label on ${!dev}"
2114 do_facet $facet "sync; sleep 1; sync"
2118 label=$(devicelabel ${facet} $dm_dev)
2119 [ -z "$label" ] && echo no label for $dm_dev && exit 1
2120 eval export ${facet}_svc=${label}
2121 echo Started ${label}
2123 export_dm_dev $facet $dm_dev
2128 # start facet device options
2134 local dev_alias=$(facet_device_alias $facet)
2136 eval export ${dev_alias}_dev=${device}
2137 eval export ${facet}_opt=\"$@\"
2139 local varname=${dev_alias}failover_dev
2140 if [ -n "${!varname}" ] ; then
2141 eval export ${dev_alias}failover_dev=${!varname}
2143 eval export ${dev_alias}failover_dev=$device
2146 local mntpt=$(facet_mntpt $facet)
2147 do_facet ${facet} mkdir -p $mntpt
2148 eval export ${facet}_MOUNT=$mntpt
2149 mount_facet ${facet}
2159 local HOST=$(facet_active_host $facet)
2160 [[ -z $HOST ]] && echo stop: no host for $facet && return 0
2162 local mntpt=$(facet_mntpt $facet)
2163 running=$(do_facet ${facet} "grep -c $mntpt' ' /proc/mounts || true")
2164 if [ ${running} -ne 0 ]; then
2165 echo "Stopping $mntpt (opts:$@) on $HOST"
2166 do_facet ${facet} $UMOUNT $@ $mntpt
2169 # umount should block, but we should wait for unrelated obd's
2170 # like the MGS or MGC to also stop.
2171 wait_exit_ST ${facet} || return ${PIPESTATUS[0]}
2173 if [[ $(facet_fstype $facet) == zfs ]]; then
2174 # export ZFS storage pool
2175 [ "$KEEP_ZPOOL" = "true" ] || export_zpool $facet
2176 elif dm_flakey_supported $facet; then
2177 local host=${facet}_HOST
2178 local failover_host=${facet}failover_HOST
2179 if [[ -n ${!failover_host} && ${!failover_host} != ${!host} ]]||
2180 $CLEANUP_DM_DEV || [[ $facet = fs* ]]; then
2181 dm_cleanup_dev $facet
2186 # get mdt quota type
2188 local varsvc=${SINGLEMDS}_svc
2189 do_facet $SINGLEMDS $LCTL get_param -n \
2190 osd-$(facet_fstype $SINGLEMDS).${!varsvc}.quota_slave.enabled
2193 # get ost quota type
2195 # All OSTs should have same quota type
2196 local varsvc=ost1_svc
2197 do_facet ost1 $LCTL get_param -n \
2198 osd-$(facet_fstype ost1).${!varsvc}.quota_slave.enabled
2201 # restore old quota type settings
2203 if [ "$old_MDT_QUOTA_TYPE" ]; then
2204 if [[ $PERM_CMD == *"set_param -P"* ]]; then
2205 do_facet mgs $PERM_CMD \
2206 osd-*.$FSNAME-MDT*.quota_slave.enabled = \
2209 do_facet mgs $PERM_CMD \
2210 $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE
2213 if [ "$old_OST_QUOTA_TYPE" ]; then
2214 if [[ $PERM_CMD == *"set_param -P"* ]]; then
2215 do_facet mgs $PERM_CMD \
2216 osd-*.$FSNAME-OST*.quota_slave.enabled = \
2219 do_facet mgs $LCTL conf_param \
2220 $FSNAME.quota.ost=$old_OST_QUOTA_TYPE
2225 # Handle the case when there is a space in the lfs df
2226 # "filesystem summary" line the same as when there is no space.
2227 # This will allow fixing the "lfs df" summary line in the future.
2229 $LFS df $* | sed -e 's/filesystem /filesystem_/'
2230 check_lfs_df_ret_val $?
2233 # Get free inodes on the MDT specified by mdt index; free inodes on
2234 # the whole filesystem are returned when index == -1.
2240 if [ $index -eq -1 ]; then
2243 mdt_uuid=$(mdtuuid_from_index $index)
2246 free_inodes=$(lfs_df -i $MOUNT | grep $mdt_uuid | awk '{print $4}')
2251 # Get the OST device status from 'lfs df' with a given OST index.
2255 local mnt_pnt=${2:-$MOUNT}
2259 ost_uuid=$(ostuuid_from_index $ost_idx $mnt_pnt)
2260 lfs_df $opts $mnt_pnt | awk '/'$ost_uuid'/ { print $7 }'
2266 # save old quota type & set new quota type
2267 local mdt_qtype=$(mdt_quota_type)
2268 local ost_qtype=$(ost_quota_type)
2270 echo "[HOST:$HOSTNAME] [old_mdt_qtype:$mdt_qtype]" \
2271 "[old_ost_qtype:$ost_qtype] [new_qtype:$QUOTA_TYPE]"
2273 export old_MDT_QUOTA_TYPE=$mdt_qtype
2274 export old_OST_QUOTA_TYPE=$ost_qtype
2276 if [[ $PERM_CMD == *"set_param -P"* ]]; then
2277 do_facet mgs $PERM_CMD \
2278 osd-*.$FSNAME-MDT*.quota_slave.enabled=$QUOTA_TYPE
2279 do_facet mgs $PERM_CMD \
2280 osd-*.$FSNAME-OST*.quota_slave.enabled=$QUOTA_TYPE
2282 do_facet mgs $PERM_CMD $FSNAME.quota.mdt=$QUOTA_TYPE ||
2283 error "set mdt quota type failed"
2284 do_facet mgs $PERM_CMD $FSNAME.quota.ost=$QUOTA_TYPE ||
2285 error "set ost quota type failed"
2288 local quota_usrs=$QUOTA_USERS
2290 # get_filesystem_size
2291 local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}')
2292 local blk_soft=$((disksz + 1024))
2293 local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
2295 local inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}')
2296 local i_soft=$inodes
2297 local i_hard=$((i_soft + i_soft / 20))
2299 echo "Total disk size: $disksz block-softlimit: $blk_soft" \
2300 "block-hardlimit: $blk_hard inode-softlimit: $i_soft" \
2301 "inode-hardlimit: $i_hard"
2304 for usr in $quota_usrs; do
2305 echo "Setting up quota on $HOSTNAME:$mntpt for $usr..."
2307 cmd="$LFS setquota -$type $usr -b $blk_soft"
2308 cmd="$cmd -B $blk_hard -i $i_soft -I $i_hard $mntpt"
2310 eval $cmd || error "$cmd FAILED!"
2312 # display the quota status
2313 echo "Quota settings for $usr : "
2314 $LFS quota -v -u $usr $mntpt || true
2321 local opts=${3:-$MOUNT_OPTS}
2322 opts=${opts:+-o $opts}
2323 local flags=${4:-$MOUNT_FLAGS}
2325 local device=$MGSNID:/$FSNAME$FILESET
2326 if [ -z "$mnt" -o -z "$FSNAME" ]; then
2327 echo "Bad mount command: opt=$flags $opts dev=$device " \
2333 # update mount option with skpath
2334 opts=$(add_sk_mntflag $opts)
2337 echo "Starting client: $client: $flags $opts $device $mnt"
2338 do_node $client mkdir -p $mnt
2339 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ];then
2340 do_node $client $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \
2342 # disable FILESET if not supported
2343 do_nodes $client lctl get_param -n \
2344 mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
2345 device=$MGSNID:/$FSNAME
2346 do_node $client mkdir -p $mnt/$FILESET
2347 do_node $client "! grep -q $mnt' ' /proc/mounts ||
2350 if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
2351 # Mount using nodemap key
2352 local mountkey=$SK_PATH/$FSNAME-nmclient.key
2353 if $SK_UNIQUE_NM; then
2354 mountkey=$SK_PATH/nodemap/c0.key
2356 local prunedopts=$(echo $opts |
2357 sed -e "s#skpath=[^,^ ]*#skpath=$mountkey#g")
2358 do_node $client $MOUNT_CMD $flags $prunedopts $device $mnt ||
2361 do_node $client $MOUNT_CMD $flags $opts $device $mnt ||
2365 set_default_debug_nodes $client
2366 set_params_clients $client
2377 local running=$(do_node $client "grep -c $mnt' ' /proc/mounts") || true
2379 [ "$3" ] && force=-f
2380 [ $running -eq 0 ] && return 0
2382 echo "Stopping client $client $mnt (opts:$force)"
2383 do_node $client lsof -t $mnt || need_kill=no
2384 if [ "x$force" != "x" ] && [ "x$need_kill" != "xno" ]; then
2385 pids=$(do_node $client lsof -t $mnt | sort -u);
2386 if [ -n "$pids" ]; then
2387 do_node $client kill -9 $pids || true
2391 busy=$(do_node $client "umount $force $mnt 2>&1" | grep -c "busy") ||
2393 if [ $busy -ne 0 ] ; then
2394 echo "$mnt is still busy, wait one second" && sleep 1
2395 do_node $client umount $force $mnt
2399 # Mount the file system on the MDS
2400 mount_mds_client() {
2401 local mds_HOST=${SINGLEMDS}_HOST
2403 zconf_mount ${!mds_HOST} $MOUNT2 $MOUNT_OPTS ||
2404 error "unable to mount $MOUNT2 on MDS"
2407 # Unmount the file system on the MDS
2408 umount_mds_client() {
2409 local mds_HOST=${SINGLEMDS}_HOST
2410 zconf_umount ${!mds_HOST} $MOUNT2
2411 do_facet $SINGLEMDS "rmdir $MOUNT2"
2414 # nodes is a comma-separated list
2415 sanity_mount_check_nodes () {
2421 # FIXME: assumes that all cluster nodes run the same OS
2422 [ "$(uname)" = Linux ] || return 0
2425 for mnt in $mnts ; do
2426 do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts);
2427 mpts=\\\$(mount | grep -c $mnt' ');
2428 if [ \\\$running -ne \\\$mpts ]; then
2429 echo \\\$(hostname) env is INSANE!;
2432 [ $? -eq 0 ] || rc=1
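# Note on quoting: the \\\$ sequences above survive both the local
# double-quote expansion and the shell started by do_nodes, so $running
# and $mpts are expanded on the remote node rather than locally.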
2437 sanity_mount_check_servers () {
2438 [ -n "$CLIENTONLY" ] &&
2439 { echo "CLIENTONLY mode, skip mount_check_servers"; return 0; } || true
2440 echo Checking server environments
2442 # FIXME: modify get_facets to display all facets without params
2443 local facets="$(get_facets OST),$(get_facets MDS),mgs"
2447 for facet in ${facets//,/ }; do
2448 node=$(facet_host ${facet})
2449 mntpt=$(facet_mntpt $facet)
2450 sanity_mount_check_nodes $node $mntpt ||
2451 { error "server $node environment is insane!"; return 1; }
2455 sanity_mount_check_clients () {
2456 local clients=${1:-$CLIENTS}
2457 local mntpt=${2:-$MOUNT}
2458 local mntpt2=${3:-$MOUNT2}
2460 [ -z $clients ] && clients=$(hostname)
2461 echo Checking environments on clients $clients
2463 sanity_mount_check_nodes $clients $mntpt $mntpt2 ||
2464 error "client environments are insane!"
2467 sanity_mount_check () {
2468 sanity_mount_check_servers || return 1
2469 sanity_mount_check_clients || return 2
2472 # mount clients if not mounted
2473 zconf_mount_clients() {
2476 local opts=${3:-$MOUNT_OPTS}
2477 opts=${opts:+-o $opts}
2478 local flags=${4:-$MOUNT_FLAGS}
2479 local device=$MGSNID:/$FSNAME$FILESET
2480 if [ -z "$mnt" -o -z "$FSNAME" ]; then
2481 echo "Bad conf mount command: opt=$flags $opts dev=$device " \
2486 echo "Starting client $clients: $flags $opts $device $mnt"
2487 do_nodes $clients mkdir -p $mnt
2488 if [ -n "$FILESET" -a -z "$SKIP_FILESET" ]; then
2489 if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
2490 # Mount with own nodemap key
2492 # Mount all server nodes first with per-NM keys
2493 for nmclient in ${clients//,/ }; do
2494 # do_nodes $(comma_list $(all_server_nodes)) "lgss_sk -t server -l $SK_PATH/nodemap/c$i.key -n c$i"
2495 do_nodes $(comma_list $(all_server_nodes)) "lgss_sk -t server -l $SK_PATH/nodemap/c$i.key"
2498 # set perms for per-nodemap keys, else mounts fail with permission denied
2499 do_nodes $(comma_list $(all_nodes)) \
2500 "keyctl show | grep lustre | cut -c1-11 |
2502 xargs -IX keyctl setperm X 0x3f3f3f3f"
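# The mask 0x3f3f3f3f grants all six key permissions (view, read, write,
# search, link, setattr) to possessor, user, group and other for every
# matching lustre key.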
2503 local mountkey=$SK_PATH/$FSNAME-nmclient.key
2505 for nmclient in ${clients//,/ }; do
2506 if $SK_UNIQUE_NM; then
2507 mountkey=$SK_PATH/nodemap/c$i.key
2509 do_node $nmclient "! grep -q $mnt' ' \
2510 /proc/mounts || umount $mnt"
2511 local prunedopts=$(add_sk_mntflag $opts);
2512 prunedopts=$(echo $prunedopts | sed -e \
2513 "s#skpath=[^ ^,]*#skpath=$mountkey#g")
2515 do_nodes $(comma_list $(all_server_nodes)) \
2518 do_node $nmclient $MOUNT_CMD $flags \
2519 $prunedopts $MGSNID:/$FSNAME $mnt ||
2524 do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
2526 do_nodes $clients $MOUNT_CMD $flags $opts \
2527 $MGSNID:/$FSNAME $mnt || return 1
2529 # disable FILESET if not supported
2530 do_nodes $clients lctl get_param -n \
2531 mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
2532 device=$MGSNID:/$FSNAME
2533 do_nodes $clients mkdir -p $mnt/$FILESET
2534 do_nodes $clients "! grep -q $mnt' ' /proc/mounts ||
2538 if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then
2539 # Mount with nodemap key
2541 local mountkey=$SK_PATH/$FSNAME-nmclient.key
2542 for nmclient in ${clients//,/ }; do
2543 if $SK_UNIQUE_NM; then
2544 mountkey=$SK_PATH/nodemap/c$i.key
2546 local prunedopts=$(echo $opts | sed -e \
2547 "s#skpath=[^ ^,]*#skpath=$mountkey#g");
2548 do_node $nmclient "! grep -q $mnt' ' /proc/mounts ||
2551 running=\\\$(mount | grep -c $mnt' ');
2553 if [ \\\$running -eq 0 ] ; then
2555 $MOUNT_CMD $flags $prunedopts $device $mnt;
2558 lustre_mnt_count=\\\$(mount | grep $mnt' ' | \
2559 grep 'type lustre' | wc -l);
2560 if [ \\\$running -ne \\\$lustre_mnt_count ] ; then
2561 echo zconf_mount_clients FAILED: \
2562 mount count \\\$running does not match \
2563 'type lustre' mount count \
2564 \\\$lustre_mnt_count;
2568 exit \\\$rc" || return ${PIPESTATUS[0]}
2575 if $SHARED_KEY; then
2576 tmpopts=$(add_sk_mntflag $opts)
2579 running=\\\$(mount | grep -c $mnt' ');
2581 if [ \\\$running -eq 0 ] ; then
2583 $MOUNT_CMD $flags $tmpopts $device $mnt;
2586 exit \\\$rc" || return ${PIPESTATUS[0]}
2589 echo "Started clients $clients: "
2590 do_nodes $clients "mount | grep $mnt' '"
2592 set_default_debug_nodes $clients
2593 set_params_clients $clients
2598 zconf_umount_clients() {
2603 [ "$3" ] && force=-f
2605 echo "Stopping clients: $clients $mnt (opts:$force)"
2606 do_nodes $clients "running=\\\$(grep -c $mnt' ' /proc/mounts);
2607 if [ \\\$running -ne 0 ] ; then
2608 echo Stopping client \\\$(hostname) $mnt opts:$force;
2609 lsof $mnt || need_kill=no;
2610 if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then
2611 pids=\\\$(lsof -t $mnt | sort -u);
2612 if [ -n \\\"\\\$pids\\\" ]; then
2616 while umount $force $mnt 2>&1 | grep -q "busy"; do
2617 echo "$mnt is still busy, wait one second" && sleep 1;
2624 echo + $POWER_DOWN $node
2628 shutdown_node_hard () {
2630 local attempts=$SHUTDOWN_ATTEMPTS
2632 for i in $(seq $attempts) ; do
2635 wait_for_function --quiet "! ping -w 3 -c 1 $host" 5 1 && return 0
2636 echo "waiting for $host to fail attempts=$attempts"
2637 [ $i -lt $attempts ] || \
2638 { echo "$host still pingable after power down! attempts=$attempts" && return 1; }
2644 local mnt=${2:-$MOUNT}
2647 if [ "$FAILURE_MODE" = HARD ]; then
2648 shutdown_node_hard $client
2650 zconf_umount_clients $client $mnt -f
2656 local facets="$(get_facets OST),$(get_facets MDS)"
2659 combined_mgs_mds || facets="$facets,mgs"
2661 for facet in ${facets//,/ }; do
2662 if [ $(facet_active_host $facet) == $host ]; then
2663 affected="$affected $facet"
2667 echo $(comma_list $affected)
2672 local host=${2:-$(facet_host $facet)}
2674 local label=$(convert_facet2label $facet)
2675 do_node $host $LCTL dl | awk '{ print $4 }' | grep -q "^$label\$"
2678 facets_up_on_host () {
2680 local facets=$(facets_on_host $host)
2683 for facet in ${facets//,/ }; do
2684 if $(facet_up $facet $host); then
2685 affected_up="$affected_up $facet"
2689 echo $(comma_list $affected_up)
2694 local affected_facet
2695 local affected_facets
2697 if [[ "$FAILURE_MODE" = HARD ]]; then
2698 if [[ $(facet_fstype $facet) = ldiskfs ]] &&
2699 dm_flakey_supported $facet; then
2700 affected_facets=$(affected_facets $facet)
2701 for affected_facet in ${affected_facets//,/ }; do
2702 unexport_dm_dev $affected_facet
2706 shutdown_node_hard $(facet_active_host $facet)
2714 echo + $POWER_UP $node
2727 local node=$(facet_active_host $facet)
2729 if [ "$FAILURE_MODE" = HARD ]; then
2739 if [ "$FAILURE_MODE" = HARD ]; then
2742 if $LOAD_MODULES_REMOTE; then
2743 echo "loading modules on $node: $facet"
2744 do_rpc_nodes $node load_modules_local
2753 for facet in ${facets//,/ }; do
2754 hosts=$(expand_list $hosts $(facet_host $facet) )
2760 _check_progs_installed () {
2764 for prog in $progs; do
2765 if ! [ "$(which $prog)" -o "${!prog}" ]; then
2766 echo $prog missing on $(hostname)
2773 check_progs_installed () {
2777 do_rpc_nodes "$nodes" _check_progs_installed $@
2780 # recovery-scale functions
2782 echo __$(echo $1 | tr '-' '_' | tr '.' '_')
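# Example: "node_var_name client-32.lab.example.com" prints
# __client_32_lab_example_com, a string that is safe to use as a shell
# variable name.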
2785 start_client_load() {
2788 local var=$(node_var_name $client)_load
2789 eval export ${var}=$load
2791 do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \
2792 BREAK_ON_ERROR=$BREAK_ON_ERROR \
2793 END_RUN_FILE=$END_RUN_FILE \
2794 LOAD_PID_FILE=$LOAD_PID_FILE \
2795 TESTLOG_PREFIX=$TESTLOG_PREFIX \
2796 TESTNAME=$TESTNAME \
2797 DBENCH_LIB=$DBENCH_LIB \
2798 DBENCH_SRC=$DBENCH_SRC \
2799 CLIENT_COUNT=$((CLIENTCOUNT - 1)) \
2804 MPIRUN_OPTIONS=\\\"$MPIRUN_OPTIONS\\\" \
2805 MACHINEFILE_OPTION=\\\"$MACHINEFILE_OPTION\\\" \
2806 num_clients=$(get_node_count ${CLIENTS//,/ }) \
2807 ior_THREADS=$ior_THREADS ior_iteration=$ior_iteration \
2808 ior_blockSize=$ior_blockSize \
2809 ior_blockUnit=$ior_blockUnit \
2810 ior_xferSize=$ior_xferSize ior_type=$ior_type \
2811 ior_DURATION=$ior_DURATION \
2812 ior_stripe_params=\\\"$ior_stripe_params\\\" \
2813 ior_custom_params=\\\"$ior_custom_param\\\" \
2814 mpi_ior_custom_threads=$mpi_ior_custom_threads \
2817 log "Started client load: ${load} on $client"
2819 # get the children process IDs
2820 local pids=$(ps --ppid $ppid -o pid= | xargs)
2821 CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids"
2825 start_client_loads () {
2826 local -a clients=(${1//,/ })
2827 local numloads=${#CLIENT_LOADS[@]}
2829 for ((nodenum=0; nodenum < ${#clients[@]}; nodenum++ )); do
2830 local load=$((nodenum % numloads))
2831 start_client_load ${clients[nodenum]} ${CLIENT_LOADS[load]}
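# Example (hypothetical): with clients c1,c2,c3 and
# CLIENT_LOADS=(dbench tar), the modulo above assigns dbench to c1 and
# c3, and tar to c2.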
2833 # bug 22169: wait for the background threads to start
2837 # only for remote client
2838 check_client_load () {
2840 local var=$(node_var_name $client)_load
2841 local testload=run_${!var}.sh
2843 ps auxww | grep -v grep | grep $client | grep -q $testload || return 1
2845 # bug 18914: try to connect several times, not only when
2846 # checking ps but also while running check_node_health
2850 while [ $RC = 254 -a $tries -gt 0 ]; do
2854 if ! check_node_health $client; then
2856 if [ $RC -eq 254 ]; then
2857 # FIXME: not sure how long we should sleep here
2861 echo "check node health failed: RC=$RC "
2865 # We can keep trying to connect if RC=254
2866 # Just print a warning about it
2867 if [ $RC = 254 ]; then
2868 echo "got a return status of $RC from do_node while checking " \
2869 "node health on $client"
2872 # see if the load is still on the client
2875 while [ $RC = 254 -a $tries -gt 0 ]; do
2879 if ! do_node $client \
2880 "ps auxwww | grep -v grep | grep -q $testload"; then
2885 if [ $RC = 254 ]; then
2886 echo "got a return status of $RC from do_node while checking " \
2887 "(node health and 'ps') the client load on $client"
2888 # see if we can diagnose why this happened
2893 check_client_loads () {
2894 local clients=${1//,/ }
2898 for client in $clients; do
2899 check_client_load $client
2901 if [ "$rc" != 0 ]; then
2902 log "Client load failed on node $client, rc=$rc"
2908 restart_client_loads () {
2909 local clients=${1//,/ }
2910 local expectedfail=${2:-""}
2914 for client in $clients; do
2915 check_client_load $client
2917 if [ "$rc" != 0 -a "$expectedfail" ]; then
2918 local var=$(node_var_name $client)_load
2919 start_client_load $client ${!var}
2920 echo "Restarted client load ${!var}: on $client. Checking ..."
2921 check_client_load $client
2923 if [ "$rc" != 0 ]; then
2924 log "Client load failed to restart on node $client, rc=$rc"
2925 # failure of one client load means the test fails;
2926 # we do not need to check the others
2935 # Start vmstat and save its process ID in a file.
2940 [ -z "$nodes" -o -z "$pid_file" ] && return 0
2943 "vmstat 1 > $TESTLOG_PREFIX.$TESTNAME.vmstat.\\\$(hostname -s).log \
2944 2>/dev/null </dev/null & echo \\\$! > $pid_file"
2947 # Display the nodes on which client loads failed.
2948 print_end_run_file() {
2952 [ -s $file ] || return 0
2954 echo "Found the END_RUN_FILE file: $file"
2957 # A client load will stop if it finds the END_RUN_FILE file.
2958 # That does not mean the client load actually failed though.
2959 # The first node in END_RUN_FILE is the one we are interested in.
2962 if [ -n "$node" ]; then
2963 local var=$(node_var_name $node)_load
2965 local prefix=$TESTLOG_PREFIX
2966 [ -n "$TESTNAME" ] && prefix=$prefix.$TESTNAME
2967 local stdout_log=$prefix.run_${!var}_stdout.$node.log
2968 local debug_log=$(echo $stdout_log | sed 's/\(.*\)stdout/\1debug/')
2970 echo "Client load ${!var} failed on node $node:"
2976 # Stop the process which had its PID saved in a file.
2981 [ -z "$nodes" -o -z "$pid_file" ] && return 0
2983 do_nodes $nodes "test -f $pid_file &&
2984 { kill -s TERM \\\$(cat $pid_file); rm -f $pid_file; }" || true
2987 # Stop all client loads.
2988 stop_client_loads() {
2989 local nodes=${1:-$CLIENTS}
2992 # stop the client loads
2993 stop_process $nodes $pid_file
2995 # clean up the processes that started them
2996 [ -n "$CLIENT_LOAD_PIDS" ] && kill -9 $CLIENT_LOAD_PIDS 2>/dev/null || true
2998 # End recovery-scale functions
3001 # wait for a command to return the expected result
3003 # This will run @check on @node repeatedly until the output matches @expect
3004 # based on the supplied condition, or until @max_wait seconds have elapsed,
3005 # whichever comes first. @cond may be one of the normal bash operators,
3006 # "-gt", "-ge", "-eq", "-le", "-lt", "==", "!=", or "=~", and must be quoted
3007 # in the caller to avoid unintentional evaluation by the shell in the caller.
3009 # If @max_wait is not specified, the condition will be checked for up to 90s.
3011 # If --verbose is passed as the first argument, the result is printed on each
3012 # value change, otherwise it is only printed after every 10s interval.
3014 # If --quiet is passed as the first/second argument, the do_node() command
3015 # will not print the remote command before executing it each time.
3017 # Using wait_update_cond() or related helper function is preferable to adding
3018 # a "long enough" wait for some state to change in the background, since
3019 # "long enough" may be too short due to tunables, system config, or running in
3020 # a VM, and must by necessity wait too long for most cases or risk failure.
3022 # usage: wait_update_cond [--verbose] [--quiet] node check cond expect [max_wait]
3023 wait_update_cond() {
3027 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3028 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3034 local max_wait=${5:-90}
3038 local begin=$SECONDS
3042 while (( $waited <= $max_wait )); do
3043 result=$(do_node $quiet $node "$check")
3045 eval [[ "'$result'" $cond "'$expect'" ]]
3046 if [[ $? == 0 ]]; then
3047 [[ -z "$result" || $waited -le $sleep ]] ||
3048 echo "Updated after ${waited}s: want '$expect' got '$result'"
3051 if [[ -n "$verbose" && "$result" != "$prev_result" ]]; then
3052 [[ -n "$prev_result" ]] &&
3053 echo "Changed after ${waited}s: from '$prev_result' to '$result'"
3054 prev_result="$result"
3056 (( $waited % $print == 0 )) &&
3057 echo "Waiting $((max_wait - waited))s for '$expect'"
3059 waited=$((SECONDS - begin))
3061 echo "Update not seen after ${max_wait}s: want '$expect' got '$result'"
3065 # usage: wait_update [--verbose] [--quiet] node check expect [max_wait]
3070 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3071 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3078 wait_update_cond $verbose $quiet $node "$check" "==" "$expect" $max_wait
3081 # usage: wait_update_facet_cond [--verbose] facet check cond expect [max_wait]
3082 wait_update_facet_cond() {
3086 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3087 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3089 local node=$(facet_active_host $1)
3095 wait_update_cond $verbose $quiet $node "$check" "$cond" "$expect" $max_wait
3098 # usage: wait_update_facet [--verbose] facet check expect [max_wait]
3099 wait_update_facet() {
3103 [[ "$1" == "--verbose" ]] && verbose="$1" && shift
3104 [[ "$1" == "--quiet" || "$1" == "-q" ]] && quiet="$1" && shift
3106 local node=$(facet_active_host $1)
3111 wait_update_cond $verbose $quiet $node "$check" "==" "$expect" $max_wait
3115 do_nodes $(comma_list $(mdts_nodes)) \
3116 "lctl set_param -n os[cd]*.*MDT*.force_sync=1"
3117 do_nodes $(comma_list $(osts_nodes)) \
3118 "lctl set_param -n osd*.*OS*.force_sync=1" 2>&1 |
3119 grep -v 'Found no match'
3123 local zfs_wait=${2:-5}
3125 # the occupied disk space will be released
3126 # only after TXGs are committed
3127 if [[ $(facet_fstype $1) == zfs ]]; then
3128 echo "sleep $zfs_wait for ZFS $(facet_fstype $1)"
3136 local lwm=$3 # low watermark
3137 local size_mb # how many MB we should write to pass the watermark
3138 local ost_name=$(ostname_from_index $ost_idx)
3140 free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
3142 if (( $free_kb / 1024 > lwm )); then
3143 size_mb=$((free_kb / 1024 - lwm))
3145 # if 10% of the free space crosses the low watermark, use that instead
3146 if (( $free_kb / 10240 > size_mb )); then
3147 size_mb=$((free_kb / 10240))
3149 # we need to write at least 1.1x the difference between the
3150 # free space and the low watermark
3151 size_mb=$((size_mb + size_mb / 10))
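# Worked example (hypothetical numbers): with 10240 MB free and
# lwm=10000, size_mb starts at 10240 - 10000 = 240; 10% of the free
# space (1024 MB) is larger, so size_mb becomes 1024, and the 10% safety
# margin above brings the final write size to 1126 MB.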
3153 if (( lwm <= $free_kb / 1024 )) ||
3154 [ ! -f $DIR/${filename}.fill_ost$ost_idx ]; then
3155 $LFS setstripe -i $ost_idx -c1 $DIR/${filename}.fill_ost$ost_idx
3156 dd if=/dev/zero of=$DIR/${filename}.fill_ost$ost_idx bs=1M \
3157 count=$size_mb oflag=append conv=notrunc
3162 free_kb=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
3163 echo "OST still has $((free_kb / 1024)) MB free"
3166 # This checks only the primary MDS
3167 ost_watermarks_get() {
3169 local ost_name=$(ostname_from_index $ost_idx)
3170 local mdtosc_proc=$(get_mdtosc_proc_path $SINGLEMDS $ost_name)
3172 local hwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
3173 osp.$mdtosc_proc.reserved_mb_high)
3174 local lwm=$(do_facet $SINGLEMDS $LCTL get_param -n \
3175 osp.$mdtosc_proc.reserved_mb_low)
3180 # Note that we set watermarks on all MDSes (necessary for striped dirs)
3181 ost_watermarks_set() {
3185 local ost_name=$(ostname_from_index $ost_idx)
3186 local facets=$(get_facets MDS)
3188 do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
3189 osp.*$ost_name*.reserved_mb_low=$lwm \
3190 osp.*$ost_name*.reserved_mb_high=$hwm > /dev/null
3192 # sleep to ensure we see the change
3196 ost_watermarks_set_low_space() {
3198 local wms=$(ost_watermarks_get $ost_idx)
3199 local ost_name=$(ostname_from_index $ost_idx)
3201 local old_lwm=$(echo $wms | awk '{ print $1 }')
3202 local old_hwm=$(echo $wms | awk '{ print $2 }')
3204 local blocks=$($LFS df $MOUNT | awk "/$ost_name/ { print \$4 }")
3205 # minimal extension size is 64M
3207 if (( $blocks / 1024 > 50 )); then
3208 new_lwm=$((blocks / 1024 - 50))
3210 local new_hwm=$((new_lwm + 5))
3212 ost_watermarks_set $ost_idx $new_lwm $new_hwm
3213 echo "watermarks: $old_lwm $old_hwm $new_lwm $new_hwm"
3216 # Set watermarks to ~current available space & then write data to fill it
3217 # Note OST is not *actually* full after this, it just reports ENOSPC in the
3218 # internal statfs used by the stripe allocator
3220 # the first parameter is the filename prefix, which must match the t-f
3221 # cleanup pattern (rm -rf $DIR/[Rdfs][0-9]*), i.e. $tfile works fine
3222 ost_watermarks_set_enospc() {
3226 local ost_name=$(ostname_from_index $ost_idx)
3227 local facets=$(get_facets MDS)
3231 for MDS in ${facets//,/ }; do
3232 local mdtosc_proc=$(get_mdtosc_proc_path $MDS $ost_name)
3234 do_facet $MDS $LCTL get_param -n \
3235 osp.$mdtosc_proc.reserved_mb_high ||
3236 skip "remote MDS does not support reserved_mb_high"
3239 wms=$(ost_watermarks_set_low_space $ost_idx)
3240 local new_lwm=$(echo $wms | awk '{ print $4 }')
3241 fill_ost $filename $ost_idx $new_lwm
3242 # the first ENOSPC could trigger orphan deletion, so repeat
3243 fill_ost $filename $ost_idx $new_lwm
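# Usage sketch: capture the watermarks, exercise the ENOSPC behaviour in
# the test body, then restore them:
#   local wms=$(ost_watermarks_set_enospc $tfile 0)
#   ... test against the OST that now reports ENOSPC ...
#   ost_watermarks_clear_enospc $tfile 0 $wms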
3247 ost_watermarks_enospc_delete_files() {
3251 rm -f $DIR/${filename}.fill_ost$ost_idx
3253 wait_delete_completed
3257 # clean up from "ost_watermarks_set_enospc"
3258 ost_watermarks_clear_enospc() {
3264 ost_watermarks_enospc_delete_files $filename $ost_idx
3265 ost_watermarks_set $ost_idx $old_lwm $old_hwm
3266 echo "set OST$ost_idx lwm back to $old_lwm, hwm back to $old_hwm"
3269 wait_delete_completed_mds() {
3270 local max_wait=${1:-20}
3272 local stime=$(date +%s)
3277 # find MDS with pending deletions
3278 for node in $(mdts_nodes); do
3279 changes=$(do_node $node "$LCTL get_param -n osc.*MDT*.sync_*" \
3280 2>/dev/null | calc_sum)
3281 if [[ $changes -eq 0 ]]; then
3284 mds2sync="$mds2sync $node"
3286 if [ -z "$mds2sync" ]; then
3287 wait_zfs_commit $SINGLEMDS
3290 mds2sync=$(comma_list $mds2sync)
3292 # sync MDS transactions
3293 do_nodes $mds2sync "$LCTL set_param -n os[cd]*.*MD*.force_sync 1"
3295 # wait till all changes are sent and committed by OSTs
3296 # for ldiskfs space is released upon execution, but DMU
3297 # does this upon commit
3300 while [[ $WAIT -ne $max_wait ]]; do
3301 changes=$(do_nodes $mds2sync \
3302 "$LCTL get_param -n osc.*MDT*.sync_*" | calc_sum)
3303 #echo "$node: $changes changes on all"
3304 if [[ $changes -eq 0 ]]; then
3305 wait_zfs_commit $SINGLEMDS
3307 # the occupied disk space will be released
3308 # only after TXGs are committed
3309 wait_zfs_commit ost1
3317 echo "Delete is not completed in $((etime - stime)) seconds"
3318 do_nodes $mds2sync "$LCTL get_param osc.*MDT*.sync_*"
3325 # we can use "for" here because we are waiting for the slowest
3326 for host in ${hostlist//,/ }; do
3327 check_network "$host" 900
3329 while ! do_nodes $hostlist hostname > /dev/null; do sleep 5; done
3336 for facet in ${facetlist//,/ }; do
3337 hostlist=$(expand_list $hostlist $(facet_active_host $facet))
3339 wait_for_host $hostlist
3342 _wait_recovery_complete () {
3345 # Use default policy if $2 is not passed by caller.
3346 local MAX=${2:-$(max_recovery_time)}
3351 while [ $WAIT -lt $MAX ]; do
3352 STATUS=$(lctl get_param -n $param | grep status)
3354 [[ $STATUS = "status: COMPLETE" || $STATUS = "status: INACTIVE" ]] && return 0
3357 echo "Waiting $((MAX - WAIT)) secs for $param recovery done. $STATUS"
3359 echo "$param recovery not done in $MAX sec. $STATUS"
3363 wait_recovery_complete () {
3366 # under the assumption that at_max is the same on all nodes
3367 local MAX=${2:-$(max_recovery_time)}
3370 if [ "$FAILURE_MODE" = HARD ]; then
3371 facets=$(facets_on_host $(facet_active_host $facet))
3373 echo affected facets: $facets
3375 # we can use "for" here because we are waiting for the slowest
3376 for facet in ${facets//,/ }; do
3377 local var_svc=${facet}_svc
3378 local param="*.${!var_svc}.recovery_status"
3380 local host=$(facet_active_host $facet)
3381 do_rpc_nodes "$host" _wait_recovery_complete $param $MAX
3385 wait_mds_ost_sync () {
3386 # just because recovery is done doesn't mean we've finished
3387 # orphan cleanup. Wait for llogs to get synchronized.
3388 echo "Waiting for orphan cleanup..."
3389 # MAX value includes time needed for MDS-OST reconnection
3390 local MAX=$(( TIMEOUT * 2 ))
3391 local WAIT_TIMEOUT=${1:-$MAX}
3394 local list=$(comma_list $(mdts_nodes))
3395 local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed"
3396 if ! do_facet $SINGLEMDS \
3397 "$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null"
3399 # old way, use mds_sync
3401 list=$(comma_list $(osts_nodes))
3402 cmd="$LCTL get_param -n obdfilter.*.mds_sync"
3405 echo "wait $WAIT_TIMEOUT secs maximumly for $list mds-ost sync done."
3406 while [ $WAIT -lt $WAIT_TIMEOUT ]; do
3407 local -a sync=($(do_nodes $list "$cmd"))
3410 for ((i=0; i<${#sync[@]}; i++)); do
3412 [ ${sync[$i]} -eq 1 ] && continue
3414 [ ${sync[$i]} -eq 0 ] && continue
3416 # there is an unfinished MDS-OST synchronization
3420 sleep 2 # increase waiting time and cover statfs cache
3421 [ ${con} -eq 1 ] && return 0
3422 echo "Waiting $WAIT secs for $list $i mds-ost sync done."
3426 # show which nodes are not finished.
3427 cmd=$(echo $cmd | sed 's/-n//')
3428 do_nodes $list "$cmd"
3429 echo "$facet recovery node $i not done in $WAIT_TIMEOUT sec. $STATUS"
3433 # Wait for OSTs to be active on both the client and MDT side.
3435 local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd |
3436 awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'"
3437 wait_update $HOSTNAME "eval $cmd" $OSTCOUNT ||
3438 error "wait_update OSTs up on client failed"
3440 cmd="$LCTL get_param osp.$FSNAME-OST*-MDT0000.prealloc_last_id |
3441 awk '/=[1-9][0-9]/ { c += 1 } END { printf \\\"%d\\\", c }'"
3442 wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT ||
3443 error "wait_update OSTs up on MDT0000 failed"
3446 wait_destroy_complete () {
3447 echo "Waiting for MDT destroys to complete"
3448 # MAX value shouldn't be big as this would mean poor server
3449 # responsiveness; never increase it just to make a test pass, but
3450 # investigate why it takes so long
3453 local list=$(comma_list $(mdts_nodes))
3454 while [ $WAIT -lt $MAX ]; do
3455 local -a RPCs=($(do_nodes $list $LCTL get_param -n osp.*.destroys_in_flight))
3459 for ((i=0; i<${#RPCs[@]}; i++)); do
3460 [ ${RPCs[$i]} -eq 0 ] && continue
3461 # there are still some destroy RPCs in flight
3466 [ ${con} -eq 1 ] && return 0 # done waiting
3467 echo "Waiting ${WAIT}s for local destroys to complete"
3470 echo "MDT destroys weren't done in $MAX sec."
3474 wait_delete_completed() {
3475 wait_delete_completed_mds $1 || return $?
3476 wait_destroy_complete || return $?
3485 # conf-sanity 31 takes a long time to clean up
3486 while [ $WAIT -lt 300 ]; do
3487 running=$(do_facet ${facet} "lsmod | grep lnet > /dev/null &&
3488 lctl dl | grep ' ST ' || true")
3489 [ -z "${running}" ] && return 0
3490 echo "waited $WAIT for${running}"
3491 [ $INTERVAL -lt 64 ] && INTERVAL=$((INTERVAL + INTERVAL))
3493 WAIT=$((WAIT + INTERVAL))
3495 echo "service didn't stop after $WAIT seconds. Still running:"
3500 wait_remote_prog () {
3506 [ "$PDSH" = "no_dsh" ] && return 0
3508 while [ $WAIT -lt $2 ]; do
3509 running=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep) || true
3510 [ -z "${running}" ] && return 0 || true
3511 echo "waited $WAIT for: "
3513 [ $INTERVAL -lt 60 ] && INTERVAL=$((INTERVAL + INTERVAL))
3515 WAIT=$((WAIT + INTERVAL))
3517 local pids=$(ps uax | grep "$PDSH.*$prog.*$MOUNT" | grep -v grep | awk '{print $2}')
3518 [ -z "$pids" ] && return 0
3519 echo "$PDSH processes still exists after $WAIT seconds. Still running: $pids"
3520 # FIXME: not portable
3521 for pid in $pids; do
3522 cat /proc/${pid}/status || true
3523 cat /proc/${pid}/wchan || true
3525 kill -9 $pid || true
3534 local clients=${1:-$CLIENTS}
3537 if [ -z "$clients" ]; then
3538 $LFS df $MOUNT > /dev/null
3541 $PDSH $clients "$LFS df $MOUNT" > /dev/null
3545 check_lfs_df_ret_val $rc
3552 # not every config has many clients
3558 (( MDSCOUNT == 1 )) && return
3560 # wait so that the statfs data on the MDTs expires
3561 local delay=$(do_facet $SINGLEMDS lctl \
3562 get_param -n osp.*MDT0000*MDT0001.maxage)
3564 local nodes=$(comma_list $(mdts_nodes))
3565 # initiate statfs RPCs, all MDTs to all MDTs
3566 do_nodes $nodes $LCTL get_param -N osp.*MDT*MDT*.filesfree >&/dev/null
3567 do_nodes $nodes $LCTL get_param -N osp.*MDT*MDT*.filesfree >&/dev/null
3571 # usually checked on a particular client or locally
3580 client_reconnect_try() {
3581 local f=$MOUNT/recon
3584 if [ -z "$CLIENTS" ]; then
3585 $LFS df $MOUNT; uname -n >> $f
3587 do_nodes $CLIENTS "$LFS df $MOUNT; uname -n >> $f" > /dev/null
3589 echo "Connected clients: $(cat $f)"
3590 ls -l $f > /dev/null
3594 client_reconnect() {
3595 # one client_reconnect_try call does not always do the job...
3597 client_reconnect_try && break
3602 affected_facets () {
3605 local host=$(facet_active_host $facet)
3606 local affected=$facet
3608 if [ "$FAILURE_MODE" = HARD ]; then
3609 affected=$(facets_up_on_host $host)
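# Example: under FAILURE_MODE=HARD, if mds1 and ost2 are active on the
# same host, "affected_facets mds1" returns both facets (mds1,ost2);
# otherwise only mds1 itself is returned.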
3615 local E2FSCK_ON_MDT0=false
3616 if [ "$1" == "--fsck" ]; then
3618 [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
3630 # Because it will only get up facets, we need to get the affected
3631 # facets before shutdown.
3632 # For HARD failure mode, we need to make sure facets on the same
3633 # host will only be shut down and rebooted once
3634 for facet in ${facets//,/ }; do
3635 local affected_facet
3637 # check whether the facet is already included in other affected facets
3638 for ((index=0; index<$total; index++)); do
3639 [[ ,${affecteds[index]}, == *,$facet,* ]] && skip=1
3642 if [ $skip -eq 0 ]; then
3643 affecteds[$total]=$(affected_facets $facet)
3648 for ((index=0; index<$total; index++)); do
3649 facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
3650 local host=$(facet_active_host $facet)
3651 echo "Failing ${affecteds[index]} on $host"
3652 shutdown_facet $facet
3655 $E2FSCK_ON_MDT0 && (run_e2fsck $(facet_active_host $SINGLEMDS) \
3656 $(mdsdevname 1) "-n" || error "Running e2fsck")
3658 for ((index=0; index<$total; index++)); do
3659 facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1)
3660 echo reboot facets: ${affecteds[index]}
3664 change_active ${affecteds[index]}
3666 wait_for_facet ${affecteds[index]}
3669 init_facets_vars_simple
3671 # start mgs first if it is affected
3672 if ! combined_mgs_mds &&
3673 list_member ${affecteds[index]} mgs; then
3674 mount_facet mgs || error "Restart of mgs failed"
3676 # FIXME: has to be changed to mount all facets concurrently
3677 affected=$(exclude_items_from_list ${affecteds[index]} mgs)
3678 echo mount facets: ${affecteds[index]}
3679 mount_facets ${affecteds[index]}
3681 do_nodes $(comma_list $(all_nodes)) \
3682 "keyctl show | grep lustre | cut -c1-11 |
3684 xargs -IX keyctl setperm X 0x3f3f3f3f"
3691 do_facet $facet "sync; sync; sync"
3694 # make sure there will be no seq change
3695 local clients=${CLIENTS:-$HOSTNAME}
3696 local f=fsa-\\\$\(hostname\)
3697 do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f"
3698 do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi"
3700 local svc=${facet}_svc
3701 do_facet $facet $LCTL --device ${!svc} notransno
3703 # If a ZFS OSD is made read-only here, its pool is "frozen". This
3704 # in-memory state has to be cleared by either rebooting the host or
3705 # exporting and reimporting the pool.
3707 # Although the uberblocks are not updated when a pool is frozen,
3708 # transactions are still written to the disks. Modified blocks may be
3709 # cached in memory when tests try reading them back. The
3710 # export-and-reimport process also evicts any cached pool data from
3711 # memory to provide the correct "data loss" semantics.
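# For reference, clearing that state by hand amounts to something like
# (the pool name is illustrative):
#   zpool export lustre-mdt1 && zpool import lustre-mdt1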
3713 # In the test framework, the exporting and importing operations are
3714 # handled by stop() and mount_facet() separately, which are used
3715 # inside fail() and fail_abort().
3717 set_dev_readonly $facet
3718 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
3719 $LCTL mark "local REPLAY BARRIER on ${!svc}"
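# Typical replay-test pattern (sketch): set the barrier, perform updates
# that exist only in the client's replay queue, then fail the facet and
# verify the updates were replayed:
#   replay_barrier $SINGLEMDS
#   mcreate $DIR/$tfile
#   fail $SINGLEMDS
#   $CHECKSTAT -t file $DIR/$tfile || error "$tfile not replayed"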
3722 replay_barrier_nodf() {
3723 local facet=$1
3724 do_facet $facet "sync; sync; sync"
3725 local svc=${facet}_svc
3726 echo Replay barrier on ${!svc}
3727 do_facet $facet $LCTL --device ${!svc} notransno
3728 set_dev_readonly $facet
3729 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
3730 $LCTL mark "local REPLAY BARRIER on ${!svc}"
3733 replay_barrier_nosync() {
3734 local facet=$1
3735 local svc=${facet}_svc
3736 echo Replay barrier on ${!svc}
3737 do_facet $facet $LCTL --device ${!svc} notransno
3738 set_dev_readonly $facet
3739 do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}"
3740 $LCTL mark "local REPLAY BARRIER on ${!svc}"
3744 # Get Lustre client uuid for a given Lustre mount point.
3747 local mntpnt=${1:-$MOUNT}
3749 local name=$($LFS getname $mntpnt | cut -d' ' -f1)
3750 local uuid=$($LCTL get_param -n llite.$name.uuid)
3755 mds_evict_client() {
3756 local mntpnt=${1:-$MOUNT}
3757 local uuid=$(get_client_uuid $mntpnt)
3759 do_facet $SINGLEMDS \
3760 "$LCTL set_param -n mdt.${mds1_svc}.evict_client $uuid"
3763 ost_evict_client() {
3764 local mntpnt=${1:-$MOUNT}
3765 local uuid=$(get_client_uuid $mntpnt)
3768 "$LCTL set_param -n obdfilter.${ost1_svc}.evict_client $uuid"
3773 local clients=${CLIENTS:-$HOSTNAME}
3775 SK_NO_KEY_save=$SK_NO_KEY
3777 export SK_NO_KEY=false
3779 facet_failover $* || error "failover: $?"
3780 export SK_NO_KEY=$SK_NO_KEY_save
3781 # to initiate all OSC idling connections
3783 wait_clients_import_state "$clients" "$facets" "\(FULL\|IDLE\)"
3784 clients_up || error "post-failover stat: $?"
3790 facet_failover $facet
3795 local abort_type=${2:-"abort_recovery"}
3798 change_active $facet
3799 wait_for_facet $facet
3800 mount_facet $facet -o $abort_type
3801 clients_up || echo "first stat failed: $?"
3802 clients_up || error "post-failover stat: $?"
3806 host_nids_address() {
3810 do_nodes $nodes "$LCTL list_nids | grep -w $net | cut -f 1 -d @"
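# Example: if "lctl list_nids" on a node prints 192.168.1.5@tcp and
# 10.0.0.5@o2ib, then "host_nids_address <node> tcp" yields 192.168.1.5.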
3814 if [ "$1" = "'*'" ]; then echo \'*\'; else