X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=a5fe9670c2bd119d1ff78d63077c70775a59c08b;hp=8561897698b6f2eebb15050526d2feb678ddf71d;hb=125f98fb5c103a164a2f81615204798679f94fd7;hpb=2fd108171fb08563d2e0ddc09789620d402b0014;ds=sidebyside diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index 8561897..a5fe967 100755 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -1,29 +1,39 @@ #!/bin/bash -trap 'print_summary && touch $TF_FAIL && \ - echo "test-framework exiting on error"' ERR +trap 'print_summary && print_stack_trace | tee $TF_FAIL && \ + echo "$TESTSUITE: FAIL: test-framework exiting on error"' ERR set -e -#set -x export LANG=en_US export REFORMAT=${REFORMAT:-""} export WRITECONF=${WRITECONF:-""} export VERBOSE=${VERBOSE:-false} -export GSS=false +export GSS=${GSS:-false} +export GSS_SK=${GSS_SK:-false} export GSS_KRB5=false export GSS_PIPEFS=false +export SHARED_KEY=${SHARED_KEY:-false} +export SK_PATH=${SK_PATH:-/tmp/test-framework-keys} +export SK_OM_PATH=$SK_PATH'/tmp-request-mount' +export SK_MOUNTED=${SK_MOUNTED:-false} +export SK_FLAVOR=${SK_FLAVOR:-ski} +export SK_NO_KEY=${SK_NO_KEY:-true} +export SK_UNIQUE_NM=${SK_UNIQUE_NM:-false} +export SK_S2S=${SK_S2S:-false} +export SK_S2SNM=${SK_S2SNM:-TestFrameNM} +export SK_S2SNMCLI=${SK_S2SNMCLI:-TestFrameNMCli} export IDENTITY_UPCALL=default export QUOTA_AUTO=1 # specify environment variable containing batch job name for server statistics export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable" -# LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or -# LOAD_LLOOP is true. LOAD_LLOOP is false by default. -export LOAD_LLOOP=${LOAD_LLOOP:-false} - #export PDSH="pdsh -S -Rssh -w" export MOUNT_CMD=${MOUNT_CMD:-"mount -t lustre"} export UMOUNT=${UMOUNT:-"umount -d"} + +export LSNAPSHOT_CONF="/etc/ldev.conf" +export LSNAPSHOT_LOG="/var/log/lsnapshot.log" + # sles12 umount has a issue with -d option [ -e /etc/SuSE-release ] && grep -w VERSION /etc/SuSE-release | grep -wq 12 && { export UMOUNT="umount" @@ -143,60 +153,61 @@ init_test_env() { export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} export RPC_MODE=${RPC_MODE:-false} export DO_CLEANUP=${DO_CLEANUP:-true} + export KEEP_ZPOOL=${KEEP_ZPOOL:-false} - export MKE2FS=$MKE2FS - if [ -z "$MKE2FS" ]; then - if which mkfs.ldiskfs >/dev/null 2>&1; then - export MKE2FS=mkfs.ldiskfs - else - export MKE2FS=mke2fs - fi - fi + export MKE2FS=$MKE2FS + if [ -z "$MKE2FS" ]; then + if which mkfs.ldiskfs >/dev/null 2>&1; then + export MKE2FS=mkfs.ldiskfs + else + export MKE2FS=mke2fs + fi + fi - export DEBUGFS=$DEBUGFS - if [ -z "$DEBUGFS" ]; then - if which debugfs.ldiskfs >/dev/null 2>&1; then - export DEBUGFS=debugfs.ldiskfs - else - export DEBUGFS=debugfs - fi - fi + export DEBUGFS=$DEBUGFS + if [ -z "$DEBUGFS" ]; then + if which debugfs.ldiskfs >/dev/null 2>&1; then + export DEBUGFS=debugfs.ldiskfs + else + export DEBUGFS=debugfs + fi + fi - export TUNE2FS=$TUNE2FS - if [ -z "$TUNE2FS" ]; then - if which tunefs.ldiskfs >/dev/null 2>&1; then - export TUNE2FS=tunefs.ldiskfs - else - export TUNE2FS=tune2fs - fi - fi + export TUNE2FS=$TUNE2FS + if [ -z "$TUNE2FS" ]; then + if which tunefs.ldiskfs >/dev/null 2>&1; then + export TUNE2FS=tunefs.ldiskfs + else + export TUNE2FS=tune2fs + fi + fi - export E2LABEL=$E2LABEL - if [ -z "$E2LABEL" ]; then - if which label.ldiskfs >/dev/null 2>&1; then - export E2LABEL=label.ldiskfs - else - export 
E2LABEL=e2label - fi - fi + export E2LABEL=$E2LABEL + if [ -z "$E2LABEL" ]; then + if which label.ldiskfs >/dev/null 2>&1; then + export E2LABEL=label.ldiskfs + else + export E2LABEL=e2label + fi + fi - export DUMPE2FS=$DUMPE2FS - if [ -z "$DUMPE2FS" ]; then - if which dumpfs.ldiskfs >/dev/null 2>&1; then - export DUMPE2FS=dumpfs.ldiskfs - else - export DUMPE2FS=dumpe2fs - fi - fi + export DUMPE2FS=$DUMPE2FS + if [ -z "$DUMPE2FS" ]; then + if which dumpfs.ldiskfs >/dev/null 2>&1; then + export DUMPE2FS=dumpfs.ldiskfs + else + export DUMPE2FS=dumpe2fs + fi + fi - export E2FSCK=$E2FSCK - if [ -z "$E2FSCK" ]; then - if which fsck.ldiskfs >/dev/null 2>&1; then - export E2FSCK=fsck.ldiskfs - else - export E2FSCK=e2fsck - fi - fi + export E2FSCK=$E2FSCK + if [ -z "$E2FSCK" ]; then + if which fsck.ldiskfs >/dev/null 2>&1; then + export E2FSCK=fsck.ldiskfs + else + export E2FSCK=e2fsck + fi + fi export RESIZE2FS=$RESIZE2FS if [ -z "$RESIZE2FS" ]; then @@ -215,117 +226,130 @@ init_test_env() { export ZDB=${ZDB:-zdb} export PARTPROBE=${PARTPROBE:-partprobe} - #[ -d /r ] && export ROOT=${ROOT:-/r} - export TMP=${TMP:-$ROOT/tmp} - export TESTSUITELOG=${TMP}/${TESTSUITE}.log - export LOGDIR=${LOGDIR:-${TMP}/test_logs/$(date +%s)} - export TESTLOG_PREFIX=$LOGDIR/$TESTSUITE + #[ -d /r ] && export ROOT=${ROOT:-/r} + export TMP=${TMP:-$ROOT/tmp} + export TESTSUITELOG=${TMP}/${TESTSUITE}.log + export LOGDIR=${LOGDIR:-${TMP}/test_logs/$(date +%s)} + export TESTLOG_PREFIX=$LOGDIR/$TESTSUITE - export HOSTNAME=${HOSTNAME:-$(hostname -s)} - if ! echo $PATH | grep -q $LUSTRE/utils; then - export PATH=$LUSTRE/utils:$PATH - fi - if ! echo $PATH | grep -q $LUSTRE/utils/gss; then - export PATH=$LUSTRE/utils/gss:$PATH - fi - if ! echo $PATH | grep -q $LUSTRE/tests; then - export PATH=$LUSTRE/tests:$PATH - fi - if ! echo $PATH | grep -q $LUSTRE/../lustre-iokit/sgpdd-survey; then - export PATH=$LUSTRE/../lustre-iokit/sgpdd-survey:$PATH - fi - export LST=${LST:-"$LUSTRE/../lnet/utils/lst"} - [ ! -f "$LST" ] && export LST=$(which lst) - export SGPDDSURVEY=${SGPDDSURVEY:-"$LUSTRE/../lustre-iokit/sgpdd-survey/sgpdd-survey")} - [ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey) + export HOSTNAME=${HOSTNAME:-$(hostname -s)} + if ! echo $PATH | grep -q $LUSTRE/utils; then + export PATH=$LUSTRE/utils:$PATH + fi + if ! echo $PATH | grep -q $LUSTRE/utils/gss; then + export PATH=$LUSTRE/utils/gss:$PATH + fi + if ! echo $PATH | grep -q $LUSTRE/tests; then + export PATH=$LUSTRE/tests:$PATH + fi + if ! echo $PATH | grep -q $LUSTRE/../lustre-iokit/sgpdd-survey; then + export PATH=$LUSTRE/../lustre-iokit/sgpdd-survey:$PATH + fi + export LST=${LST:-"$LUSTRE/../lnet/utils/lst"} + [ ! -f "$LST" ] && export LST=$(which lst) + export SGPDDSURVEY=${SGPDDSURVEY:-"$LUSTRE/../lustre-iokit/sgpdd-survey/sgpdd-survey")} + [ ! -f "$SGPDDSURVEY" ] && export SGPDDSURVEY=$(which sgpdd-survey) export MCREATE=${MCREATE:-mcreate} - # Ubuntu, at least, has a truncate command in /usr/bin - # so fully path our truncate command. - export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate} + export MULTIOP=${MULTIOP:-multiop} + # Ubuntu, at least, has a truncate command in /usr/bin + # so fully path our truncate command. + export TRUNCATE=${TRUNCATE:-$LUSTRE/tests/truncate} export FSX=${FSX:-$LUSTRE/tests/fsx} - export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"} - [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null) - if ! echo $PATH | grep -q $LUSTRE/tests/racer; then - export PATH=$LUSTRE/tests/racer:$PATH: - fi - if ! 
echo $PATH | grep -q $LUSTRE/tests/mpi; then - export PATH=$LUSTRE/tests/mpi:$PATH - fi - export RSYNC_RSH=${RSYNC_RSH:-rsh} - - export LCTL=${LCTL:-"$LUSTRE/utils/lctl"} - [ ! -f "$LCTL" ] && export LCTL=$(which lctl) - export LFS=${LFS:-"$LUSTRE/utils/lfs"} - [ ! -f "$LFS" ] && export LFS=$(which lfs) - SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"} - GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"} - - export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"} - if [ ! -f "$L_GETIDENTITY" ]; then - if `which l_getidentity > /dev/null 2>&1`; then - export L_GETIDENTITY=$(which l_getidentity) - else - export L_GETIDENTITY=NONE - fi - fi - export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"} - [ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid" - export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"} - [ ! -f "$MKFS" ] && export MKFS="mkfs.lustre" - export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"} - [ ! -f "$TUNEFS" ] && export TUNEFS="tunefs.lustre" - export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} " - export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod} - [ ! -f "$LUSTRE_RMMOD" ] && - export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null) - export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate} - [ ! -f "$LFS_MIGRATE" ] && - export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null) - export LR_READER=${LR_READER:-"$LUSTRE/utils/lr_reader"} - [ ! -f "$LR_READER" ] && export LR_READER=$(which lr_reader 2> /dev/null) - [ -z "$LR_READER" ] && export LR_READER="/usr/sbin/lr_reader" - export NAME=${NAME:-local} - export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"} - [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \ - export LGSSD=$(which lgssd) - export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"} - [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null) - export KRB5DIR=${KRB5DIR:-"/usr/kerberos"} - export DIR2 - export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests} - export AT_MAX_PATH - export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"} - [ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null) - - if [ "$ACCEPTOR_PORT" ]; then - export PORT_OPT="--port $ACCEPTOR_PORT" - fi + export MDSRATE=${MDSRATE:-"$LUSTRE/tests/mpi/mdsrate"} + [ ! -f "$MDSRATE" ] && export MDSRATE=$(which mdsrate 2> /dev/null) + if ! echo $PATH | grep -q $LUSTRE/tests/racer; then + export PATH=$LUSTRE/tests/racer:$PATH: + fi + if ! echo $PATH | grep -q $LUSTRE/tests/mpi; then + export PATH=$LUSTRE/tests/mpi:$PATH + fi + export RSYNC_RSH=${RSYNC_RSH:-rsh} + + export LCTL=${LCTL:-"$LUSTRE/utils/lctl"} + [ ! -f "$LCTL" ] && export LCTL=$(which lctl) + export LFS=${LFS:-"$LUSTRE/utils/lfs"} + [ ! -f "$LFS" ] && export LFS=$(which lfs) + SETSTRIPE=${SETSTRIPE:-"$LFS setstripe"} + GETSTRIPE=${GETSTRIPE:-"$LFS getstripe"} + + export L_GETIDENTITY=${L_GETIDENTITY:-"$LUSTRE/utils/l_getidentity"} + if [ ! -f "$L_GETIDENTITY" ]; then + if `which l_getidentity > /dev/null 2>&1`; then + export L_GETIDENTITY=$(which l_getidentity) + else + export L_GETIDENTITY=NONE + fi + fi + export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"} + [ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid" + export LL_DECODE_LINKEA=${LL_DECODE_LINKEA:-"$LUSTRE/utils/ll_decode_linkea"} + [ ! -f "$LL_DECODE_LINKEA" ] && export LL_DECODE_LINKEA="ll_decode_linkea" + export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"} + [ ! 
-f "$MKFS" ] && export MKFS="mkfs.lustre" + export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"} + [ ! -f "$TUNEFS" ] && export TUNEFS="tunefs.lustre" + export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} " + export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod} + [ ! -f "$LUSTRE_RMMOD" ] && + export LUSTRE_RMMOD=$(which lustre_rmmod 2> /dev/null) + export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate} + [ ! -f "$LFS_MIGRATE" ] && + export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null) + export LR_READER=${LR_READER:-"$LUSTRE/utils/lr_reader"} + [ ! -f "$LR_READER" ] && + export LR_READER=$(which lr_reader 2> /dev/null) + [ -z "$LR_READER" ] && export LR_READER="/usr/sbin/lr_reader" + export NAME=${NAME:-local} + export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"} + [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && + export LGSSD=$(which lgssd) + export LSVCGSSD=${LSVCGSSD:-"$LUSTRE/utils/gss/lsvcgssd"} + [ ! -f "$LSVCGSSD" ] && export LSVCGSSD=$(which lsvcgssd 2> /dev/null) + export KRB5DIR=${KRB5DIR:-"/usr/kerberos"} + export DIR2 + export SAVE_PWD=${SAVE_PWD:-$LUSTRE/tests} + export AT_MAX_PATH + export LDEV=${LDEV:-"$LUSTRE/scripts/ldev"} + [ ! -f "$LDEV" ] && export LDEV=$(which ldev 2> /dev/null) + + if [ "$ACCEPTOR_PORT" ]; then + export PORT_OPT="--port $ACCEPTOR_PORT" + fi + + if $SHARED_KEY; then + $RPC_MODE || echo "Using GSS shared-key feature" + which lgss_sk > /dev/null 2>&1 || + error_exit "built with lgss_sk disabled! SEC=$SEC" + GSS=true + GSS_SK=true + SEC=$SK_FLAVOR + fi + + case "x$SEC" in + xkrb5*) + $RPC_MODE || echo "Using GSS/krb5 ptlrpc security flavor" + which lgss_keyring > /dev/null 2>&1 || + error_exit "built with gss disabled! SEC=$SEC" + GSS=true + GSS_KRB5=true + ;; + esac + + case "x$IDUP" in + xtrue) + IDENTITY_UPCALL=true + ;; + xfalse) + IDENTITY_UPCALL=false + ;; + esac - case "x$SEC" in - xkrb5*) - echo "Using GSS/krb5 ptlrpc security flavor" - which lgss_keyring > /dev/null 2>&1 || \ - error_exit "built with gss disabled! SEC=$SEC" - GSS=true - GSS_KRB5=true - ;; - esac - - case "x$IDUP" in - xtrue) - IDENTITY_UPCALL=true - ;; - xfalse) - IDENTITY_UPCALL=false - ;; - esac - - export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false} - - # Paths on remote nodes, if different - export RLUSTRE=${RLUSTRE:-$LUSTRE} - export RPWD=${RPWD:-$PWD} - export I_MOUNTED=${I_MOUNTED:-"no"} + export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false} + + # Paths on remote nodes, if different + export RLUSTRE=${RLUSTRE:-$LUSTRE} + export RPWD=${RPWD:-$PWD} + export I_MOUNTED=${I_MOUNTED:-"no"} if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \ ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \ ! -f /lib/modules/$(uname -r)/extra/kernel/fs/lustre/mdt.ko -a \ @@ -393,28 +417,39 @@ export LINUX_VERSION_CODE=$(version_code ${LINUX_VERSION//\./ }) # # All Lustre versions support "lctl get_param" to report the version of the # code running in the kernel (what our tests are interested in), but it -# doesn't work without modules loaded. If that fails, use "lctl version" -# instead, which is easy to parse and works without the kernel modules, -# but was only added in 2.6.50. If that also fails, fall back to calling -# "lctl lustre_build_version" which prints either (or both) the userspace -# and kernel build versions, but is deprecated and should eventually be -# removed. +# doesn't work without modules loaded. After 2.9.53 and in upstream kernels +# the "version" parameter doesn't include "lustre: " at the beginning. 
+# If that fails, call "lctl lustre_build_version" which prints either (or both) +# the userspace and kernel build versions, but until 2.8.55 required root +# access to get the Lustre kernel version. If that also fails, fall back to +# using "lctl --version", which is easy to parse and works without the kernel +# modules, but was only added in 2.6.50 and only prints the lctl tool version, +# not the module version, though they are usually the same. +# +# Various commands and their output format for different Lustre versions: +# lctl get_param version: 2.9.55 +# lctl get_param version: lustre: 2.8.53 +# lctl get_param version: lustre: 2.6.52 +# kernel: patchless_client +# build: v2_6_92_0-2.6.32-431.el6_lustre.x86_64 +# lctl lustre_build_version: Lustre version: 2.8.53_27_gae67fc01 +# lctl lustre_build_version: error: lustre_build_version: Permission denied +# (as non-root user) lctl version: v2_6_92_0-2.6.32-431.el6.x86_64 +# lctl lustre_build_version: Lustre version: 2.5.3-2.6.32.26-175.fc12.x86_64 +# lctl version: 2.5.3-2.6.32..26-175fc12.x86_64 +# lctl --version: lctl 2.6.50 # -# output: prints version string to stdout in dotted-decimal format +# output: prints version string to stdout in (up to 4) dotted-decimal values lustre_build_version() { local facet=${1:-client} + local ver=$(do_facet $facet "$LCTL get_param -n version 2>/dev/null || + $LCTL lustre_build_version 2>/dev/null || + $LCTL --version 2>/dev/null | cut -d' ' -f2") + local lver=$(egrep -i "lustre: |version: " <<<"$ver" | head -n 1) + [ -n "$lver" ] && ver="$lver" - # lustre: 2.8.52 - local VER=$(do_facet $facet $LCTL get_param -n version 2> /dev/null | - awk '/lustre: / { print $2 }') - # lctl 2.6.50 - [ -z "$VER" ] && VER=$(do_facet $facet $LCTL --version 2>/dev/null | - awk '{ print $2 }') - # Lustre version: 2.5.3-gfcfd782-CHANGED-2.6.32.26-175.fc12.x86_64 - # lctl version: 2.5.3-gfcfd782-CHANGED-2.6.32.26-175.fc12.x86_64 - [ -z "$VER" ] && VER=$(do_facet $facet $LCTL lustre_build_version | - awk '/version:/ { print $3; exit; }') - sed -e 's/^v//' -e 's/-.*//' -e 's/_/./g' <<<$VER + sed -e 's/[^:]*: //' -e 's/^v//' -e 's/[ -].*//' -e 's/_/./g' <<<$ver | + cut -d. -f1-4 } # Report the Lustre numeric build version code for the supplied facet. @@ -426,6 +461,33 @@ module_loaded () { /sbin/lsmod | grep -q "^\<$1\>" } +PRLFS=false +lustre_insmod() { + local module=$1 + shift + local args="$@" + local msg + local rc=0 + + if ! $PRLFS; then + msg="$(insmod $module $args 2>&1)" && return 0 || rc=$? + fi + + # parallels can't load modules directly from prlfs, use /tmp instead + if $PRLFS || [[ "$(stat -f -c%t $module)" == "7c7c6673" ]]; then + local target="$(mktemp)" + + cp "$module" "$target" + insmod $target $args + rc=$? + [[ $rc == 0 ]] && PRLFS=true + rm -f $target + else + echo "$msg" + fi + return $rc +} + # Load a module on the system where this is running. # # usage: load_module module_name [module arguments for insmod/modprobe] @@ -478,10 +540,10 @@ load_module() { # we're passing options on the command-line. 
if [[ "$BASE" == "lnet_selftest" ]] && [[ -f ${LUSTRE}/../lnet/selftest/${module}${EXT} ]]; then - insmod ${LUSTRE}/../lnet/selftest/${module}${EXT} + lustre_insmod ${LUSTRE}/../lnet/selftest/${module}${EXT} elif [[ -f ${LUSTRE}/${module}${EXT} ]]; then [[ "$BASE" != "ptlrpc_gss" ]] || modprobe sunrpc - insmod ${LUSTRE}/${module}${EXT} "$@" + lustre_insmod ${LUSTRE}/${module}${EXT} "$@" else # must be testing a "make install" or "rpm" installation # note failed to load ptlrpc_gss is considered not fatal @@ -494,20 +556,6 @@ load_module() { fi } -llite_lloop_enabled() { - local n1=$(uname -r | cut -d. -f1) - local n2=$(uname -r | cut -d. -f2) - local n3=$(uname -r | cut -d- -f1 | cut -d. -f3) - - # load the llite_lloop module for < 2.6.32 kernels - if [[ $n1 -lt 2 ]] || [[ $n1 -eq 2 && $n2 -lt 6 ]] || \ - [[ $n1 -eq 2 && $n2 -eq 6 && $n3 -lt 32 ]] || \ - $LOAD_LLOOP; then - return 0 - fi - return 1 -} - load_modules_local() { if [ -n "$MODPROBE" ]; then # use modprobe @@ -546,30 +594,36 @@ load_modules_local() { fi load_module ../libcfs/libcfs/libcfs - - [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG" - [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }" - load_module ../lnet/lnet/lnet - case $NETTYPE in - o2ib) - LNETLND="o2iblnd/ko2iblnd" - ;; - *) - ;; - esac - LNETLND=${LNETLND:-"socklnd/ksocklnd"} - load_module ../lnet/klnds/$LNETLND - load_module obdclass/obdclass - load_module ptlrpc/ptlrpc - load_module ptlrpc/gss/ptlrpc_gss - load_module fld/fld - load_module fid/fid - load_module lmv/lmv - load_module mdc/mdc - load_module osc/osc - load_module lov/lov - load_module mgc/mgc - load_module obdecho/obdecho + # Prevent local MODOPTS_LIBCFS being passed as part of environment + # variable to remote nodes + unset MODOPTS_LIBCFS + + set_default_debug + load_module ../lnet/lnet/lnet + + LNDPATH=${LNDPATH:-"../lnet/klnds"} + if [ -z "$LNETLND" ]; then + case $NETTYPE in + o2ib*) LNETLND="o2iblnd/ko2iblnd" ;; + tcp*) LNETLND="socklnd/ksocklnd" ;; + *) local lnd="${NETTYPE%%[0-9]}lnd" + [ -f "$LNDPATH/$lnd/k$lnd.ko" ] && + LNETLND="$lnd/k$lnd" || + LNETLND="socklnd/ksocklnd" + esac + fi + load_module ../lnet/klnds/$LNETLND + load_module obdclass/obdclass + load_module ptlrpc/ptlrpc + load_module ptlrpc/gss/ptlrpc_gss + load_module fld/fld + load_module fid/fid + load_module lmv/lmv + load_module osc/osc + load_module mdc/mdc + load_module lov/lov + load_module mgc/mgc + load_module obdecho/obdecho if ! 
client_only; then SYMLIST=/proc/kallsyms grep -q crc16 $SYMLIST || @@ -580,7 +634,7 @@ load_modules_local() { [ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then - modprobe zfs + lsmod | grep zfs >&/dev/null || modprobe zfs load_module osd-zfs/osd_zfs fi if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then @@ -602,7 +656,6 @@ load_modules_local() { fi load_module llite/lustre - llite_lloop_enabled && load_module llite/llite_lloop [ -d /r ] && OGDB=${OGDB:-"/r/tmp"} OGDB=${OGDB:-$TMP} rm -f $OGDB/ogdb-$HOSTNAME @@ -694,7 +747,19 @@ fs_log_size() { local size=0 case $fstype in ldiskfs) size=50;; # largest seen is 44, leave some headroom - zfs) size=400;; # largest seen is 384 + zfs) size=512;; # largest seen is 512 + esac + + echo -n $size +} + +fs_inode_ksize() { + local facet=${1:-$SINGLEMDS} + local fstype=$(facet_fstype $facet) + local size=0 + case $fstype in + ldiskfs) size=4;; # ~4KB per inode + zfs) size=11;; # 10 to 11KB per inode esac echo -n $size @@ -730,88 +795,234 @@ send_sigint() { do_nodes $list "killall -2 $@ 2>/dev/null || true" } -# start gss daemons on all nodes, or -# "daemon" on "list" if set +# start gss daemons on all nodes, or "daemon" on "nodes" if set start_gss_daemons() { - local list=$1 - local daemon=$2 + local nodes=$1 + local daemon=$2 - if [ "$list" ] && [ "$daemon" ] ; then - echo "Starting gss daemon on nodes: $list" - do_nodes $list "$daemon" || return 8 - return 0 - fi + if [ "$nodes" ] && [ "$daemon" ] ; then + echo "Starting gss daemon on nodes: $nodes" + do_nodes $nodes "$daemon" || return 8 + return 0 + fi - local list=$(comma_list $(mdts_nodes)) - echo "Starting gss daemon on mds: $list" - do_nodes $list "$LSVCGSSD -v" || return 1 - if $GSS_PIPEFS; then - do_nodes $list "$LGSSD -v" || return 2 - fi + nodes=$(comma_list $(mdts_nodes)) + echo "Starting gss daemon on mds: $nodes" + if $GSS_SK; then + # Start all versions, in case of switching + do_nodes $nodes "$LSVCGSSD -vvv -s -m -o -z" || return 1 + else + do_nodes $nodes "$LSVCGSSD -v" || return 1 + fi + if $GSS_PIPEFS; then + do_nodes $nodes "$LGSSD -v" || return 2 + fi - list=$(comma_list $(osts_nodes)) - echo "Starting gss daemon on ost: $list" - do_nodes $list "$LSVCGSSD -v" || return 3 - # starting on clients + nodes=$(comma_list $(osts_nodes)) + echo "Starting gss daemon on ost: $nodes" + if $GSS_SK; then + # Start all versions, in case of switching + do_nodes $nodes "$LSVCGSSD -vvv -s -m -o -z" || return 3 + else + do_nodes $nodes "$LSVCGSSD -v" || return 3 + fi + # starting on clients - local clients=${CLIENTS:-`hostname`} - if $GSS_PIPEFS; then - echo "Starting $LGSSD on clients $clients " - do_nodes $clients "$LGSSD -v" || return 4 - fi + local clients=${CLIENTS:-$HOSTNAME} + if $GSS_PIPEFS; then + echo "Starting $LGSSD on clients $clients " + do_nodes $clients "$LGSSD -v" || return 4 + fi - # wait daemons entering "stable" status - sleep 5 - - # - # check daemons are running - # - list=$(comma_list $(mdts_nodes) $(osts_nodes)) - check_gss_daemon_nodes $list lsvcgssd || return 5 - if $GSS_PIPEFS; then - list=$(comma_list $(mdts_nodes)) - check_gss_daemon_nodes $list lgssd || return 6 - fi - if $GSS_PIPEFS; then - check_gss_daemon_nodes $clients lgssd || return 7 - fi + # wait daemons entering "stable" status + sleep 5 + + # + # check daemons are running + # + nodes=$(comma_list $(mdts_nodes) $(osts_nodes)) + check_gss_daemon_nodes $nodes lsvcgssd || return 5 + if $GSS_PIPEFS; then + nodes=$(comma_list $(mdts_nodes)) + 
check_gss_daemon_nodes $nodes lgssd || return 6 + fi + if $GSS_PIPEFS; then + check_gss_daemon_nodes $clients lgssd || return 7 + fi } stop_gss_daemons() { - local list=$(comma_list $(mdts_nodes)) + local nodes=$(comma_list $(mdts_nodes)) - send_sigint $list lsvcgssd lgssd + send_sigint $nodes lsvcgssd lgssd - list=$(comma_list $(osts_nodes)) - send_sigint $list lsvcgssd + nodes=$(comma_list $(osts_nodes)) + send_sigint $nodes lsvcgssd - list=${CLIENTS:-`hostname`} - send_sigint $list lgssd + nodes=${CLIENTS:-$HOSTNAME} + send_sigint $nodes lgssd +} + +add_sk_mntflag() { + # Add mount flags for shared key + local mt_opts=$@ + if grep -q skpath <<< "$mt_opts" ; then + mt_opts=$(echo $mt_opts | + sed -e "s#skpath=[^ ,]*#skpath=$SK_PATH#") + else + if [ -z "$mt_opts" ]; then + mt_opts="-o skpath=$SK_PATH" + else + mt_opts="$mt_opts,skpath=$SK_PATH" + fi + fi + echo -n $mt_opts } init_gss() { - if $GSS; then - if ! module_loaded ptlrpc_gss; then - load_module ptlrpc/gss/ptlrpc_gss - module_loaded ptlrpc_gss || - error_exit "init_gss : GSS=$GSS, but gss/krb5 is not supported!" - fi - if $GSS_KRB5; then - start_gss_daemons || error_exit "start gss daemon failed! rc=$?" - fi + if $SHARED_KEY; then + GSS=true + GSS_SK=true + fi + + if ! $GSS; then + return + fi + + if ! module_loaded ptlrpc_gss; then + load_module ptlrpc/gss/ptlrpc_gss + module_loaded ptlrpc_gss || + error_exit "init_gss: GSS=$GSS, but gss/krb5 missing" + fi + + if $GSS_KRB5 || $GSS_SK; then + start_gss_daemons || error_exit "start gss daemon failed! rc=$?" + fi + + if $GSS_SK && $SK_NO_KEY; then + local numclients=${1:-$CLIENTCOUNT} + local clients=${CLIENTS:-$HOSTNAME} + + # security ctx config for keyring + SK_NO_KEY=false + mkdir -p $SK_OM_PATH + mount -o bind $SK_OM_PATH /etc/request-key.d/ + local lgssc_conf_line='create lgssc * * ' + lgssc_conf_line+=$(which lgss_keyring) + lgssc_conf_line+=' %o %k %t %d %c %u %g %T %P %S' + + local lgssc_conf_file="/etc/request-key.d/lgssc.conf" + echo "$lgssc_conf_line" > $lgssc_conf_file + [ -e $lgssc_conf_file ] || + error_exit "Could not find key options in $lgssc_conf_file" + + if ! local_mode; then + do_nodes $(comma_list $(all_nodes)) "mkdir -p \ + $SK_OM_PATH" + do_nodes $(comma_list $(all_nodes)) "mount \ + -o bind $SK_OM_PATH \ + /etc/request-key.d/" + do_nodes $(comma_list $(all_nodes)) "rsync -aqv \ + $HOSTNAME:$lgssc_conf_file \ + $lgssc_conf_file >/dev/null 2>&1" + fi - if [ -n "$LGSS_KEYRING_DEBUG" ]; then + # create shared key on all nodes + mkdir -p $SK_PATH/nodemap + rm -f $SK_PATH/$FSNAME.key $SK_PATH/nodemap/c*.key \ + $SK_PATH/$FSNAME-*.key + # for nodemap testing each client may need own key, + # and S2S now requires keys as well, both for "client" + # and for "server" + if $SK_S2S; then + lgss_sk -t server -f$FSNAME -n $SK_S2SNMCLI \ + -w $SK_PATH/$FSNAME-nmclient.key \ + -d /dev/urandom >/dev/null 2>&1 + lgss_sk -t mgs,server -f$FSNAME -n $SK_S2SNM \ + -w $SK_PATH/$FSNAME-s2s-server.key \ + -d /dev/urandom >/dev/null 2>&1 + fi + # basic key create + lgss_sk -t server -f$FSNAME -w $SK_PATH/$FSNAME.key \ + -d /dev/urandom >/dev/null 2>&1 + # per-nodemap keys + for i in $(seq 0 $((numclients - 1))); do + lgss_sk -t server -f$FSNAME -n c$i \ + -w $SK_PATH/nodemap/c$i.key -d /dev/urandom \ + >/dev/null 2>&1 + done + # Distribute keys + if ! 
local_mode; then + do_nodes $(comma_list $(all_nodes)) "rsync -av \ + $HOSTNAME:$SK_PATH/ $SK_PATH >/dev/null 2>&1" + fi + # Set client keys to client type to generate prime P + if local_mode; then + do_nodes $(all_nodes) "lgss_sk -t client,server -m \ + $SK_PATH/$FSNAME.key >/dev/null 2>&1" + else + do_nodes $clients "lgss_sk -t client -m \ + $SK_PATH/$FSNAME.key >/dev/null 2>&1" + do_nodes $clients "find $SK_PATH/nodemap -name \*.key | \ + xargs -IX lgss_sk -t client -m X >/dev/null 2>&1" + fi + # This is required for servers as well, if S2S in use + if $SK_S2S; then + do_nodes $(comma_list $(mdts_nodes)) \ + "cp $SK_PATH/$FSNAME-s2s-server.key \ + $SK_PATH/$FSNAME-s2s-client.key; lgss_sk \ + -t client -m $SK_PATH/$FSNAME-s2s-client.key \ + >/dev/null 2>&1" + do_nodes $(comma_list $(osts_nodes)) \ + "cp $SK_PATH/$FSNAME-s2s-server.key \ + $SK_PATH/$FSNAME-s2s-client.key; lgss_sk \ + -t client -m $SK_PATH/$FSNAME-s2s-client.key \ + >/dev/null 2>&1" + do_nodes $clients "lgss_sk -t client \ + -m $SK_PATH/$FSNAME-nmclient.key \ + >/dev/null 2>&1" + fi + # mount options for servers and clients + MGS_MOUNT_OPTS=$(add_sk_mntflag $MGS_MOUNT_OPTS) + MDS_MOUNT_OPTS=$(add_sk_mntflag $MDS_MOUNT_OPTS) + OST_MOUNT_OPTS=$(add_sk_mntflag $OST_MOUNT_OPTS) + MOUNT_OPTS=$(add_sk_mntflag $MOUNT_OPTS) + SEC=$SK_FLAVOR + fi + + if [ -n "$LGSS_KEYRING_DEBUG" ]; then lctl set_param -n \ - sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG - fi - fi + sptlrpc.gss.lgss_keyring.debug_level=$LGSS_KEYRING_DEBUG + fi } cleanup_gss() { - if $GSS; then - stop_gss_daemons - # maybe cleanup credential cache? - fi + if $GSS; then + stop_gss_daemons + # maybe cleanup credential cache? + fi +} + +cleanup_sk() { + if $GSS_SK; then + if $SK_S2S; then + do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNM" + do_node $(mgs_node) "$LCTL nodemap_del $SK_S2SNMCLI" + $RPC_MODE || echo "Sleeping for 10 sec for Nodemap.." + sleep 10 + fi + stop_gss_daemons + $RPC_MODE || echo "Cleaning up Shared Key.." + do_nodes $(comma_list $(all_nodes)) "rm -f \ + $SK_PATH/$FSNAME*.key $SK_PATH/nodemap/$FSNAME*.key" + # Remove the mount and clean up the files we added to SK_PATH + do_nodes $(comma_list $(all_nodes)) "umount \ + /etc/request-key.d/" + do_nodes $(comma_list $(all_nodes)) "rm -f \ + $SK_OM_PATH/lgssc.conf" + do_nodes $(comma_list $(all_nodes)) "rmdir $SK_OM_PATH" + SK_NO_KEY=true + fi } facet_svc() { @@ -986,7 +1197,8 @@ running_in_vm() { virt=$(dmidecode -s system-product-name | awk '{print $1}') case $virt in - VMware|KVM|VirtualBox|Parallels) echo ${virt,,} ;; + VMware|KVM|VirtualBox|Parallels) + echo $virt | tr '[A-Z]' '[a-z]' ;; *) ;; esac } @@ -1029,6 +1241,17 @@ zpool_name() { } # +# +# Get ZFS local fsname. +# +zfs_local_fsname() { + local facet=$1 + local lfsname=$(basename $(facet_device $facet)) + + echo -n $lfsname +} + +# # Create ZFS storage pool. 
# create_zpool() { @@ -1038,7 +1261,8 @@ create_zpool() { shift 3 local opts=${@:-"-o cachefile=none"} - do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 || + do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs; + $ZPOOL list -H $poolname >/dev/null 2>&1 || $ZPOOL create -f $opts $poolname $vdev" } @@ -1104,12 +1328,27 @@ import_zpool() { if [[ -n "$poolname" ]]; then opts+=" -d $(dirname $(facet_vdevice $facet))" - do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 || + do_facet $facet "lsmod | grep zfs >&/dev/null || modprobe zfs; + $ZPOOL list -H $poolname >/dev/null 2>&1 || $ZPOOL import -f $opts $poolname" fi } # +# Reimport ZFS storage pool with new name +# +reimport_zpool() { + local facet=$1 + local newpool=$2 + local opts="-o cachefile=none" + local poolname=$(zpool_name $facet) + + opts+=" -d $(dirname $(facet_vdevice $facet))" + do_facet $facet "$ZPOOL export $poolname; + $ZPOOL import $opts $poolname $newpool" +} + +# # Set the "cachefile=none" property on ZFS storage pool so that the pool # is not automatically imported on system startup. # @@ -1333,7 +1572,8 @@ mount_facet() { # commit the device label change to disk if [[ $devicelabel =~ (:[a-zA-Z]{3}[0-9]{4}) ]]; then - do_facet $facet "sync; sync; sync" + echo "Commit the device label on ${!dev}" + do_facet $facet "sync; sleep 1; sync" fi @@ -1396,7 +1636,7 @@ stop() { if [[ $(facet_fstype $facet) == zfs ]]; then # export ZFS storage pool - export_zpool $facet + [ "$KEEP_ZPOOL" = "true" ] || export_zpool $facet fi } @@ -1491,6 +1731,18 @@ mdt_free_inodes() { echo $free_inodes } +# +# Get the OST device status from 'lfs df' with a given OST index. +# +ost_dev_status() { + local ost_idx=$1 + local mnt_pnt=${2:-$MOUNT} + local ost_uuid + + ost_uuid=$(ostuuid_from_index $ost_idx $mnt_pnt) + lfs_df $mnt_pnt | awk '/'$ost_uuid'/ { print $7 }' +} + setup_quota(){ local mntpt=$1 @@ -1566,7 +1818,20 @@ zconf_mount() { do_node $client "! grep -q $mnt' ' /proc/mounts || umount $mnt" fi - do_node $client $MOUNT_CMD $flags $opts $device $mnt || return 1 + if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then + # Mount using nodemap key + local mountkey=$SK_PATH/$FSNAME-nmclient.key + if $SK_UNIQUE_NM; then + mountkey=$SK_PATH/nodemap/c0.key + fi + local prunedopts=$(echo $opts | + sed -e "s#skpath=[^,^ ]*#skpath=$mountkey#g") + do_node $client $MOUNT_CMD $flags $prunedopts $device $mnt || + return 1 + else + do_node $client $MOUNT_CMD $flags $opts $device $mnt || + return 1 + fi set_default_debug_nodes $client @@ -1665,7 +1930,6 @@ zconf_mount_clients() { local opts=${3:-$MOUNT_OPTS} opts=${opts:+-o $opts} local flags=${4:-$MOUNT_FLAGS} - local device=$MGSNID:/$FSNAME$FILESET if [ -z "$mnt" -o -z "$FSNAME" ]; then echo "Bad conf mount command: opt=$flags $opts dev=$device " \ @@ -1675,10 +1939,46 @@ zconf_mount_clients() { echo "Starting client $clients: $flags $opts $device $mnt" if [ -n "$FILESET" -a ! -n "$SKIP_FILESET" ]; then - do_nodes $clients "! 
grep -q $mnt' ' /proc/mounts || - umount $mnt" - do_nodes $clients $MOUNT_CMD $flags $opts $MGSNID:/$FSNAME \ - $mnt || return 1 + if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then + # Mount with own nodemap key + local i=0 + # Mount all server nodes first with per-NM keys + for nmclient in ${clients//,/ }; do +# do_nodes $(comma_list $(all_server_nodes)) "lgss_sk -t server -l $SK_PATH/nodemap/c$i.key -n c$i" + do_nodes $(comma_list $(all_server_nodes)) "lgss_sk -t server -l $SK_PATH/nodemap/c$i.key" + i=$((i + 1)) + done + # set perms for per-nodemap keys else permission denied + do_nodes $(comma_list $(all_nodes)) \ + "keyctl show | grep lustre | cut -c1-11 | + sed -e 's/ //g;' | + xargs -IX keyctl setperm X 0x3f3f3f3f" + local mountkey=$SK_PATH/$FSNAME-nmclient.key + i=0 + for nmclient in ${clients//,/ }; do + if $SK_UNIQUE_NM; then + mountkey=$SK_PATH/nodemap/c$i.key + fi + do_node $nmclient "! grep -q $mnt' ' \ + /proc/mounts || umount $mnt" + local prunedopts=$(add_sk_mntflag $prunedopts); + prunedopts=$(echo $prunedopts | sed -e \ + "s#skpath=[^ ^,]*#skpath=$mountkey#g") + set -x + do_nodes $(comma_list $(all_server_nodes)) \ + "keyctl show" + set +x + do_node $nmclient $MOUNT_CMD $flags \ + $prunedopts $MGSNID:/$FSNAME $mnt || + return 1 + i=$((i + 1)) + done + else + do_nodes $clients "! grep -q $mnt' ' /proc/mounts || + umount $mnt" + do_nodes $clients $MOUNT_CMD $flags $opts \ + $MGSNID:/$FSNAME $mnt || return 1 + fi #disable FILESET if not supported do_nodes $clients lctl get_param -n \ mdc.$FSNAME-MDT0000*.import | grep -q subtree || @@ -1688,15 +1988,56 @@ zconf_mount_clients() { umount $mnt" fi - do_nodes $clients " + if $GSS_SK && ($SK_UNIQUE_NM || $SK_S2S); then + # Mount with nodemap key + local i=0 + local mountkey=$SK_PATH/$FSNAME-nmclient.key + for nmclient in ${clients//,/ }; do + if $SK_UNIQUE_NM; then + mountkey=$SK_PATH/nodemap/c$i.key + fi + local prunedopts=$(echo $opts | sed -e \ + "s#skpath=[^ ^,]*#skpath=$mountkey#g"); + do_node $nmclient "! 
grep -q $mnt' ' /proc/mounts || + umount $mnt" + do_node $nmclient " + running=\\\$(mount | grep -c $mnt' '); + rc=0; + if [ \\\$running -eq 0 ] ; then + mkdir -p $mnt; + $MOUNT_CMD $flags $prunedopts $device $mnt; + rc=\\\$?; + else + lustre_mnt_count=\\\$(mount | grep $mnt' ' | \ + grep 'type lustre' | wc -l); + if [ \\\$running -ne \\\$lustre_mnt_count ] ; then + echo zconf_mount_clients FAILED: \ + mount count \\\$running, not matching \ + with mount count of 'type lustre' \ + \\\$lustre_mnt_count; + rc=1; + fi; + fi; + exit \\\$rc" || return ${PIPESTATUS[0]} + + i=$((i + 1)) + done + else + + local tmpopts=$opts + if $SHARED_KEY; then + tmpopts=$(add_sk_mntflag $opts) + fi + do_nodes $clients " running=\\\$(mount | grep -c $mnt' '); rc=0; if [ \\\$running -eq 0 ] ; then - mkdir -p $mnt; - $MOUNT_CMD $flags $opts $device $mnt; - rc=\\\$?; + mkdir -p $mnt; + $MOUNT_CMD $flags $tmpopts $device $mnt; + rc=\\\$?; fi; exit \\\$rc" || return ${PIPESTATUS[0]} + fi echo "Started clients $clients: " do_nodes $clients "mount | grep $mnt' '" @@ -1877,29 +2218,31 @@ node_var_name() { } start_client_load() { - local client=$1 - local load=$2 - local var=$(node_var_name $client)_load - eval export ${var}=$load - - do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \ -BREAK_ON_ERROR=$BREAK_ON_ERROR \ -END_RUN_FILE=$END_RUN_FILE \ -LOAD_PID_FILE=$LOAD_PID_FILE \ -TESTLOG_PREFIX=$TESTLOG_PREFIX \ -TESTNAME=$TESTNAME \ -DBENCH_LIB=$DBENCH_LIB \ -DBENCH_SRC=$DBENCH_SRC \ -CLIENT_COUNT=$((CLIENTCOUNT - 1)) \ -LFS=$LFS \ -run_${load}.sh" & - local ppid=$! - log "Started client load: ${load} on $client" - - # get the children process IDs - local pids=$(ps --ppid $ppid -o pid= | xargs) - CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids" - return 0 + local client=$1 + local load=$2 + local var=$(node_var_name $client)_load + eval export ${var}=$load + + do_node $client "PATH=$PATH MOUNT=$MOUNT ERRORS_OK=$ERRORS_OK \ + BREAK_ON_ERROR=$BREAK_ON_ERROR \ + END_RUN_FILE=$END_RUN_FILE \ + LOAD_PID_FILE=$LOAD_PID_FILE \ + TESTLOG_PREFIX=$TESTLOG_PREFIX \ + TESTNAME=$TESTNAME \ + DBENCH_LIB=$DBENCH_LIB \ + DBENCH_SRC=$DBENCH_SRC \ + CLIENT_COUNT=$((CLIENTCOUNT - 1)) \ + LFS=$LFS \ + LCTL=$LCTL \ + FSNAME=$FSNAME \ + run_${load}.sh" & + local ppid=$! + log "Started client load: ${load} on $client" + + # get the children process IDs + local pids=$(ps --ppid $ppid -o pid= | xargs) + CLIENT_LOAD_PIDS="$CLIENT_LOAD_PIDS $ppid $pids" + return 0 } start_client_loads () { @@ -1917,55 +2260,59 @@ start_client_loads () { # only for remote client check_client_load () { - local client=$1 - local var=$(node_var_name $client)_load - local TESTLOAD=run_${!var}.sh - - ps auxww | grep -v grep | grep $client | grep -q "$TESTLOAD" || return 1 - - # bug 18914: try to connect several times not only when - # check ps, but while check_catastrophe also - local tries=3 - local RC=254 - while [ $RC = 254 -a $tries -gt 0 ]; do - let tries=$tries-1 - # assume success - RC=0 - if ! 
check_catastrophe $client; then - RC=${PIPESTATUS[0]} - if [ $RC -eq 254 ]; then - # FIXME: not sure how long we shuold sleep here - sleep 10 - continue - fi - echo "check catastrophe failed: RC=$RC " - return $RC - fi - done - # We can continue try to connect if RC=254 - # Just print the warning about this - if [ $RC = 254 ]; then - echo "got a return status of $RC from do_node while checking catastrophe on $client" - fi - - # see if the load is still on the client - tries=3 - RC=254 - while [ $RC = 254 -a $tries -gt 0 ]; do - let tries=$tries-1 - # assume success - RC=0 - if ! do_node $client "ps auxwww | grep -v grep | grep -q $TESTLOAD"; then - RC=${PIPESTATUS[0]} - sleep 30 - fi - done - if [ $RC = 254 ]; then - echo "got a return status of $RC from do_node while checking (catastrophe and 'ps') the client load on $client" - # see if we can diagnose a bit why this is - fi + local client=$1 + local var=$(node_var_name $client)_load + local testload=run_${!var}.sh + + ps auxww | grep -v grep | grep $client | grep -q $testload || return 1 + + # bug 18914: try to connect several times not only when + # check ps, but while check_node_health also + + local tries=3 + local RC=254 + while [ $RC = 254 -a $tries -gt 0 ]; do + let tries=$tries-1 + # assume success + RC=0 + if ! check_node_health $client; then + RC=${PIPESTATUS[0]} + if [ $RC -eq 254 ]; then + # FIXME: not sure how long we shuold sleep here + sleep 10 + continue + fi + echo "check node health failed: RC=$RC " + return $RC + fi + done + # We can continue try to connect if RC=254 + # Just print the warning about this + if [ $RC = 254 ]; then + echo "got a return status of $RC from do_node while checking " \ + "node health on $client" + fi + + # see if the load is still on the client + tries=3 + RC=254 + while [ $RC = 254 -a $tries -gt 0 ]; do + let tries=$tries-1 + # assume success + RC=0 + if ! 
do_node $client \ + "ps auxwww | grep -v grep | grep -q $testload"; then + RC=${PIPESTATUS[0]} + sleep 30 + fi + done + if [ $RC = 254 ]; then + echo "got a return status of $RC from do_node while checking " \ + "(node health and 'ps') the client load on $client" + # see if we can diagnose a bit why this is + fi - return $RC + return $RC } check_client_loads () { local clients=${1//,/ } @@ -2155,12 +2502,21 @@ wait_update_facet() { sync_all_data() { do_nodes $(comma_list $(mdts_nodes)) \ - "lctl set_param -n osd*.*MDT*.force_sync=1" + "lctl set_param -n os[cd]*.*MDT*.force_sync=1" do_nodes $(comma_list $(osts_nodes)) \ "lctl set_param -n osd*.*OS*.force_sync=1" 2>&1 | grep -v 'Found no match' } +wait_zfs_commit() { + # the occupied disk space will be released + # only after DMUs are committed + if [[ $(facet_fstype $1) == zfs ]]; then + echo "sleep $2 for ZFS OSD" + sleep $2 + fi +} + wait_delete_completed_mds() { local MAX_WAIT=${1:-20} # for ZFS, waiting more time for DMUs to be committed @@ -2181,12 +2537,13 @@ wait_delete_completed_mds() { mds2sync="$mds2sync $node" done if [ -z "$mds2sync" ]; then + wait_zfs_commit $SINGLEMDS $ZFS_WAIT return fi mds2sync=$(comma_list $mds2sync) # sync MDS transactions - do_nodes $mds2sync "$LCTL set_param -n osd*.*MD*.force_sync 1" + do_nodes $mds2sync "$LCTL set_param -n os[cd]*.*MD*.force_sync 1" # wait till all changes are sent and commmitted by OSTs # for ldiskfs space is released upon execution, but DMU @@ -2198,16 +2555,7 @@ wait_delete_completed_mds() { "$LCTL get_param -n osc.*MDT*.sync_*" | calc_sum) #echo "$node: $changes changes on all" if [[ $changes -eq 0 ]]; then - etime=$(date +%s) - #echo "delete took $((etime - stime)) seconds" - - # the occupied disk space will be released - # only after DMUs are committed - if [[ $(facet_fstype $SINGLEMDS) == zfs ]]; then - echo "sleep $ZFS_WAIT for ZFS OSD" - sleep $ZFS_WAIT - fi - + wait_zfs_commit $SINGLEMDS $ZFS_WAIT return fi sleep 1 @@ -2329,6 +2677,19 @@ wait_mds_ost_sync () { return 1 } +# Wait OSTs to be active on both client and MDT side. +wait_osts_up() { + local cmd="$LCTL get_param -n lov.$FSNAME-clilov-*.target_obd | + awk 'BEGIN {c = 0} /ACTIVE/{c += 1} END {printf \\\"%d\\\", c}'" + wait_update $HOSTNAME "eval $cmd" $OSTCOUNT || + error "wait_update OSTs up on client failed" + + cmd="$LCTL get_param osp.$FSNAME-OST*-MDT0000.prealloc_last_id | + awk '/=[1-9][0-9]/ { c += 1 } END { printf \\\"%d\\\", c }'" + wait_update_facet $SINGLEMDS "eval $cmd" $OSTCOUNT || + error "wait_update OSTs up on MDT0000 failed" +} + wait_destroy_complete () { echo "Waiting for local destroys to complete" # MAX value shouldn't be big as this mean server responsiveness @@ -2414,25 +2775,26 @@ wait_remote_prog () { return $rc } +lfs_df_check() { + local clients=${1:-$CLIENTS} + + if [ -z "$clients" ]; then + $LFS df $MOUNT + else + $PDSH $clients "$LFS df $MOUNT" > /dev/null + fi +} + clients_up() { - # not every config has many clients - sleep 1 - if [ ! -z "$CLIENTS" ]; then - $PDSH $CLIENTS "stat -f $MOUNT" > /dev/null - else - stat -f $MOUNT > /dev/null - fi + # not every config has many clients + sleep 1 + lfs_df_check } client_up() { - local client=$1 - # usually checked on particular client or locally - sleep 1 - if [ ! 
-z "$client" ]; then - $PDSH $client "stat -f $MOUNT" > /dev/null - else - stat -f $MOUNT > /dev/null - fi + # usually checked on particular client or locally + sleep 1 + lfs_df_check $1 } client_evicted() { @@ -2648,21 +3010,11 @@ fail_abort() { clients_up || error "post-failover stat: $?" } -do_lmc() { - echo There is no lmc. This is mountconf, baby. - exit 1 -} - host_nids_address() { - local nodes=$1 - local kind=$2 + local nodes=$1 + local net=${2:-"."} - if [ -n "$kind" ]; then - nids=$(do_nodes $nodes "$LCTL list_nids | grep $kind | cut -f 1 -d '@'") - else - nids=$(do_nodes $nodes "$LCTL list_nids all | cut -f 1 -d '@'") - fi - echo $nids + do_nodes $nodes "$LCTL list_nids | grep $net | cut -f 1 -d @" } h2name_or_ip() { @@ -2671,40 +3023,34 @@ h2name_or_ip() { fi } -h2ptl() { - if [ "$1" = "'*'" ]; then echo \'*\'; else - ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | \ - awk '{print $1}'` - if [ -z "$ID" ]; then - echo "Could not get a ptl id for $1..." - exit 1 - fi - echo $ID"@ptl" +h2nettype() { + if [[ -n "$NETTYPE" ]]; then + h2name_or_ip "$1" "$NETTYPE" + else + h2name_or_ip "$1" "$2" fi } -declare -fx h2ptl +declare -fx h2nettype +# Wrapper function to print the deprecation warning h2tcp() { - h2name_or_ip "$1" "tcp" -} -declare -fx h2tcp - -h2elan() { - if [ "$1" = "'*'" ]; then echo \'*\'; else - if type __h2elan >/dev/null 2>&1; then - ID=$(__h2elan $1) - else - ID=`echo $1 | sed 's/[^0-9]*//g'` - fi - echo $ID"@elan" + echo "h2tcp: deprecated, use h2nettype instead" 1>&2 + if [[ -n "$NETTYPE" ]]; then + h2nettype "$@" + else + h2nettype "$1" "tcp" fi } -declare -fx h2elan +# Wrapper function to print the deprecation warning h2o2ib() { - h2name_or_ip "$1" "o2ib" + echo "h2o2ib: deprecated, use h2nettype instead" 1>&2 + if [[ -n "$NETTYPE" ]]; then + h2nettype "$@" + else + h2nettype "$1" "o2ib" + fi } -declare -fx h2o2ib # This enables variables in cfg/"setup".sh files to support the pdsh HOSTLIST # expressions format. As a bonus we can then just pass in those variables @@ -2740,6 +3086,8 @@ hostlist_expand() { group=${group%%]*} for range in ${group//,/ }; do + local order + begin=${range%-*} end=${range#*-} @@ -2755,7 +3103,13 @@ hostlist_expand() { begin=$(echo $begin | sed 's/0*//') [ -z $begin ] && begin=0 - for num in $(seq -f "%0${padlen}g" $begin $end); do + if [ ! 
-z "${begin##[!0-9]*}" ]; then + order=$(seq -f "%0${padlen}g" $begin $end) + else + order=$(eval echo {$begin..$end}); + fi + + for num in $order; do value="${name#*,}${num}${back}" [ "$value" != "${value/\[/}" ] && { value=$(hostlist_expand "$value") @@ -2776,8 +3130,10 @@ hostlist_expand() { myList="${list%% *}" while [[ "$list" != ${myList##* } ]]; do - list=${list//${list%% *} /} - myList="$myList ${list%% *}" + local tlist=" $list" + list=${tlist// ${list%% *} / } + list=${list:1} + myList="$myList ${list%% *}" done myList="${myList%* }"; @@ -3389,14 +3745,15 @@ cleanup_echo_devs () { } cleanupall() { - nfs_client_mode && return + nfs_client_mode && return cifs_client_mode && return - stopall $* - cleanup_echo_devs + stopall $* + cleanup_echo_devs - unload_modules - cleanup_gss + unload_modules + cleanup_sk + cleanup_gss } combined_mgs_mds () { @@ -3439,7 +3796,7 @@ mkfs_opts() { var=${facet}failover_HOST if [ -n "${!var}" ] && [ ${!var} != $(facet_host $facet) ]; then - opts+=" --failnode=$(h2$NETTYPE ${!var})" + opts+=" --failnode=$(h2nettype ${!var})" fi opts+=${TIMEOUT:+" --param=sys.timeout=$TIMEOUT"} @@ -3504,6 +3861,17 @@ mkfs_opts() { echo -n "$opts" } +mountfs_opts() { + local facet=$1 + local type=$(facet_type $facet) + local var=${type}_MOUNT_FS_OPTS + local opts="" + if [ -n "${!var}" ]; then + opts+=" --mountfsoptions=${!var}" + fi + echo -n "$opts" +} + check_ost_indices() { local index_count=${#OST_INDICES[@]} [[ $index_count -eq 0 || $OSTCOUNT -le $index_count ]] && return 0 @@ -3522,6 +3890,34 @@ check_ost_indices() { done } +__touch_device() +{ + local facet_type=$1 # mgs || mds || ost + local facet_num=$2 + local facet=${1}${2} + local device + + case "$(facet_fstype $facet)" in + ldiskfs) + device=$(${facet_type}devname $facet_num) + ;; + zfs) + device=$(${facet_type}vdevname $facet_num) + ;; + *) + error "Unhandled filesystem type" + ;; + esac + + do_facet $facet "[ -e \"$device\" ]" && return + + # Note: the following check only works with absolute paths + [[ ! "$device" =~ ^/dev/ ]] || [[ "$device" =~ ^/dev/shm/ ]] || + error "$facet: device '$device' does not exist" + + do_facet $facet "touch \"${device}\"" +} + format_mgs() { local quiet @@ -3530,7 +3926,13 @@ format_mgs() { fi echo "Format mgs: $(mgsdevname)" reformat_external_journal mgs - add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \ + + # touch "device" in case it is a loopback file for testing and needs to + # be created. 
mkfs.lustre doesn't do this to avoid accidentally writing + # to non-existent files in /dev if the admin made a typo during setup + __touch_device mgs + + add mgs $(mkfs_opts mgs $(mgsdevname)) $(mountfs_opts mgs) --reformat \ $(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} || exit 10 } @@ -3543,9 +3945,12 @@ format_mdt() { fi echo "Format mds$num: $(mdsdevname $num)" reformat_external_journal mds$num + + __touch_device mds $num + add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \ - --reformat $(mdsdevname $num) $(mdsvdevname $num) \ - ${quiet:+>/dev/null} || exit 10 + $(mountfs_opts mds$num) --reformat $(mdsdevname $num) \ + $(mdsvdevname $num) ${quiet:+>/dev/null} || exit 10 } format_ost() { @@ -3556,13 +3961,16 @@ format_ost() { fi echo "Format ost$num: $(ostdevname $num)" reformat_external_journal ost$num + + __touch_device ost $num + add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \ - --reformat $(ostdevname $num) $(ostvdevname ${num}) \ - ${quiet:+>/dev/null} || exit 10 + $(mountfs_opts ost$num) --reformat $(ostdevname $num) \ + $(ostvdevname ${num}) ${quiet:+>/dev/null} || exit 10 } formatall() { - stopall + stopall -f # Set hostid for ZFS/SPL zpool import protection # (Assumes MDS version is also OSS version) if [ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.8.54) ]; @@ -3590,11 +3998,11 @@ formatall() { } mount_client() { - grep " $1 " /proc/mounts || zconf_mount $HOSTNAME $* + grep " $1 " /proc/mounts || zconf_mount $HOSTNAME $* } umount_client() { - grep " $1 " /proc/mounts && zconf_umount `hostname` $* + grep " $1 " /proc/mounts && zconf_umount $HOSTNAME $* } # return value: @@ -3630,8 +4038,8 @@ switch_identity() { remount_client() { - zconf_umount `hostname` $1 || error "umount failed" - zconf_mount `hostname` $1 || error "mount failed" + zconf_umount $HOSTNAME $1 || error "umount failed" + zconf_mount $HOSTNAME $1 || error "mount failed" } writeconf_facet() { @@ -3644,24 +4052,95 @@ writeconf_facet() { return 0 } -writeconf_all () { - local mdt_count=${1:-$MDSCOUNT} - local ost_count=${2:-$OSTCOUNT} - local rc=0 +writeconf_all () { + local mdt_count=${1:-$MDSCOUNT} + local ost_count=${2:-$OSTCOUNT} + local rc=0 + + for num in $(seq $mdt_count); do + DEVNAME=$(mdsdevname $num) + writeconf_facet mds$num $DEVNAME || rc=$? + done + + for num in $(seq $ost_count); do + DEVNAME=$(ostdevname $num) + writeconf_facet ost$num $DEVNAME || rc=$? + done + return $rc +} + +mountmgs() { + if ! combined_mgs_mds ; then + start mgs $(mgsdevname) $MGS_MOUNT_OPTS + fi +} + +mountmds() { + for num in $(seq $MDSCOUNT); do + DEVNAME=$(mdsdevname $num) + start mds$num $DEVNAME $MDS_MOUNT_OPTS + + # We started mds, now we should set failover variables properly. + # Set mds${num}failover_HOST if unset (the default + # failnode). + local varname=mds${num}failover_HOST + if [ -z "${!varname}" ]; then + eval mds${num}failover_HOST=$(facet_host mds$num) + fi + + if [ $IDENTITY_UPCALL != "default" ]; then + switch_identity $num $IDENTITY_UPCALL + fi + done +} + +mountoss() { + for num in $(seq $OSTCOUNT); do + DEVNAME=$(ostdevname $num) + start ost$num $DEVNAME $OST_MOUNT_OPTS + + # We started ost$num, now we should set ost${num}failover + # variable properly. Set ost${num}failover_HOST if it is not + # set (the default failnode). + varname=ost${num}failover_HOST + if [ -z "${!varname}" ]; then + eval ost${num}failover_HOST=$(facet_host ost${num}) + fi - for num in $(seq $mdt_count); do - DEVNAME=$(mdsdevname $num) - writeconf_facet mds$num $DEVNAME || rc=$? 
done +} - for num in $(seq $ost_count); do - DEVNAME=$(ostdevname $num) - writeconf_facet ost$num $DEVNAME || rc=$? +mountcli() { + [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE + if [ ! -z $arg1 ]; then + [ "$arg1" = "server_only" ] && return + fi + mount_client $MOUNT + [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT + clients_up + + if [ "$MOUNT_2" ]; then + mount_client $MOUNT2 + [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2 + fi +} + +sk_nodemap_setup() { + local sk_map_name=${1:-$SK_S2SNM} + local sk_map_nodes=${2:-$HOSTNAME} + do_node $(mgs_node) "$LCTL nodemap_add $sk_map_name" + for servernode in $sk_map_nodes; do + local nids=$(do_nodes $servernode "$LCTL list_nids") + for nid in $nids; do + do_node $(mgs_node) "$LCTL nodemap_add_range --name \ + $sk_map_name --range $nid" + done done - return $rc } setupall() { + local arg1=$1 + nfs_client_mode && return cifs_client_mode && return @@ -3669,73 +4148,73 @@ setupall() { load_modules + init_gss + if [ -z "$CLIENTONLY" ]; then echo Setup mgs, mdt, osts echo $WRITECONF | grep -q "writeconf" && writeconf_all - if ! combined_mgs_mds ; then - start mgs $(mgsdevname) $MGS_MOUNT_OPTS - fi - - for num in `seq $MDSCOUNT`; do - DEVNAME=$(mdsdevname $num) - start mds$num $DEVNAME $MDS_MOUNT_OPTS - - # We started mds, now we should set failover variables properly. - # Set mds${num}failover_HOST if it is not set (the default failnode). - local varname=mds${num}failover_HOST - if [ -z "${!varname}" ]; then - eval mds${num}failover_HOST=$(facet_host mds$num) - fi - - if [ $IDENTITY_UPCALL != "default" ]; then - switch_identity $num $IDENTITY_UPCALL - fi - done - for num in `seq $OSTCOUNT`; do - DEVNAME=$(ostdevname $num) - start ost$num $DEVNAME $OST_MOUNT_OPTS - - # We started ost$num, now we should set ost${num}failover variable properly. - # Set ost${num}failover_HOST if it is not set (the default failnode). - varname=ost${num}failover_HOST - if [ -z "${!varname}" ]; then - eval ost${num}failover_HOST=$(facet_host ost${num}) - fi - done - fi - - init_gss - - # wait a while to allow sptlrpc configuration be propogated to targets, - # only needed when mounting new target devices. - if $GSS; then - sleep 10 - fi - - [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE - mount_client $MOUNT - [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT - clients_up + if $SK_MOUNTED; then + echo "Shared Key file system already mounted" + else + mountmgs + mountmds + mountoss + if $SHARED_KEY; then + export SK_MOUNTED=true + fi + fi + if $GSS_SK; then + echo "GSS_SK: setting kernel keyring perms" + do_nodes $(comma_list $(all_nodes)) \ + "keyctl show | grep lustre | cut -c1-11 | + sed -e 's/ //g;' | + xargs -IX keyctl setperm X 0x3f3f3f3f" + + if $SK_S2S; then + # Need to start one nodemap for servers, + # and one for clients. + sk_nodemap_setup $SK_S2SNM \ + $(comma_list $(all_server_nodes)) + mountcli + sk_nodemap_setup $SK_S2SNMCLI \ + ${CLIENTS:-$HOSTNAME} + echo "Nodemap set up for SK S2S, remounting." + stopall + mountmgs + mountmds + mountoss + fi + fi + fi - if [ "$MOUNT_2" ]; then - mount_client $MOUNT2 - [ -n "$CLIENTS" ] && zconf_mount_clients $CLIENTS $MOUNT2 - fi + # wait a while to allow sptlrpc configuration be propogated to targets, + # only needed when mounting new target devices. + if $GSS; then + sleep 10 + fi - init_param_vars + mountcli + init_param_vars - # by remounting mdt before ost, initial connect from mdt to ost might - # timeout because ost is not ready yet. 
wait some time to its fully - # recovery. initial obd_connect timeout is 5s; in GSS case it's preceeded - # by a context negotiation rpc with $TIMEOUT. - # FIXME better by monitoring import status. - if $GSS; then - set_flavor_all $SEC - sleep $((TIMEOUT + 5)) - else - sleep 5 - fi + # by remounting mdt before ost, initial connect from mdt to ost might + # timeout because ost is not ready yet. wait some time to its fully + # recovery. initial obd_connect timeout is 5s; in GSS case it's + # preceeded by a context negotiation rpc with $TIMEOUT. + # FIXME better by monitoring import status. + if $GSS; then + if $GSS_SK; then + set_rule $FSNAME any cli2mdt $SK_FLAVOR + set_rule $FSNAME any cli2ost $SK_FLAVOR + wait_flavor cli2mdt $SK_FLAVOR + wait_flavor cli2ost $SK_FLAVOR + else + set_flavor_all $SEC + fi + sleep $((TIMEOUT + 5)) + else + sleep 5 + fi } mounted_lustre_filesystems() { @@ -3961,20 +4440,10 @@ check_config_client () { return 0 fi - local myMGS_host=$mgs_HOST - if [ "$NETTYPE" = "ptl" ]; then - myMGS_host=$(h2ptl $mgs_HOST | sed -e s/@ptl//) - fi - echo Checking config lustre mounted on $mntpt local mgshost=$(mount | grep " $mntpt " | awk -F@ '{print $1}') mgshost=$(echo $mgshost | awk -F: '{print $1}') -# if [ "$mgshost" != "$myMGS_host" ]; then -# log "Bad config file: lustre is mounted with mgs $mgshost, but mgs_HOST=$mgs_HOST, NETTYPE=$NETTYPE -# Please use correct config or set mds_HOST correctly!" -# fi - } check_config_clients () { @@ -4103,7 +4572,9 @@ check_and_setup_lustre() { fi init_gss - if $GSS; then + if $GSS_SK; then + set_flavor_all null + elif $GSS; then set_flavor_all $SEC fi @@ -4719,7 +5190,7 @@ debugrestore() { true DEBUGSAVE="" - [ -n "DEBUGSAVE_SERVER" ] && + [ -n "$DEBUGSAVE_SERVER" ] && do_nodes $(comma_list $(all_server_nodes)) \ "$LCTL set_param debug=\\\"${DEBUGSAVE_SERVER}\\\"" || true @@ -5096,7 +5567,7 @@ run_one() { cd $SAVE_PWD reset_fail_loc check_grant ${testnum} || error "check_grant $testnum failed with $?" - check_catastrophe || error "LBUG/LASSERT detected" + check_node_health check_dmesg_for_errors || error "Error in dmesg detected" if [ "$PARALLEL" != "yes" ]; then ps auxww | grep -v grep | grep -q multiop && @@ -5181,8 +5652,9 @@ check_grant() { export base=$(basetest $1) [ "$CHECK_GRANT" == "no" ] && return 0 - testname=GCHECK_ONLY_${base} - [ ${!testname}x == x ] && return 0 + testnamebase=GCHECK_ONLY_${base} + testname=GCHECK_ONLY_$1 + [ ${!testnamebase}x == x -a ${!testname}x == x ] && return 0 echo -n "checking grant......" @@ -5198,17 +5670,22 @@ check_grant() { awk '{ total += $1 } END { printf("%0.0f", total) }') # get server grant + # which is tot_granted less grant_precreate server_grant=$(do_nodes $(comma_list $(osts_nodes)) \ - "$LCTL get_param -n obdfilter.${FSNAME}-OST*.tot_granted" | - awk '{ total += $1 } END { printf("%0.0f", total) }') + "$LCTL get_param "\ + "obdfilter.${FSNAME}-OST*.{tot_granted,tot_pending,grant_precreate}" | + sed 's/=/ /'| awk '/tot_granted/{ total += $2 }; + /tot_pending/{ total -= $2 }; + /grant_precreate/{ total -= $2 }; + END { printf("%0.0f", total) }') # check whether client grant == server grant if [[ $client_grant -ne $server_grant ]]; then - echo "failed: client:${client_grant} server: ${server_grant}." 
do_nodes $(comma_list $(osts_nodes)) \ - "$LCTL get_param obdfilter.${FSNAME}-OST*.tot*" + "$LCTL get_param obdfilter.${FSNAME}-OST*.tot*" \ + "obdfilter.${FSNAME}-OST*.grant_*" do_nodes $clients "$LCTL get_param osc.${FSNAME}-*.cur_*_bytes" - return 1 + error "failed: client:${client_grant} server: ${server_grant}." else echo "pass: client:${client_grant} server: ${server_grant}" fi @@ -5366,6 +5843,11 @@ facets_nodes () { echo -n $nodes_sort } +# Get name of the active MGS node. +mgs_node () { + echo -n $(facets_nodes $(get_facets MGS)) +} + # Get all of the active MDS nodes. mdts_nodes () { echo -n $(facets_nodes $(get_facets MDS)) @@ -5407,7 +5889,7 @@ remote_nodes_list () { all_mdts_nodes () { local host local failover_host - local nodes + local nodes="${mds_HOST} ${mdsfailover_HOST}" local nodes_sort local i @@ -5425,7 +5907,7 @@ all_mdts_nodes () { all_osts_nodes () { local host local failover_host - local nodes + local nodes="${ost_HOST} ${ostfailover_HOST}" local nodes_sort local i @@ -5550,13 +6032,19 @@ get_stripe () { setstripe_nfsserver () { local dir=$1 + local nfsexportdir=$2 + shift + shift - local nfsserver=$(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \ - { print $1 }' /proc/mounts | cut -f 1 -d : | head -n1) + local -a nfsexport=($(awk '"'$dir'" ~ $2 && $3 ~ "nfs" && $2 != "/" \ + { print $1 }' /proc/mounts | cut -f 1 -d :)) - [ -z $nfsserver ] && echo "$dir is not nfs mounted" && return 1 + # check that only one nfs mounted + [[ -z $nfsexport ]] && echo "$dir is not nfs mounted" && return 1 + (( ${#nfsexport[@]} == 1 )) || + error "several nfs mounts found for $dir: ${nfsexport[@]} !" - do_nodev $nfsserver lfs setstripe "$@" + do_nodev ${nfsexport[0]} lfs setstripe $nfsexportdir "$@" } # Check and add a test group. @@ -5726,32 +6214,22 @@ inodes_available () { } mdsrate_inodes_available () { - local min_inodes=$(inodes_available) - echo $((min_inodes * 99 / 100)) -} - -# reset llite stat counters -clear_llite_stats(){ - lctl set_param -n llite.*.stats 0 -} - -# sum llite stat items -calc_llite_stats() { - local res=$(lctl get_param -n llite.*.stats | - awk '/^'"$1"'/ {sum += $2} END { printf("%0.0f", sum) }') - echo $((res)) + local min_inodes=$(inodes_available) + echo $((min_inodes * 99 / 100)) } -# reset osc stat counters -clear_osc_stats(){ - lctl set_param -n osc.*.osc_stats 0 +# reset stat counters +clear_stats() { + local paramfile="$1" + lctl set_param -n $paramfile=0 } -# sum osc stat items -calc_osc_stats() { - local res=$(lctl get_param -n osc.*.osc_stats | - awk '/^'"$1"'/ {sum += $2} END { printf("%0.0f", sum) }') - echo $((res)) +# sum stat items +calc_stats() { + local paramfile="$1" + local stat="$2" + lctl get_param -n $paramfile | + awk '/^'$stat'/ { sum += $2 } END { printf("%0.0f", sum) }' } calc_sum () { @@ -5759,8 +6237,8 @@ calc_sum () { } calc_osc_kbytes () { - df $MOUNT > /dev/null - $LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum + df $MOUNT > /dev/null + $LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum } # save_lustre_params(comma separated facet list, parameter_mask) @@ -5768,16 +6246,17 @@ calc_osc_kbytes () { save_lustre_params() { local facets=$1 local facet - local nodes - local node + local facet_svc for facet in ${facets//,/ }; do - node=$(facet_active_host $facet) - [[ *\ $node\ * = " $nodes " ]] && continue - nodes="$nodes $node" - - do_node $node "$LCTL get_param $2 | - while read s; do echo $facet \\\$s; done" + facet_svc=$(facet_svc $facet) + do_facet $facet \ + "params=\\\$($LCTL get_param $2); + 
[[ -z \\\"$facet_svc\\\" ]] && param= || + param=\\\$(grep $facet_svc <<< \\\"\\\$params\\\"); + [[ -z \\\$param ]] && param=\\\"\\\$params\\\"; + while read s; do echo $facet \\\$s; + done <<< \\\"\\\$param\\\"" done } @@ -5792,16 +6271,21 @@ restore_lustre_params() { done } -check_catastrophe() { +check_node_health() { local nodes=${1:-$(comma_list $(nodes_list))} - do_nodes $nodes "rc=0; -val=\\\$($LCTL get_param -n catastrophe 2>&1); -if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then - echo \\\$(hostname -s): \\\$val; - rc=\\\$val; -fi; -exit \\\$rc" + for node in ${nodes//,/ }; do + check_network "$node" 5 + if [ $? -eq 0 ]; then + do_node $node "rc=0; + val=\\\$($LCTL get_param -n catastrophe 2>&1); + if [[ \\\$? -eq 0 && \\\$val -ne 0 ]]; then + echo \\\$(hostname -s): \\\$val; + rc=\\\$val; + fi; + exit \\\$rc" || error "$node:LBUG/LASSERT detected" + fi + done } mdsrate_cleanup () { @@ -5836,7 +6320,7 @@ convert_facet2label() { } get_clientosc_proc_path() { - echo "${1}-osc-*" + echo "${1}-osc-ffff*" } # If the 2.0 MDS was mounted on 1.8 device, then the OSC and LOV names @@ -5993,9 +6477,7 @@ _wait_osc_import_state() { if [[ $facet == client* ]]; then # During setup time, the osc might not be setup, it need wait - # until list_param can return valid value. And also if there - # are mulitple osc entries we should list all of them before - # go to wait. + # until list_param can return valid value. params=$($LCTL list_param $param 2>/dev/null || true) while [ -z "$params" ]; do if [ $i -ge $maxtime ]; then @@ -6019,7 +6501,7 @@ _wait_osc_import_state() { if ! do_rpc_nodes "$(facet_active_host $facet)" \ wait_import_state $expected "$params" $maxtime; then - error "import is not in ${expected} state" + error "$facet: import is not in $expected state after $maxtime" return 1 fi @@ -6163,6 +6645,47 @@ wait_clients_import_state () { fi } +wait_osp_active() { + local facet=$1 + local tgt_name=$2 + local tgt_idx=$3 + local expected=$4 + local num + + # wait until all MDTs are in the expected state + for ((num = 1; num <= $MDSCOUNT; num++)); do + local mdtosp=$(get_mdtosc_proc_path mds${num} ${tgt_name}) + local mproc + + if [ $facet = "mds" ]; then + mproc="osp.$mdtosp.active" + [ $num -eq $((tgt_idx + 1)) ] && continue + else + mproc="osc.$mdtosp.active" + fi + + echo "check $mproc" + while [ 1 ]; do + sleep 5 + local result=$(do_facet mds${num} "$LCTL get_param -n $mproc") + local max=30 + local wait=0 + + [ ${PIPESTATUS[0]} = 0 ] || error "Can't read $mproc" + if [ $result -eq $expected ]; then + echo -n "target updated after" + echo "$wait sec (got $result)" + break + fi + wait=$((wait + 5)) + if [ $wait -eq $max ]; then + error "$tgt_name: wanted $expected got $result" + fi + echo "Waiting $((max - wait)) secs for $tgt_name" + done + done +} + oos_full() { local -a AVAILA local -a GRANTA @@ -6190,27 +6713,42 @@ oos_full() { return $OSCFULL } -pool_list () { - do_facet mgs lctl pool_list $1 +list_pool() { + echo -e "$(do_facet $SINGLEMDS $LCTL pool_list $1 | sed '1d')" +} + +check_pool_not_exist() { + local fsname=${1%%.*} + local poolname=${1##$fsname.} + [[ $# -ne 1 ]] && return 0 + [[ x$poolname = x ]] && return 0 + list_pool $fsname | grep -w $1 && return 1 + return 0 } create_pool() { - local fsname=${1%%.*} - local poolname=${1##$fsname.} - - do_facet mgs lctl pool_new $1 - local RC=$? 
- # get param should return err unless pool is created - [[ $RC -ne 0 ]] && return $RC - - wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ - 2>/dev/null || echo foo" "" || RC=1 - if [[ $RC -eq 0 ]]; then - add_pool_to_list $1 - else - error "pool_new failed $1" - fi - return $RC + local fsname=${1%%.*} + local poolname=${1##$fsname.} + + trap "destroy_test_pools $fsname" EXIT + do_facet mgs lctl pool_new $1 + local RC=$? + # get param should return err unless pool is created + [[ $RC -ne 0 ]] && return $RC + + for mds_id in $(seq $MDSCOUNT); do + local mdt_id=$((mds_id-1)) + local lodname=$fsname-MDT$(printf "%04x" $mdt_id)-mdtlov + wait_update_facet mds$mds_id \ + "lctl get_param -n lod.$lodname.pools.$poolname \ + 2>/dev/null || echo foo" "" || + error "mds$mds_id: pool_new failed $1" + done + wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ + 2>/dev/null || echo foo" "" || error "pool_new failed $1" + + add_pool_to_list $1 + return $RC } add_pool_to_list () { @@ -6232,60 +6770,62 @@ remove_pool_from_list () { } destroy_pool_int() { - local ost - local OSTS=$(do_facet $SINGLEMDS lctl pool_list $1 | \ - awk '$1 !~ /^Pool:/ {print $1}') - for ost in $OSTS; do - do_facet mgs lctl pool_remove $1 $ost - done - do_facet mgs lctl pool_destroy $1 + local ost + local OSTS=$(list_pool $1) + for ost in $OSTS; do + do_facet mgs lctl pool_remove $1 $ost + done + do_facet mgs lctl pool_destroy $1 } # . or destroy_pool() { - local fsname=${1%%.*} - local poolname=${1##$fsname.} + local fsname=${1%%.*} + local poolname=${1##$fsname.} - [[ x$fsname = x$poolname ]] && fsname=$FSNAME + [[ x$fsname = x$poolname ]] && fsname=$FSNAME - local RC + local RC - pool_list $fsname.$poolname || return $? + check_pool_not_exist $fsname.$poolname + [[ $? -eq 0 ]] && return 0 - destroy_pool_int $fsname.$poolname - RC=$? - [[ $RC -ne 0 ]] && return $RC + destroy_pool_int $fsname.$poolname + RC=$? 
+ [[ $RC -ne 0 ]] && return $RC + for mds_id in $(seq $MDSCOUNT); do + local mdt_id=$((mds_id-1)) + local lodname=$fsname-MDT$(printf "%04x" $mdt_id)-mdtlov + wait_update_facet mds$mds_id \ + "lctl get_param -n lod.$lodname.pools.$poolname \ + 2>/dev/null || echo foo" "foo" || + error "mds$mds_id: destroy pool failed $1" + done + wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ + 2>/dev/null || echo foo" "foo" || error "destroy pool failed $1" - wait_update $HOSTNAME "lctl get_param -n lov.$fsname-*.pools.$poolname \ - 2>/dev/null || echo foo" "foo" || RC=1 + remove_pool_from_list $fsname.$poolname - if [[ $RC -eq 0 ]]; then - remove_pool_from_list $fsname.$poolname - else - error "destroy pool failed $1" - fi - return $RC + return $RC } destroy_pools () { - local fsname=${1:-$FSNAME} - local poolname - local listvar=${fsname}_CREATED_POOLS - - pool_list $fsname + local fsname=${1:-$FSNAME} + local poolname + local listvar=${fsname}_CREATED_POOLS - [ x${!listvar} = x ] && return 0 + [ x${!listvar} = x ] && return 0 - echo destroy the created pools: ${!listvar} - for poolname in ${!listvar//,/ }; do - destroy_pool $fsname.$poolname - done + echo "Destroy the created pools: ${!listvar}" + for poolname in ${!listvar//,/ }; do + destroy_pool $fsname.$poolname + done } -cleanup_pools () { - local fsname=${1:-$FSNAME} - trap 0 - destroy_pools $fsname +destroy_test_pools () { + trap 0 + local fsname=${1:-$FSNAME} + destroy_pools $fsname || true } gather_logs () { @@ -6318,6 +6858,7 @@ gather_logs () { do_nodesv $list \ "$LCTL dk > ${prefix}.debug_log.\\\$(hostname -s).${suffix}; dmesg > ${prefix}.dmesg.\\\$(hostname -s).${suffix}" + if [ ! -f $LOGDIR/shared ]; then do_nodes $list rsync -az "${prefix}.*.${suffix}" $HOSTNAME:$LOGDIR fi @@ -6391,15 +6932,17 @@ recovery_time_min() { } get_clients_mount_count () { - local clients=${CLIENTS:-`hostname`} + local clients=${CLIENTS:-$HOSTNAME} - # we need to take into account the clients mounts and - # exclude mds/ost mounts if any; - do_nodes $clients cat /proc/mounts | grep lustre | grep $MOUNT | wc -l + # we need to take into account the clients mounts and + # exclude mds/ost mounts if any; + do_nodes $clients cat /proc/mounts | grep lustre | + grep -w $MOUNT | wc -l } # gss functions PROC_CLI="srpc_info" +PROC_CON="srpc_contexts" combination() { @@ -6422,28 +6965,39 @@ combination() } calc_connection_cnt() { - local dir=$1 + local dir=$1 - # MDT->MDT = 2 * C(M, 2) - # MDT->OST = M * O - # CLI->OST = C * O - # CLI->MDT = C * M - comb_m2=$(combination $MDSCOUNT 2) + # MDT->MDT = 2 * C(M, 2) + # MDT->OST = M * O + # CLI->OST = C * O + # CLI->MDT = C * M + comb_m2=$(combination $MDSCOUNT 2) - local num_clients=$(get_clients_mount_count) + local num_clients=$(get_clients_mount_count) - local cnt_mdt2mdt=$((comb_m2 * 2)) - local cnt_mdt2ost=$((MDSCOUNT * OSTCOUNT)) - local cnt_cli2ost=$((num_clients * OSTCOUNT)) - local cnt_cli2mdt=$((num_clients * MDSCOUNT)) - local cnt_all2ost=$((cnt_mdt2ost + cnt_cli2ost)) - local cnt_all2mdt=$((cnt_mdt2mdt + cnt_cli2mdt)) - local cnt_all2all=$((cnt_mdt2ost + cnt_mdt2mdt + cnt_cli2ost + cnt_cli2mdt)) + local cnt_mdt2mdt=$((comb_m2 * 2)) + local cnt_mdt2ost=$((MDSCOUNT * OSTCOUNT)) + local cnt_cli2ost=$((num_clients * OSTCOUNT)) + local cnt_cli2mdt=$((num_clients * MDSCOUNT)) + if is_mounted $MOUNT2; then + cnt_cli2mdt=$((cnt_cli2mdt * 2)) + cnt_cli2ost=$((cnt_cli2ost * 2)) + fi + if local_mode; then + cnt_mdt2mdt=0 + cnt_mdt2ost=0 + cnt_cli2ost=2 + cnt_cli2mdt=1 + fi + local 
cnt_all2ost=$((cnt_mdt2ost + cnt_cli2ost)) + local cnt_all2mdt=$((cnt_mdt2mdt + cnt_cli2mdt)) + local cnt_all2all=$((cnt_mdt2ost + cnt_mdt2mdt \ + + cnt_cli2ost + cnt_cli2mdt)) - local var=cnt_$dir - local res=${!var} + local var=cnt_$dir + local res=${!var} - echo $res + echo $res } set_rule() @@ -6468,6 +7022,13 @@ set_rule() do_facet mgs "$LCTL conf_param $cmd" } +count_contexts() +{ + local output=$1 + local total_ctx=$(echo "$output" | grep -c "expire.*key.*hdl") + echo $total_ctx +} + count_flvr() { local output=$1 @@ -6509,12 +7070,22 @@ flvr_cnt_cli2mdt() local flavor=$1 local cnt - local clients=${CLIENTS:-`hostname`} + local clients=${CLIENTS:-$HOSTNAME} for c in ${clients//,/ }; do - output=`do_node $c lctl get_param -n mdc.*-MDT*-mdc-*.$PROC_CLI 2>/dev/null` - tmpcnt=`count_flvr "$output" $flavor` - cnt=$((cnt + tmpcnt)) + local output=$(do_node $c lctl get_param -n \ + mdc.*-*-mdc-*.$PROC_CLI 2>/dev/null) + local tmpcnt=$(count_flvr "$output" $flavor) + if $GSS_SK && [ $flavor != "null" ]; then + # tmpcnt=min(contexts,flavors) to ensure SK context is on + output=$(do_node $c lctl get_param -n \ + mdc.*-MDT*-mdc-*.$PROC_CON 2>/dev/null) + local outcon=$(count_contexts "$output") + if [ "$outcon" -lt "$tmpcnt" ]; then + tmpcnt=$outcon + fi + fi + cnt=$((cnt + tmpcnt)) done echo $cnt } @@ -6524,11 +7095,21 @@ flvr_cnt_cli2ost() local flavor=$1 local cnt - local clients=${CLIENTS:-`hostname`} + local clients=${CLIENTS:-$HOSTNAME} for c in ${clients//,/ }; do - output=`do_node $c lctl get_param -n osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null` - tmpcnt=`count_flvr "$output" $flavor` + local output=$(do_node $c lctl get_param -n \ + osc.*OST*-osc-[^M][^D][^T]*.$PROC_CLI 2>/dev/null) + local tmpcnt=$(count_flvr "$output" $flavor) + if $GSS_SK && [ $flavor != "null" ]; then + # tmpcnt=min(contexts,flavors) to ensure SK context is on + output=$(do_node $c lctl get_param -n \ + osc.*OST*-osc-[^M][^D][^T]*.$PROC_CON 2>/dev/null) + local outcon=$(count_contexts "$output") + if [ "$outcon" -lt "$tmpcnt" ]; then + tmpcnt=$outcon + fi + fi cnt=$((cnt + tmpcnt)) done echo $cnt @@ -6545,8 +7126,18 @@ flvr_cnt_mdt2mdt() fi for num in `seq $MDSCOUNT`; do - output=`do_facet mds$num lctl get_param -n mdc.*-MDT*-mdc[0-9]*.$PROC_CLI 2>/dev/null` - tmpcnt=`count_flvr "$output" $flavor` + local output=$(do_facet mds$num lctl get_param -n \ + osp.*-MDT*osp-MDT*.$PROC_CLI 2>/dev/null) + local tmpcnt=$(count_flvr "$output" $flavor) + if $GSS_SK && [ $flavor != "null" ]; then + # tmpcnt=min(contexts,flavors) to ensure SK context is on + output=$(do_facet mds$num lctl get_param -n \ + osp.*-MDT*osp-MDT*.$PROC_CON 2>/dev/null) + local outcon=$(count_contexts "$output") + if [ "$outcon" -lt "$tmpcnt" ]; then + tmpcnt=$outcon + fi + fi cnt=$((cnt + tmpcnt)) done echo $cnt; @@ -6561,9 +7152,18 @@ flvr_cnt_mdt2ost() for num in `seq $MDSCOUNT`; do mdtosc=$(get_mdtosc_proc_path mds$num) mdtosc=${mdtosc/-MDT*/-MDT\*} - output=$(do_facet mds$num lctl get_param -n \ - osc.$mdtosc.$PROC_CLI 2>/dev/null) - tmpcnt=`count_flvr "$output" $flavor` + local output=$(do_facet mds$num lctl get_param -n \ + osc.$mdtosc.$PROC_CLI 2>/dev/null) + local tmpcnt=$(count_flvr "$output" $flavor) + if $GSS_SK && [ $flavor != "null" ]; then + # tmpcnt=min(contexts,flavors) to ensure SK context is on + output=$(do_facet mds$num lctl get_param -n \ + osc.$mdtosc.$PROC_CON 2>/dev/null) + local outcon=$(count_contexts "$output") + if [ "$outcon" -lt "$tmpcnt" ]; then + tmpcnt=$outcon + fi + fi cnt=$((cnt + tmpcnt)) done echo $cnt; @@ 
-6573,7 +7173,8 @@ flvr_cnt_mgc2mgs() { local flavor=$1 - output=`do_facet client lctl get_param -n mgc.*.$PROC_CLI 2>/dev/null` + local output=$(do_facet client lctl get_param -n mgc.*.$PROC_CLI \ + 2>/dev/null) count_flvr "$output" $flavor } @@ -6612,75 +7213,109 @@ do_check_flavor() wait_flavor() { - local dir=$1 # from to - local flavor=$2 # flavor expected - local expect=${3:-$(calc_connection_cnt $dir)} # number expected - - local res=0 - - for ((i=0;i<20;i++)); do - echo -n "checking $dir..." - res=$(do_check_flavor $dir $flavor) - echo "found $res/$expect $flavor connections" - [ $res -ge $expect ] && return 0 - sleep 4 - done + local dir=$1 # from to + local flavor=$2 # flavor expected + local expect=${3:-$(calc_connection_cnt $dir)} # number expected + local WAITFLAVOR_MAX=20 # how many retries before abort? + + local res=0 + for ((i = 0; i < $WAITFLAVOR_MAX; i++)); do + echo -n "checking $dir..." + res=$(do_check_flavor $dir $flavor) + echo "found $res/$expect $flavor connections" + [ $res -ge $expect ] && return 0 + sleep 4 + done - echo "Error checking $flavor of $dir: expect $expect, actual $res" - return 1 + echo "Error checking $flavor of $dir: expect $expect, actual $res" +# echo "Dumping additional logs for SK debug.." + do_nodes $(comma_list $(all_server_nodes)) "keyctl show" + if $dump; then + gather_logs $(comma_list $(nodes_list)) + fi + return 1 } restore_to_default_flavor() { - local proc="mgs.MGS.live.$FSNAME" + local proc="mgs.MGS.live.$FSNAME" - echo "restoring to default flavor..." + echo "restoring to default flavor..." - nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l` + local nrule=$(do_facet mgs lctl get_param -n $proc 2>/dev/null | + grep ".srpc.flavor" | wc -l) - # remove all existing rules if any - if [ $nrule -ne 0 ]; then - echo "$nrule existing rules" - for rule in `do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor."`; do - echo "remove rule: $rule" - spec=`echo $rule | awk -F = '{print $1}'` - do_facet mgs "$LCTL conf_param -d $spec" - done - fi + # remove all existing rules if any + if [ $nrule -ne 0 ]; then + echo "$nrule existing rules" + for rule in $(do_facet mgs lctl get_param -n $proc 2>/dev/null | + grep ".srpc.flavor."); do + echo "remove rule: $rule" + spec=`echo $rule | awk -F = '{print $1}'` + do_facet mgs "$LCTL conf_param -d $spec" + done + fi - # verify no rules left - nrule=`do_facet mgs lctl get_param -n $proc 2>/dev/null | grep ".srpc.flavor." | wc -l` - [ $nrule -ne 0 ] && error "still $nrule rules left" + # verify no rules left + nrule=$(do_facet mgs lctl get_param -n $proc 2>/dev/null | + grep ".srpc.flavor." 
| wc -l) + [ $nrule -ne 0 ] && error "still $nrule rules left" - # wait for default flavor to be applied - # currently default flavor for all connections are 'null' - wait_flavor all2all null - echo "now at default flavor settings" + # wait for default flavor to be applied + if $GSS_SK; then + if $SK_S2S; then + set_rule $FSNAME any any $SK_FLAVOR + wait_flavor all2all $SK_FLAVOR + else + set_rule $FSNAME any cli2mdt $SK_FLAVOR + set_rule $FSNAME any cli2ost $SK_FLAVOR + wait_flavor cli2mdt $SK_FLAVOR + wait_flavor cli2ost $SK_FLAVOR + fi + echo "GSS_SK now at default flavor: $SK_FLAVOR" + else + wait_flavor all2all null + fi } set_flavor_all() { - local flavor=${1:-null} + local flavor=${1:-null} - echo "setting all flavor to $flavor" + echo "setting all flavor to $flavor" - # FIXME need parameter to this fn - # and remove global vars - local cnt_all2all=$(calc_connection_cnt all2all) + # FIXME need parameter to this fn + # and remove global vars + local cnt_all2all=$(calc_connection_cnt all2all) - local res=$(do_check_flavor all2all $flavor) - if [ $res -eq $cnt_all2all ]; then - echo "already have total $res $flavor connections" - return - fi + local res=$(do_check_flavor all2all $flavor) + if [ $res -eq $cnt_all2all ]; then + echo "already have total $res $flavor connections" + return + fi - echo "found $res $flavor out of total $cnt_all2all connections" - restore_to_default_flavor + echo "found $res $flavor out of total $cnt_all2all connections" + restore_to_default_flavor - [[ $flavor = null ]] && return 0 + [[ $flavor = null ]] && return 0 - set_rule $FSNAME any any $flavor - wait_flavor all2all $flavor + if $GSS_SK && [ $flavor != "null" ]; then + if $SK_S2S; then + set_rule $FSNAME any any $flavor + wait_flavor all2all $flavor + else + set_rule $FSNAME any cli2mdt $flavor + set_rule $FSNAME any cli2ost $flavor + set_rule $FSNAME any mdt2ost null + set_rule $FSNAME any mdt2mdt null + wait_flavor cli2mdt $flavor + wait_flavor cli2ost $flavor + fi + echo "GSS_SK now at flavor: $flavor" + else + set_rule $FSNAME any any $flavor + wait_flavor all2all $flavor + fi } @@ -6923,7 +7558,7 @@ is_sanity_benchmark() { } min_ost_size () { - $LCTL get_param -n osc.*.kbytesavail | sort -n | head -n1 + $LFS df | grep OST | awk '{print $4}' | sort -un | head -1 } # @@ -7162,11 +7797,14 @@ test_mkdir() { local parent=$(dirname $path) [ -d $path ] && return 0 - [ ! -d ${parent} ] && mkdir -p ${parent} + if [ ! -d ${parent} ]; then + mkdir -p ${parent} || + error "mkdir parent '$parent' failed" + fi fi if [ $MDSCOUNT -le 1 ]; then - mkdir $path + mkdir $path || error "mkdir '$path' failed" else local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g') local mdt_index @@ -7177,20 +7815,27 @@ test_mkdir() { mdt_index=$stripe_index fi echo "striped dir -i$mdt_index -c$stripe_count $path" - $LFS setdirstripe -i$mdt_index -c$stripe_count $path + $LFS mkdir -i$mdt_index -c$stripe_count $path || + error "mkdir -i $mdt_index -c$stripe_count $path failed" fi } -# find the smallest and not in use file descriptor +# free_fd: find the smallest and not in use file descriptor [above @last_fd] +# +# If called many times, passing @last_fd will avoid repeated searching +# already-open FDs repeatedly if we know they are still in use. 
+# +# usage: free_fd [last_fd] free_fd() { - local max_fd=$(ulimit -n) - local fd=3 - while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do - ((++fd)) - done - [ $fd -lt $max_fd ] || error "finding free file descriptor failed" - echo $fd + local max_fd=$(ulimit -n) + local fd=$((${1:-2} + 1)) + + while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do + ((++fd)) + done + [ $fd -lt $max_fd ] || error "finding free file descriptor failed" + echo $fd } check_mount_and_prep() @@ -7262,6 +7907,18 @@ pool_add_targets() { local t=$(for i in $list; do printf "$FSNAME-OST%04x_UUID " $i; done) do_facet mgs $LCTL pool_add \ $FSNAME.$pool $FSNAME-OST[$first-$last/$step] + + # wait for OSTs to be added to the pool + for mds_id in $(seq $MDSCOUNT); do + local mdt_id=$((mds_id-1)) + local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov + wait_update_facet mds$mds_id \ + "lctl get_param -n lod.$lodname.pools.$pool | + sort -u | tr '\n' ' ' " "$t" || { + error_noexit "mds$mds_id: Add to pool failed" + return 3 + } + done wait_update $HOSTNAME "lctl get_param -n lov.$FSNAME-*.pools.$pool \ | sort -u | tr '\n' ' ' " "$t" || { error_noexit "Add to pool failed" @@ -7398,6 +8055,17 @@ pool_remove_first_target() { local pname="lov.$FSNAME-*.pools.$pool" local t=$($LCTL get_param -n $pname | head -1) do_facet mgs $LCTL pool_remove $FSNAME.$pool $t + for mds_id in $(seq $MDSCOUNT); do + local mdt_id=$((mds_id-1)) + local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov + wait_update_facet mds$mds_id \ + "lctl get_param -n lod.$lodname.pools.$pool | + grep $t" "" || { + error_noexit "mds$mds_id: $t not removed from" \ + "$FSNAME.$pool" + return 2 + } + done wait_update $HOSTNAME "lctl get_param -n $pname | grep $t" "" || { error_noexit "$t not removed from $FSNAME.$pool" return 1 @@ -7413,6 +8081,15 @@ pool_remove_all_targets() { do do_facet mgs $LCTL pool_remove $FSNAME.$pool $t done + for mds_id in $(seq $MDSCOUNT); do + local mdt_id=$((mds_id-1)) + local lodname=$FSNAME-MDT$(printf "%04x" $mdt_id)-mdtlov + wait_update_facet mds$mds_id "lctl get_param -n \ + lod.$lodname.pools.$pool" "" || { + error_noexit "mds$mds_id: Pool $pool not drained" + return 4 + } + done wait_update $HOSTNAME "lctl get_param -n $pname" "" || { error_noexit "Pool $FSNAME.$pool cannot be drained" return 1 @@ -7530,3 +8207,181 @@ killall_process () { do_nodes $clients "killall $signal $name" } + +lsnapshot_create() +{ + do_facet mgs "$LCTL snapshot_create -F $FSNAME $*" +} + +lsnapshot_destroy() +{ + do_facet mgs "$LCTL snapshot_destroy -F $FSNAME $*" +} + +lsnapshot_modify() +{ + do_facet mgs "$LCTL snapshot_modify -F $FSNAME $*" +} + +lsnapshot_list() +{ + do_facet mgs "$LCTL snapshot_list -F $FSNAME $*" +} + +lsnapshot_mount() +{ + do_facet mgs "$LCTL snapshot_mount -F $FSNAME $*" +} + +lsnapshot_umount() +{ + do_facet mgs "$LCTL snapshot_umount -F $FSNAME $*" +} + +lss_err() +{ + local msg=$1 + + do_facet mgs "cat $LSNAPSHOT_LOG" + error $msg +} + +lss_cleanup() +{ + echo "Cleaning test environment ..." + + # Every lsnapshot command takes exclusive lock with others, + # so can NOT destroy the snapshot during list with 'xargs'. 
+ while true; do + local ssname=$(lsnapshot_list | grep snapshot_name | + grep lss_ | awk '{ print $2 }' | head -n 1) + [ -z "$ssname" ] && break + + lsnapshot_destroy -n $ssname -f || + lss_err "Fail to destroy $ssname by force" + done +} + +lss_gen_conf_one() +{ + local facet=$1 + local role=$2 + local idx=$3 + + local host=$(facet_active_host $facet) + local dir=$(dirname $(facet_vdevice $facet)) + local pool=$(zpool_name $facet) + local lfsname=$(zfs_local_fsname $facet) + local label=${FSNAME}-${role}$(printf '%04x' $idx) + + do_facet mgs \ + "echo '$host - $label zfs:${dir}/${pool}/${lfsname} - -' >> \ + $LSNAPSHOT_CONF" +} + +lss_gen_conf() +{ + do_facet mgs "rm -f $LSNAPSHOT_CONF" + echo "Generating $LSNAPSHOT_CONF on MGS ..." + + if ! combined_mgs_mds ; then + [ $(facet_fstype mgs) != zfs ] && + skip "Lustre snapshot 1 only works for ZFS backend" && + exit 0 + + local host=$(facet_active_host mgs) + local dir=$(dirname $(facet_vdevice mgs)) + local pool=$(zpool_name mgs) + local lfsname=$(zfs_local_fsname mgs) + + do_facet mgs \ + "echo '$host - MGS zfs:${dir}/${pool}/${lfsname} - -' \ + >> $LSNAPSHOT_CONF" || lss_err "generate lss conf (mgs)" + fi + + for num in `seq $MDSCOUNT`; do + [ $(facet_fstype mds$num) != zfs ] && + skip "Lustre snapshot 1 only works for ZFS backend" && + exit 0 + + lss_gen_conf_one mds$num MDT $((num - 1)) || + lss_err "generate lss conf (mds$num)" + done + + for num in `seq $OSTCOUNT`; do + [ $(facet_fstype ost$num) != zfs ] && + skip "Lustre snapshot 1 only works for ZFS backend" && + exit 0 + + lss_gen_conf_one ost$num OST $((num - 1)) || + lss_err "generate lss conf (ost$num)" + done + + do_facet mgs "cat $LSNAPSHOT_CONF" +} + +parse_plain_param() +{ + local line=$1 + local val=$(awk '{print $2}' <<< $line) + + if [[ $line =~ ^"lmm_stripe_count:" ]]; then + echo "-c $val" + elif [[ $line =~ ^"lmm_stripe_size:" ]]; then + echo "-S $val" + elif [[ $line =~ ^"lmm_stripe_offset:" ]]; then + echo "-i $val" + fi +} + +parse_layout_param() +{ + local mode="" + local val="" + local param="" + + while read line; do + if [[ -z $mode ]]; then + if [[ $line =~ ^"stripe_count:" ]]; then + mode="plain_dir" + elif [[ $line =~ ^"lmm_stripe_count:" ]]; then + mode="plain_file" + elif [[ $line =~ ^"lcm_layout_gen:" ]]; then + mode="pfl" + fi + fi + + if [[ $mode = "plain_dir" ]]; then + param=$(echo $line | + awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}') + elif [[ $mode = "plain_file" ]]; then + val=$(parse_plain_param "$line") + [[ ! -z $val ]] && param="$param $val" + elif [[ $mode = "pfl" ]]; then + val=$(echo $line | awk '{print $2}') + if [[ $line =~ ^"lcme_extent.e_end:" ]]; then + if [[ $val = "EOF" ]]; then + param="$param -E -1" + else + param="$param -E $val" + fi + elif [[ $line =~ ^"stripe_count:" ]]; then + # pfl dir + val=$(echo $line | + awk '{printf("-c %d -S %d -i %d",$2,$4,$6)}') + param="$param $val" + else + #pfl file + val=$(parse_plain_param "$line") + [[ ! -z $val ]] && param="$param $val" + fi + fi + done + echo "$param" +} + +get_layout_param() +{ + local param=$($LFS getstripe -d $1 | parse_layout_param) + echo "$param" +}
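
The layout helpers added at the end of this patch turn "lfs getstripe -d" output back into "lfs setstripe" arguments, so a test can capture a layout in one place and re-apply it in another. A minimal sketch of how they compose, assuming the usual test-framework conventions ($DIR, $tdir, $tfile, $LFS, plus the error() and test_mkdir() helpers defined above) are in place; the function name test_layout_clone_sketch is illustrative only and not part of the patch:

	# Create a directory with a known default layout, then apply the
	# same layout to a new file via get_layout_param().
	test_layout_clone_sketch() {
		local srcdir=$DIR/$tdir
		local newfile=$DIR/$tfile

		test_mkdir $srcdir
		$LFS setstripe -c 2 -S 1M $srcdir ||
			error "setstripe $srcdir failed"

		# For a plain directory layout this yields something like
		# "-c 2 -S 1048576 -i -1".
		local param=$(get_layout_param $srcdir)
		echo "layout of $srcdir: $param"

		# Re-use the captured layout for a new file.
		$LFS setstripe $param $newfile ||
			error "setstripe $param $newfile failed"
	}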