X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=3e714e348861f0174e6a44e81991d16582194aea;hp=c21e281dd6577ad7e260725af5ddbcdb37abb75c;hb=9e88126a41ccc5358be6174580e820f6080a774c;hpb=5d0201db65d366d8905a28d103a2a9b511c22ca7 diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index c21e281..3e714e3 100644 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -1,11 +1,11 @@ #!/bin/bash -# vim:expandtab:shiftwidth=4:softtabstop=4:tabstop=4: trap 'print_summary && touch $TF_FAIL && \ echo "test-framework exiting on error"' ERR set -e #set -x +export EJOURNAL=${EJOURNAL:-""} export REFORMAT=${REFORMAT:-""} export WRITECONF=${WRITECONF:-""} export VERBOSE=${VERBOSE:-false} @@ -15,6 +15,8 @@ export GSS_KRB5=false export GSS_PIPEFS=false export IDENTITY_UPCALL=default export QUOTA_AUTO=1 +export JOBSTATS_AUTO=${JOBSTATS_AUTO:-1} +export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or # LOAD_LLOOP is true. LOAD_LLOOP is false by default. @@ -27,6 +29,8 @@ LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)} . $LUSTRE/tests/functions.sh . $LUSTRE/tests/yaml.sh +export LD_LIBRARY_PATH=${LUSTRE}/utils:${LD_LIBRARY_PATH} + LUSTRE_TESTS_CFG_DIR=${LUSTRE_TESTS_CFG_DIR:-${LUSTRE}/tests/cfg} EXCEPT_LIST_FILE=${EXCEPT_LIST_FILE:-${LUSTRE_TESTS_CFG_DIR}/tests-to-skip.sh} @@ -119,17 +123,68 @@ init_test_env() { export TEST_FAILED=false export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} - export MKE2FS=${MKE2FS:-mke2fs} - export DEBUGFS=${DEBUGFS:-debugfs} - export TUNE2FS=${TUNE2FS:-tune2fs} - export E2LABEL=${E2LABEL:-e2label} - export DUMPE2FS=${DUMPE2FS:-dumpe2fs} - export E2FSCK=${E2FSCK:-e2fsck} - export LFSCK_BIN=${LFSCK_BIN:-lfsck} + export MKE2FS=$MKE2FS + if [ -z "$MKE2FS" ]; then + if which mkfs.ldiskfs >/dev/null 2>&1; then + export MKE2FS=mkfs.ldiskfs + else + export MKE2FS=mke2fs + fi + fi + + export DEBUGFS=$DEBUGFS + if [ -z "$DEBUGFS" ]; then + if which debugfs.ldiskfs >/dev/null 2>&1; then + export DEBUGFS=debugfs.ldiskfs + else + export DEBUGFS=debugfs + fi + fi + + export TUNE2FS=$TUNE2FS + if [ -z "$TUNE2FS" ]; then + if which tunefs.ldiskfs >/dev/null 2>&1; then + export TUNE2FS=tunefs.ldiskfs + else + export TUNE2FS=tune2fs + fi + fi + + export E2LABEL=$E2LABEL + if [ -z "$E2LABEL" ]; then + if which label.ldiskfs >/dev/null 2>&1; then + export E2LABEL=label.ldiskfs + else + export E2LABEL=e2label + fi + fi + + export DUMPE2FS=$DUMPE2FS + if [ -z "$DUMPE2FS" ]; then + if which dumpfs.ldiskfs >/dev/null 2>&1; then + export DUMPE2FS=dumpfs.ldiskfs + else + export DUMPE2FS=dumpe2fs + fi + fi + export E2FSCK=$E2FSCK + if [ -z "$E2FSCK" ]; then + if which fsck.ldiskfs >/dev/null 2>&1; then + export E2FSCK=fsck.ldiskfs + else + export E2FSCK=e2fsck + fi + fi + + export LFSCK_BIN=${LFSCK_BIN:-lfsck} export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite export FSCK_MAX_ERR=4 # File system errors left uncorrected + export ZFS=${ZFS:-zfs} + export ZPOOL=${ZPOOL:-zpool} + export ZDB=${ZDB:-zdb} + #[ -d /r ] && export ROOT=${ROOT:-/r} export TMP=${TMP:-$ROOT/tmp} export TESTSUITELOG=${TMP}/${TESTSUITE}.log @@ -182,11 +237,11 @@ init_test_env() { fi fi export LL_DECODE_FILTER_FID=${LL_DECODE_FILTER_FID:-"$LUSTRE/utils/ll_decode_filter_fid"} - [ ! -f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID=$(which ll_decode_filter_fid) + [ ! 
-f "$LL_DECODE_FILTER_FID" ] && export LL_DECODE_FILTER_FID="ll_decode_filter_fid" export MKFS=${MKFS:-"$LUSTRE/utils/mkfs.lustre"} - [ ! -f "$MKFS" ] && export MKFS=$(which mkfs.lustre) + [ ! -f "$MKFS" ] && export MKFS="mkfs.lustre" export TUNEFS=${TUNEFS:-"$LUSTRE/utils/tunefs.lustre"} - [ ! -f "$TUNEFS" ] && export TUNEFS=$(which tunefs.lustre) + [ ! -f "$TUNEFS" ] && export TUNEFS="tunefs.lustre" export CHECKSTAT="${CHECKSTAT:-"checkstat -v"} " export LUSTRE_RMMOD=${LUSTRE_RMMOD:-$LUSTRE/scripts/lustre_rmmod} [ ! -f "$LUSTRE_RMMOD" ] && @@ -194,7 +249,6 @@ init_test_env() { export LFS_MIGRATE=${LFS_MIGRATE:-$LUSTRE/scripts/lfs_migrate} [ ! -f "$LFS_MIGRATE" ] && export LFS_MIGRATE=$(which lfs_migrate 2> /dev/null) - export FSTYPE=${FSTYPE:-"ldiskfs"} export NAME=${NAME:-local} export LGSSD=${LGSSD:-"$LUSTRE/utils/gss/lgssd"} [ "$GSS_PIPEFS" = "true" ] && [ ! -f "$LGSSD" ] && \ @@ -228,6 +282,9 @@ init_test_env() { IDENTITY_UPCALL=false ;; esac + USE_OFD=${USE_OFD:-yes} + [ "$USE_OFD" = "yes" ] && LOAD_MODULES_REMOTE=true + export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false} # Paths on remote nodes, if different @@ -263,6 +320,20 @@ init_test_env() { rm -f $TMP/*active } +check_cpt_number() { + local facet=$1 + local ncpts + + ncpts=$(do_facet $facet "lctl get_param -n " \ + "cpu_partition_table 2>/dev/null| wc -l" || echo 1) + + if [ $ncpts -eq 0 ]; then + echo "1" + else + echo $ncpts + fi +} + version_code() { # split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3" eval set -- $(tr "[:punct:]" " " <<< $*) @@ -273,12 +344,6 @@ version_code() { export LINUX_VERSION=$(uname -r | sed -e "s/[-.]/ /3" -e "s/ .*//") export LINUX_VERSION_CODE=$(version_code ${LINUX_VERSION//\./ }) -case `uname -r` in -2.4.*) EXT=".o"; USE_QUOTA=no; [ ! "$CLIENTONLY" ] && FSTYPE=ext3;; - *) EXT=".ko"; USE_QUOTA=yes;; -esac - - module_loaded () { /sbin/lsmod | grep -q "^\<$1\>" } @@ -360,14 +425,46 @@ llite_lloop_enabled() { } load_modules_local() { - if [ -n "$MODPROBE" ]; then - # use modprobe - echo "Using modprobe to load modules" - return 0 - fi + [ $(facet_fstype ost1) == "zfs" ] && export USE_OFD=yes + + if [ -n "$MODPROBE" ]; then + # use modprobe + echo "Using modprobe to load modules" + return 0 + fi + + echo Loading modules from $LUSTRE + + local ncpus + + if [ -f /sys/devices/system/cpu/online ]; then + ncpus=$(($(cut -d "-" -f 2 /sys/devices/system/cpu/online) + 1)) + echo "detected $ncpus online CPUs by sysfs" + else + ncpus=$(getconf _NPROCESSORS_CONF 2>/dev/null) + local rc=$? + if [ $rc -eq 0 ]; then + echo "detected $ncpus online CPUs by getconf" + else + echo "Can't detect number of CPUs" + ncpus=1 + fi + fi + + # if there is only one CPU core, libcfs can only create one partition + # if there is more than 4 CPU cores, libcfs should create multiple CPU + # partitions. 
So we just force libcfs to create 2 partitions for + # system with 2 or 4 cores + if [ $ncpus -le 4 ] && [ $ncpus -gt 1 ]; then + # force to enable multiple CPU partitions + echo "Force libcfs to create 2 CPU partitions" + MODOPTS_LIBCFS="cpu_npartitions=2 $MODOPTS_LIBCFS" + else + echo "libcfs will create CPU partition based on online CPUs" + fi + + load_module ../libcfs/libcfs/libcfs - echo Loading modules from $LUSTRE - load_module ../libcfs/libcfs/libcfs [ "$PTLDEBUG" ] && lctl set_param debug="$PTLDEBUG" [ "$SUBSYSTEM" ] && lctl set_param subsystem_debug="${SUBSYSTEM# }" load_module ../lnet/lnet/lnet @@ -377,7 +474,6 @@ load_modules_local() { load_module obdclass/obdclass load_module ptlrpc/ptlrpc load_module ptlrpc/gss/ptlrpc_gss - [ "$USE_QUOTA" = "yes" -a "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS load_module fld/fld load_module fid/fid load_module lmv/lmv @@ -385,25 +481,37 @@ load_modules_local() { load_module osc/osc load_module lov/lov load_module mgc/mgc + load_module obdecho/obdecho if ! client_only; then SYMLIST=/proc/kallsyms grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; } grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; } grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; } - if [ "$FSTYPE" = "ldiskfs" ]; then - grep -q exportfs_decode_fh $SYMLIST || - { modprobe exportfs 2> /dev/null || true; } - load_module ../ldiskfs/ldiskfs/ldiskfs - fi - load_module mgs/mgs - load_module mds/mds - load_module mdd/mdd - load_module mdt/mdt - load_module lvfs/fsfilt_$FSTYPE - load_module cmm/cmm - load_module osd-ldiskfs/osd_ldiskfs - load_module ost/ost - load_module obdfilter/obdfilter + [ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS + if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then + modprobe zfs + load_module osd-zfs/osd_zfs + fi + load_module mgs/mgs + load_module mds/mds + load_module mdd/mdd + if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then + # + # This block shall be moved up beside osd-zfs as soon + # as osd-ldiskfs stops using mdd symbols. + # + grep -q exportfs_decode_fh $SYMLIST || + { modprobe exportfs 2> /dev/null || true; } + load_module ../ldiskfs/ldiskfs/ldiskfs + load_module lvfs/fsfilt_ldiskfs + load_module osd-ldiskfs/osd_ldiskfs + fi + load_module mdt/mdt + load_module ost/ost + load_module lod/lod + load_module osp/osp + load_module ofd/ofd + load_module osp/osp fi @@ -423,15 +531,17 @@ load_modules_local() { } load_modules () { - load_modules_local - # bug 19124 - # load modules on remote nodes optionally - # lustre-tests have to be installed on these nodes - if $LOAD_MODULES_REMOTE ; then - local list=$(comma_list $(remote_nodes_list)) - echo loading modules on $list - do_rpc_nodes $list load_modules - fi + load_modules_local + # bug 19124 + # load modules on remote nodes optionally + # lustre-tests have to be installed on these nodes + if $LOAD_MODULES_REMOTE; then + local list=$(comma_list $(remote_nodes_list)) + if [ -n "$list" ]; then + echo "loading modules on: '$list'" + do_rpc_nodes "$list" load_modules_local + fi + fi } check_mem_leak () { @@ -448,18 +558,18 @@ check_mem_leak () { } unload_modules() { - wait_exit_ST client # bug 12845 + wait_exit_ST client # bug 12845 - $LUSTRE_RMMOD $FSTYPE || return 2 + $LUSTRE_RMMOD ldiskfs || return 2 - if $LOAD_MODULES_REMOTE ; then - local list=$(comma_list $(remote_nodes_list)) - if [ ! 
-z $list ]; then - echo unloading modules on $list - do_rpc_nodes $list $LUSTRE_RMMOD $FSTYPE - do_rpc_nodes $list check_mem_leak - fi - fi + if $LOAD_MODULES_REMOTE; then + local list=$(comma_list $(remote_nodes_list)) + if [ -n "$list" ]; then + echo "unloading modules on: '$list'" + do_rpc_nodes "$list" $LUSTRE_RMMOD ldiskfs + do_rpc_nodes "$list" check_mem_leak + fi + fi if grep -qe "/sbin/mount\.lustre" /proc/mounts; then umount /sbin/mount.lustre || true @@ -585,18 +695,125 @@ cleanup_gss() { fi } +facet_type() { + local facet=$1 + + echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9]\+//' | + tr '[:lower:]' '[:upper:]' +} + +facet_number() { + local facet=$1 + + if [ $facet == mgs ]; then + return 1 + fi + + echo -n $facet | sed -e 's/^fs[0-9]\+//' | sed -e 's/^[a-z]\+//' +} + +facet_fstype() { + local facet=$1 + local var + + var=${facet}_FSTYPE + if [ -n "${!var}" ]; then + echo -n ${!var} + return + fi + + var=$(facet_type $facet)FSTYPE + if [ -n "${!var}" ]; then + echo -n ${!var} + return + fi + + if [ -n "$FSTYPE" ]; then + echo -n $FSTYPE + return + fi + + return 1 +} + +node_fstypes() { + local node=$1 + local fstypes + local fstype + local facets=$(get_facets) + local facet + + for facet in ${facets//,/ }; do + if [ $node == $(facet_host $facet) ] || + [ $node == "$(facet_failover_host $facet)" ]; then + fstype=$(facet_fstype $facet) + if [[ $fstypes != *$fstype* ]]; then + fstypes+="${fstypes:+,}$fstype" + fi + fi + done + echo -n $fstypes +} + +devicelabel() { + local facet=$1 + local dev=$2 + local label + local fstype=$(facet_fstype $facet) + + case $fstype in + ldiskfs) + label=$(do_facet ${facet} "$E2LABEL ${dev} 2>/dev/null");; + zfs) + label=$(do_facet ${facet} "$ZFS get -H -o value lustre:svname \ + ${dev} 2>/dev/null");; + *) + error "unknown fstype!";; + esac + + echo -n $label +} + mdsdevlabel() { - local num=$1 - local device=`mdsdevname $num` - local label=`do_facet mds$num "e2label ${device}" | grep -v "CMD: "` - echo -n $label + local num=$1 + local device=$(mdsdevname $num) + local label=$(devicelabel mds$num ${device} | grep -v "CMD: ") + echo -n $label } ostdevlabel() { - local num=$1 - local device=`ostdevname $num` - local label=`do_facet ost$num "e2label ${device}" | grep -v "CMD: "` - echo -n $label + local num=$1 + local device=$(ostdevname $num) + local label=$(devicelabel ost$num ${device} | grep -v "CMD: ") + echo -n $label +} + +# +# This and set_obdfilter_param() shall be used to access OSD parameters +# once existed under "obdfilter": +# +# mntdev +# stats +# read_cache_enable +# writethrough_cache_enable +# +get_obdfilter_param() { + local nodes=$1 + local device=${2:-$FSNAME-OST*} + local name=$3 + + do_nodes $nodes "$LCTL get_param -n obdfilter.$device.$name \ + osd-*.$device.$name 2>&1" | grep -v 'Found no match' +} + +set_obdfilter_param() { + local nodes=$1 + local device=${2:-$FSNAME-OST*} + local name=$3 + local value=$4 + + do_nodes $nodes "$LCTL set_param -n obdfilter.$device.$name=$value \ + osd-*.$device.$name=$value 2>&1" | grep -v 'Found no match' } set_debug_size () { @@ -628,15 +845,15 @@ set_default_debug () { } set_default_debug_nodes () { - local nodes=$1 + local nodes="$1" - if [[ ,$nodes, = *,$HOSTNAME,* ]]; then - nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME") - set_default_debug - fi + if [[ ,$nodes, = *,$HOSTNAME,* ]]; then + nodes=$(exclude_items_from_list "$nodes" "$HOSTNAME") + set_default_debug + fi - [[ -n $nodes ]] && do_rpc_nodes $nodes set_default_debug \ - \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE 
|| true + do_rpc_nodes "$nodes" set_default_debug \ + \\\"$PTLDEBUG\\\" \\\"$SUBSYSTEM\\\" $DEBUG_SIZE || true } set_default_debug_facet () { @@ -649,31 +866,71 @@ set_default_debug_facet () { # Facet functions mount_facets () { - local facets=${1:-$(get_facets)} - local facet + local facets=${1:-$(get_facets)} + local facet - for facet in ${facets//,/ }; do - mount_facet $facet || error "Restart of $facet failed!" - done + for facet in ${facets//,/ }; do + mount_facet $facet + local RC=$? + [ $RC -eq 0 ] && continue + + if [ "$TESTSUITE.$TESTNAME" = "replay-dual.test_0a" ]; then + skip "Restart of $facet failed!." && touch $LU482_FAILED + else + error "Restart of $facet failed!" + fi + return $RC + done } -mount_facet() { - local facet=$1 - shift - local dev=$(facet_active $facet)_dev - local opt=${facet}_opt - local mntpt=$(facet_mntpt $facet) +# +# Add argument "arg" (e.g., "loop") to the comma-separated list +# of arguments for option "opt" (e.g., "-o") on command +# line "opts" (e.g., "-o flock"). +# +csa_add() { + local opts=$1 + local opt=$2 + local arg=$3 + local opt_pattern="\([[:space:]]\+\|^\)$opt" - echo "Starting ${facet}: ${!opt} $@ ${!dev} $mntpt" - do_facet ${facet} "mkdir -p $mntpt; mount -t lustre ${!opt} $@ ${!dev} $mntpt" - RC=${PIPESTATUS[0]} - if [ $RC -ne 0 ]; then - echo "mount -t lustre $@ ${!dev} $mntpt" - echo "Start of ${!dev} on ${facet} failed ${RC}" + if echo "$opts" | grep -q $opt_pattern; then + opts=$(echo "$opts" | sed -e \ + "s/$opt_pattern[[:space:]]*[^[:space:]]\+/&,$arg/") + else + opts+="${opts:+ }$opt $arg" + fi + echo -n "$opts" +} + +mount_facet() { + local facet=$1 + shift + local dev=$(facet_active $facet)_dev + local opt=${facet}_opt + local mntpt=$(facet_mntpt $facet) + local opts="${!opt} $@" + + if [ $(facet_fstype $facet) == ldiskfs ] && + ! do_facet $facet test -b ${!dev}; then + opts=$(csa_add "$opts" -o loop) + fi + + echo "Starting ${facet}: $opts ${!dev} $mntpt" + # for testing LU-482 error handling in mount_facets() and test_0a() + if [ -f $TMP/test-lu482-trigger ]; then + RC=2 + else + do_facet ${facet} "mkdir -p $mntpt; mount -t lustre $opts \ + ${!dev} $mntpt" + RC=${PIPESTATUS[0]} + fi + if [ $RC -ne 0 ]; then + echo "Start of ${!dev} on ${facet} failed ${RC}" else set_default_debug_facet $facet - label=$(do_facet ${facet} "$E2LABEL ${!dev}") + label=$(devicelabel ${facet} ${!dev}) [ -z "$label" ] && echo no label for ${!dev} && exit 1 eval export ${facet}_svc=${label} echo Started ${label} @@ -705,6 +962,39 @@ start() { return $RC } +# +# When a ZFS OSD is made read-only by replay_barrier(), its pool is "freezed". +# Because stopping corresponding target may not clear this in-memory state, we +# need to zap the pool from memory by exporting and reimporting the pool. +# +# Although the uberblocks are not updated when a pool is freezed, transactions +# are still written to the disks. Modified blocks may be cached in memory when +# tests try reading them back. The export-and-reimport process also evicts any +# cached pool data from memory to provide the correct "data loss" semantics. +# +refresh_disk() { + local facet=$1 + local fstype=$(facet_fstype $facet) + local _dev + local dev + local poolname + + if [ "${fstype}" == "zfs" ]; then + _dev=$(facet_active $facet)_dev + dev=${!_dev} # expand _dev to its value, e.g. 
${mds1_dev} + poolname="${dev%%/*}" # poolname is string before "/" + + if [ "${poolname}" == "" ]; then + echo "invalid dataset name: $dev" + return + fi + do_facet $facet "cp /etc/zfs/zpool.cache /tmp/zpool.cache.back" + do_facet $facet "$ZPOOL export ${poolname}" + do_facet $facet "$ZPOOL import -f -c /tmp/zpool.cache.back \ + ${poolname}" + fi +} + stop() { local running local facet=$1 @@ -726,6 +1016,10 @@ stop() { # save quota version (both administrative and operational quotas) # add an additional parameter if mountpoint is ever different from $MOUNT +# +# XXX This function is kept for interoperability with old server (< 2.3.50), +# it should be removed whenever we drop the interoperability for such +# server. quota_save_version() { local fsname=${2:-$FSNAME} local spec=$1 @@ -745,7 +1039,11 @@ quota_save_version() { done } -# client could mount several lustre +# client could mount several lustre +# +# XXX This function is kept for interoperability with old server (< 2.3.50), +# it should be removed whenever we drop the interoperability for such +# server. quota_type () { local fsname=${1:-$FSNAME} local rc=0 @@ -755,17 +1053,27 @@ quota_type () { return $rc } -restore_quota_type () { - local mntpt=${1:-$MOUNT} - local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2) - if [ ! "$old_QUOTA_TYPE" ] || [ "$quota_type" = "$old_QUOTA_TYPE" ]; then - return - fi - quota_save_version $old_QUOTA_TYPE +# XXX This function is kept for interoperability with old server (< 2.3.50), +# it should be removed whenever we drop the interoperability for such +# server. +restore_quota_old() { + local mntpt=${1:-$MOUNT} + local quota_type=$(quota_type $FSNAME | grep MDT | cut -d "=" -f2) + if [ ! "$old_QUOTA_TYPE" ] || + [ "$quota_type" = "$old_QUOTA_TYPE" ]; then + return + fi + quota_save_version $old_QUOTA_TYPE } -setup_quota(){ - local mntpt=$1 +# XXX This function is kept for interoperability with old server (< 2.3.50), +# it should be removed whenever we drop the interoperability for such +# server. 
+setup_quota_old(){ + local mntpt=$1 + + # no quota enforcement for now and accounting works out of the box + return # We need save the original quota_type params, and restore them after testing @@ -809,6 +1117,93 @@ setup_quota(){ done } +# get mdt quota type +mdt_quota_type() { + local varsvc=${SINGLEMDS}_svc + do_facet $SINGLEMDS $LCTL get_param -n \ + osd-$FSTYPE.${!varsvc}.quota_slave.enabled +} + +# get ost quota type +ost_quota_type() { + # All OSTs should have same quota type + local varsvc=ost1_svc + do_facet ost1 $LCTL get_param -n \ + osd-$FSTYPE.${!varsvc}.quota_slave.enabled +} + +# restore old quota type settings +restore_quota() { + if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then + restore_quota_old + return + fi + + if [ "$old_MDT_QUOTA_TYPE" ]; then + do_facet mgs $LCTL conf_param \ + $FSNAME.quota.mdt=$old_MDT_QUOTA_TYPE + fi + if [ "$old_OST_QUOTA_TYPE" ]; then + do_facet mgs $LCTL conf_param \ + $FSNAME.quota.ost=$old_OST_QUOTA_TYPE + fi +} + +setup_quota(){ + if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then + setup_quota_old $1 + return + fi + + local mntpt=$1 + + # save old quota type & set new quota type + local mdt_qtype=$(mdt_quota_type) + local ost_qtype=$(ost_quota_type) + + echo "[HOST:$HOSTNAME] [old_mdt_qtype:$mdt_qtype]" \ + "[old_ost_qtype:$ost_qtype] [new_qtype:$QUOTA_TYPE]" + + export old_MDT_QUOTA_TYPE=$mdt_qtype + export old_OST_QUOTA_TYPE=$ost_qtype + + do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$QUOTA_TYPE || + error "set mdt quota type failed" + do_facet mgs $LCTL conf_param $FSNAME.quota.ost=$QUOTA_TYPE || + error "set ost quota type failed" + + local quota_usrs=$QUOTA_USERS + + # get_filesystem_size + local disksz=$(lfs df $mntpt | grep "filesystem summary:" | + awk '{print $3}') + local blk_soft=$((disksz + 1024)) + local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over + + local inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | + awk '{print $3}') + local i_soft=$inodes + local i_hard=$((i_soft + i_soft / 20)) + + echo "Total disk size: $disksz block-softlimit: $blk_soft" \ + "block-hardlimit: $blk_hard inode-softlimit: $i_soft" \ + "inode-hardlimit: $i_hard" + + local cmd + for usr in $quota_usrs; do + echo "Setting up quota on $HOSTNAME:$mntpt for $usr..." + for type in u g; do + cmd="$LFS setquota -$type $usr -b $blk_soft" + cmd="$cmd -B $blk_hard -i $i_soft -I $i_hard $mntpt" + echo "+ $cmd" + eval $cmd || error "$cmd FAILED!" 
+ done + # display the quota status + echo "Quota settings for $usr : " + $LFS quota -v -u $usr $mntpt || true + done +} + zconf_mount() { local client=$1 local mnt=$2 @@ -869,7 +1264,7 @@ sanity_mount_check_nodes () { local rc=0 for mnt in $mnts ; do do_nodes $nodes "running=\\\$(grep -c $mnt' ' /proc/mounts); -mpts=\\\$(mount | grep -w -c $mnt); +mpts=\\\$(mount | grep -c $mnt' '); if [ \\\$running -ne \\\$mpts ]; then echo \\\$(hostname) env are INSANE!; exit 1; @@ -939,7 +1334,7 @@ fi; exit \\\$rc" || return ${PIPESTATUS[0]} echo "Started clients $clients: " - do_nodes $clients "mount | grep -w $mnt" + do_nodes $clients "mount | grep $mnt' '" set_default_debug_nodes $clients @@ -1018,12 +1413,12 @@ facets_on_host () { echo $(comma_list $affected) } -facet_up () { - local facet=$1 - local host=${2:-$(facet_host $facet)} +facet_up() { + local facet=$1 + local host=${2:-$(facet_host $facet)} - local label=$(convert_facet2label $facet) - do_node $host lctl dl | awk '{print $4}' | grep -q $label + local label=$(convert_facet2label $facet) + do_node $host $LCTL dl | awk '{print $4}' | grep -q -x $label } facets_up_on_host () { @@ -1064,12 +1459,13 @@ remount_facet() { } reboot_facet() { - local facet=$1 - if [ "$FAILURE_MODE" = HARD ]; then - reboot_node $(facet_active_host $facet) - else - sleep 10 - fi + local facet=$1 + if [ "$FAILURE_MODE" = HARD ]; then + reboot_node $(facet_active_host $facet) + else + refresh_disk ${facet} + sleep 10 + fi } boot_node() { @@ -1105,10 +1501,10 @@ _check_progs_installed () { } check_progs_installed () { - local nodes=$1 - shift + local nodes=$1 + shift - do_rpc_nodes $nodes _check_progs_installed $@ + do_rpc_nodes "$nodes" _check_progs_installed $@ } # recovery-scale functions @@ -1325,15 +1721,16 @@ cleanup_check() { check_mem_leak || exit 204 - [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl && \ - echo "$0: lustre didn't clean up..." 1>&2 && return 202 || true + [ "`lctl dl 2> /dev/null | wc -l`" -gt 0 ] && lctl dl && + echo "$TESTSUITE: lustre didn't clean up..." 1>&2 && + return 202 || true - if module_loaded lnet || module_loaded libcfs; then - echo "$0: modules still loaded..." 1>&2 - /sbin/lsmod 1>&2 - return 203 - fi - return 0 + if module_loaded lnet || module_loaded libcfs; then + echo "$TESTSUITE: modules still loaded..." 1>&2 + /sbin/lsmod 1>&2 + return 203 + fi + return 0 } wait_update () { @@ -1363,28 +1760,63 @@ wait_update () { return 3 } -wait_update_facet () { - local facet=$1 - wait_update $(facet_active_host $facet) "$@" -} - -wait_delete_completed () { - local TOTALPREV=`lctl get_param -n osc.*.kbytesavail | \ - awk 'BEGIN{total=0}; {total+=$1}; END{print total}'` - - local WAIT=0 - local MAX_WAIT=20 - while [ "$WAIT" -ne "$MAX_WAIT" ]; do - sleep 1 - TOTAL=`lctl get_param -n osc.*.kbytesavail | \ - awk 'BEGIN{total=0}; {total+=$1}; END{print total}'` - [ "$TOTAL" -eq "$TOTALPREV" ] && return 0 - echo "Waiting delete completed ... 
prev: $TOTALPREV current: $TOTAL " - TOTALPREV=$TOTAL - WAIT=$(( WAIT + 1)) - done - echo "Delete is not completed in $MAX_WAIT sec" - return 1 +wait_update_facet() { + local facet=$1 + shift + wait_update $(facet_active_host $facet) "$@" +} + +sync_all_data() { + do_node $(osts_nodes) "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 | + grep -v 'Found no match' +} + +wait_delete_completed_mds() { + local MAX_WAIT=${1:-20} + local mds2sync="" + local stime=`date +%s` + local etime + local node + local changes + + # find MDS with pending deletions + for node in $(mdts_nodes); do + changes=$(do_node $node "lctl get_param -n osc.*MDT*.sync_*" \ + 2>/dev/null | calc_sum) + if [ -z "$changes" ] || [ $changes -eq 0 ]; then + continue + fi + mds2sync="$mds2sync $node" + done + if [ "$mds2sync" == "" ]; then + return + fi + mds2sync=$(comma_list $mds2sync) + + # sync MDS transactions + do_nodes $mds2sync "lctl set_param -n osd*.*MD*.force_sync 1" + + # wait till all changes are sent and commmitted by OSTs + # for ldiskfs space is released upon execution, but DMU + # do this upon commit + + local WAIT=0 + while [ "$WAIT" -ne "$MAX_WAIT" ]; do + changes=$(do_nodes $mds2sync "lctl get_param -n osc.*MDT*.sync_*" \ + | calc_sum) + #echo "$node: $changes changes on all" + if [ "$changes" -eq "0" ]; then + etime=`date +%s` + #echo "delete took $((etime - stime)) seconds" + return + fi + sleep 1 + WAIT=$(( WAIT + 1)) + done + + etime=`date +%s` + echo "Delete is not completed in $((etime - stime)) seconds" + do_nodes $mds2sync "lctl get_param osc.*MDT*.sync_*" } wait_for_host() { @@ -1440,14 +1872,14 @@ wait_recovery_complete () { fi echo affected facets: $facets - # we can use "for" here because we are waiting the slowest - for facet in ${facets//,/ }; do - local var_svc=${facet}_svc - local param="*.${!var_svc}.recovery_status" + # we can use "for" here because we are waiting the slowest + for facet in ${facets//,/ }; do + local var_svc=${facet}_svc + local param="*.${!var_svc}.recovery_status" - local host=$(facet_active_host $facet) - do_rpc_nodes $host _wait_recovery_complete $param $MAX - done + local host=$(facet_active_host $facet) + do_rpc_nodes "$host" _wait_recovery_complete $param $MAX + done } wait_mds_ost_sync () { @@ -1478,28 +1910,35 @@ wait_mds_ost_sync () { } wait_destroy_complete () { - echo "Waiting for destroy to be done..." - # MAX value shouldn't be big as this mean server responsiveness - # never increase this just to make test pass but investigate - # why it takes so long time - local MAX=5 - local WAIT=0 - while [ $WAIT -lt $MAX ]; do - local -a RPCs=($($LCTL get_param -n osc.*.destroys_in_flight)) - local con=1 - for ((i=0; i<${#RPCs[@]}; i++)); do - [ ${RPCs[$i]} -eq 0 ] && continue - # there are still some destroy RPCs in flight - con=0 - break; - done - sleep 1 - [ ${con} -eq 1 ] && return 0 # done waiting - echo "Waiting $WAIT secs for destroys to be done." - WAIT=$((WAIT + 1)) - done - echo "Destroys weren't done in $MAX sec." 
- return 1 + echo "Waiting for local destroys to complete" + # MAX value shouldn't be big as this mean server responsiveness + # never increase this just to make test pass but investigate + # why it takes so long time + local MAX=5 + local WAIT=0 + while [ $WAIT -lt $MAX ]; do + local -a RPCs=($($LCTL get_param -n osc.*.destroys_in_flight)) + local con=1 + local i + + for ((i=0; i<${#RPCs[@]}; i++)); do + [ ${RPCs[$i]} -eq 0 ] && continue + # there are still some destroy RPCs in flight + con=0 + break; + done + sleep 1 + [ ${con} -eq 1 ] && return 0 # done waiting + echo "Waiting ${WAIT}s for local destroys to complete" + WAIT=$((WAIT + 1)) + done + echo "Local destroys weren't done in $MAX sec." + return 1 +} + +wait_delete_completed() { + wait_delete_completed_mds $1 || return $? + wait_destroy_complete } wait_exit_ST () { @@ -1612,14 +2051,6 @@ facet_failover() { echo "Failing $facet on node $host" - # Make sure the client data is synced to disk. LU-924 - # - # We don't write client data synchrnously (to avoid flooding sync writes - # when there are many clients connecting), so if the server reboots before - # the client data reachs disk, the client data will be lost and the client - # will be evicted after recovery, which is not what we expected. - do_facet $facet "sync; sync; sync" - local affected=$(affected_facets $facet) shutdown_facet $facet @@ -1653,7 +2084,9 @@ replay_barrier() { # make sure there will be no seq change local clients=${CLIENTS:-$HOSTNAME} - do_nodes $clients "f=${MOUNT}/fsa-\\\$(hostname); mcreate \\\$f; rm \\\$f" + local f=fsa-\\\$\(hostname\) + do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f" + do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi" local svc=${facet}_svc do_facet $facet $LCTL --device %${!svc} notransno @@ -1704,13 +2137,14 @@ fail_nodf() { } fail_abort() { - local facet=$1 - stop $facet - change_active $facet - wait_for_facet $facet - mount_facet $facet -o abort_recovery - clients_up || echo "first df failed: $?" - clients_up || error "post-failover df: $?" + local facet=$1 + stop $facet + refresh_disk ${facet} + change_active $facet + wait_for_facet $facet + mount_facet $facet -o abort_recovery + clients_up || echo "first df failed: $?" + clients_up || error "post-failover df: $?" 
} do_lmc() { @@ -1872,6 +2306,25 @@ facet_host() { echo -n ${!varname} } +facet_failover_host() { + local facet=$1 + local var + + var=${facet}failover_HOST + if [ -n "${!var}" ]; then + echo ${!var} + return + fi + + if [[ $facet == ost* ]]; then + var=ostfailover_HOST + if [ -n "${!var}" ]; then + echo ${!var} + return + fi + fi +} + facet_active() { local facet=$1 local activevar=${facet}active @@ -1981,13 +2434,34 @@ single_local_node () { # Outputs environment variable assignments that should be passed to remote nodes get_env_vars() { - local var - local value + local var + local value + local facets=$(get_facets) + local facet - for var in ${!MODOPTS_*}; do - value=${!var} - echo "${var}=\"$value\"" - done + for var in ${!MODOPTS_*}; do + value=${!var} + echo -n " ${var}=\"$value\"" + done + + echo -n " USE_OFD=$USE_OFD" + + for facet in ${facets//,/ }; do + var=${facet}_FSTYPE + if [ -n "${!var}" ]; then + echo -n " $var=${!var}" + fi + done + + for var in MGSFSTYPE MDSFSTYPE OSTFSTYPE; do + if [ -n "${!var}" ]; then + echo -n " $var=${!var}" + fi + done + + if [ -n "$FSTYPE" ]; then + echo -n " FSTYPE=$FSTYPE" + fi } do_nodes() { @@ -2040,6 +2514,25 @@ do_facet() { do_node $HOST "$@" } +# Function: do_facet_random_file $FACET $FILE $SIZE +# Creates FILE with random content on the given FACET of given SIZE + +do_facet_random_file() { + local facet="$1" + local fpath="$2" + local fsize="$3" + local cmd="dd if=/dev/urandom of='$fpath' bs=$fsize count=1" + do_facet $facet "$cmd 2>/dev/null" +} + +do_facet_create_file() { + local facet="$1" + local fpath="$2" + local fsize="$3" + local cmd="dd if=/dev/zero of='$fpath' bs=$fsize count=1" + do_facet $facet "$cmd 2>/dev/null" +} + do_nodesv() { do_nodes --verbose "$@" } @@ -2057,19 +2550,121 @@ add() { ostdevname() { num=$1 DEVNAME=OSTDEV$num - #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num - eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}} + + local fstype=$(facet_fstype ost$num) + + case $fstype in + ldiskfs ) + #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num + eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};; + zfs ) + #dataset name is independent of vdev device names + eval DEVPTR=${FSNAME}-ost${num}/ost${num};; + * ) + error "unknown fstype!";; + esac + echo -n $DEVPTR } +ostvdevname() { + num=$1 + DEVNAME=OSTDEV$num + + local fstype=$(facet_fstype ost$num) + + case $fstype in + ldiskfs ) + # vdevs are not supported by ldiskfs + eval VDEVPTR="";; + zfs ) + #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num + eval VDEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};; + * ) + error "unknown fstype!";; + esac + + echo -n $VDEVPTR +} + mdsdevname() { num=$1 DEVNAME=MDSDEV$num - #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num - eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}} + + local fstype=$(facet_fstype mds$num) + + case $fstype in + ldiskfs ) + #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num + eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};; + zfs ) + #dataset name is independent of vdev device names + eval DEVPTR=${FSNAME}-mdt${num}/mdt${num};; + * ) + error "unknown fstype!";; + esac + + echo -n $DEVPTR +} + +mdsvdevname() { + num=$1 + DEVNAME=MDSDEV$num + + local fstype=$(facet_fstype mds$num) + + case $fstype in + ldiskfs ) + # vdevs are not supported by ldiskfs + eval VDEVPTR="";; + zfs ) + #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num + eval VDEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};; + * ) + error "unknown fstype!";; + esac + + echo -n $VDEVPTR +} + +mgsdevname() { + DEVNAME=MGSDEV + + local 
fstype=$(facet_fstype mds$num) + + case $fstype in + ldiskfs ) + #if $MGSDEV isn't defined, default is $MDSDEV1 + eval DEVPTR=${!DEVNAME:=${MDSDEV1}};; + zfs ) + #dataset name is independent of vdev device names + eval DEVPTR=${FSNAME}-mgs/mgs;; + * ) + error "unknown fstype!";; + esac + echo -n $DEVPTR } +mgsvdevname() { + DEVNAME=MGSDEV + + local fstype=$(facet_fstype mds$num) + + case $fstype in + ldiskfs ) + # vdevs are not supported by ldiskfs + eval VDEVPTR="";; + zfs ) + #if $MGSDEV isn't defined, default is $MGSDEV1 + eval VDEVPTR=${!DEVNAME:=${MDSDEV1}};; + * ) + error "unknown fstype!";; + esac + + echo -n $VDEVPTR +} + facet_mntpt () { local facet=$1 [[ $facet = mgs ]] && combined_mgs_mds && facet="mds1" @@ -2142,88 +2737,121 @@ combined_mgs_mds () { [[ $MDSDEV1 = $MGSDEV ]] && [[ $mds1_HOST = $mgs_HOST ]] } -mkfs_opts () { - local facet=$1 +lower() { + echo -n "$1" | tr '[:upper:]' '[:lower:]' +} - local tgt=$(echo $facet | tr -d [:digit:] | tr "[:lower:]" "[:upper:]") - local optvar=${tgt}_MKFS_OPTS - local opt=${!optvar} +upper() { + echo -n "$1" | tr '[:lower:]' '[:upper:]' +} - # FIXME: ! combo mgs/mds + mgsfailover is not supported yet - [[ $facet = mgs ]] && echo $opt && return +mkfs_opts() { + local facet=$1 + local type=$(facet_type $facet) + local index=$(($(facet_number $facet) - 1)) + local fstype=$(facet_fstype $facet) + local opts + local fs_mkfs_opts + local var - # 1. - # --failnode options - local var=${facet}failover_HOST - if [ x"${!var}" != x ] && [ x"${!var}" != x$(facet_host $facet) ] ; then - local failnode=$(h2$NETTYPE ${!var}) - failnode="--failnode=$failnode" - # options does not contain - # or contains wrong --failnode= - if [[ $opt != *${failnode}* ]]; then - opt=$(echo $opt | sed 's/--failnode=.* / /') - opt="$opt $failnode" - fi - fi + if [ $type == MGS ] && combined_mgs_mds; then + return 1 + fi - # 2. - # --mgsnode options - # no additional mkfs mds "--mgsnode" option for this configuration - if [[ $facet = mds ]] && combined_mgs_mds; then - echo $opt - return - fi + if [ $type == MGS ] || ( [ $type == MDS ] && combined_mgs_mds ); then + opts="--mgs" + else + opts="--mgsnode=$MGSNID" + fi - # additional mkfs "--mgsnode" - local mgsnode="--mgsnode=$MGSNID" - opt=${opt//$mgsnode } - for nid in ${MGSNID//:/ }; do - local mgsnode="--mgsnode=$nid" - # options does not contain - # --mgsnode=$nid - if [[ $opt != *${mgsnode}" "* ]]; then - opt="$opt --mgsnode=$nid" - fi - done + if [ $type != MGS ]; then + opts+=" --fsname=$FSNAME --$(lower ${type/MDS/MDT}) --index=$index" + fi - echo $opt -} + var=${facet}failover_HOST + if [ -n "${!var}" ] && [ ${!var} != $(facet_host $facet) ]; then + opts+=" --failnode=$(h2$NETTYPE ${!var})" + fi -formatall() { - if [ "$IAMDIR" == "yes" ]; then - MDS_MKFS_OPTS="$MDS_MKFS_OPTS --iam-dir" - fi + opts+=${TIMEOUT:+" --param=sys.timeout=$TIMEOUT"} + opts+=${LDLM_TIMEOUT:+" --param=sys.ldlm_timeout=$LDLM_TIMEOUT"} - [ "$FSTYPE" ] && FSTYPE_OPT="--backfstype $FSTYPE" + if [ $type == MDS ]; then + opts+=${SECLEVEL:+" --param=mdt.sec_level"} + opts+=${MDSCAPA:+" --param-mdt.capa=$MDSCAPA"} + opts+=${STRIPE_BYTES:+" --param=lov.stripesize=$STRIPE_BYTES"} + opts+=${STRIPES_PER_OBJ:+" --param=lov.stripecount=$STRIPES_PER_OBJ"} + opts+=${L_GETIDENTITY:+" --param=mdt.identity_upcall=$L_GETIDENTITY"} - stopall - # We need ldiskfs here, may as well load them all - load_modules - [ "$CLIENTONLY" ] && return - echo Formatting mgs, mds, osts - if ! 
combined_mgs_mds ; then - add mgs $(mkfs_opts mgs) $FSTYPE_OPT --reformat $MGSDEV || exit 10 - fi + if [ $fstype == ldiskfs ]; then + fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"} + fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"} + fi + fi - for num in `seq $MDSCOUNT`; do - echo "Format mds$num: $(mdsdevname $num)" - if $VERBOSE; then - add mds$num $(mkfs_opts mds) $FSTYPE_OPT --reformat $(mdsdevname $num) || exit 10 - else - add mds$num $(mkfs_opts mds) $FSTYPE_OPT --reformat $(mdsdevname $num) > /dev/null || exit 10 - fi - done + if [ $type == OST ]; then + opts+=${SECLEVEL:+" --param=ost.sec_level"} + opts+=${OSSCAPA:+" --param=ost.capa=$OSSCAPA"} - # the ost-s could have different OST_MKFS_OPTS - # because of different failnode-s - for num in `seq $OSTCOUNT`; do - echo "Format ost$num: $(ostdevname $num)" - if $VERBOSE; then - add ost$num $(mkfs_opts ost${num}) $FSTYPE_OPT --reformat `ostdevname $num` || exit 10 - else - add ost$num $(mkfs_opts ost${num}) $FSTYPE_OPT --reformat `ostdevname $num` > /dev/null || exit 10 - fi - done + if [ $fstype == ldiskfs ]; then + fs_mkfs_opts+=${OSTJOURNALSIZE:+" -J size=$OSTJOURNALSIZE"} + fi + fi + + opts+=" --backfstype=$fstype" + + var=${type}SIZE + if [ -n "${!var}" ]; then + opts+=" --device-size=${!var}" + fi + + var=$(upper $fstype)_MKFS_OPTS + fs_mkfs_opts+=${!var:+" ${!var}"} + + var=${type}_FS_MKFS_OPTS + fs_mkfs_opts+=${!var:+" ${!var}"} + + if [ -n "${fs_mkfs_opts## }" ]; then + opts+=" --mkfsoptions=\\\"${fs_mkfs_opts## }\\\"" + fi + + var=${type}OPT + opts+=${!var:+" ${!var}"} + + echo -n "$opts" +} + +formatall() { + local quiet + + if ! $VERBOSE; then + quiet=yes + fi + + stopall + # We need ldiskfs here, may as well load them all + load_modules + [ "$CLIENTONLY" ] && return + echo Formatting mgs, mds, osts + if ! combined_mgs_mds ; then + echo "Format mgs: $(mgsdevname)" + add mgs $(mkfs_opts mgs) --reformat $(mgsdevname) \ + $(mgsvdevname) ${quiet:+>/dev/null} || exit 10 + fi + + for num in `seq $MDSCOUNT`; do + echo "Format mds$num: $(mdsdevname $num)" + add mds$num $(mkfs_opts mds$num) --reformat \ + $(mdsdevname $num) $(mdsvdevname $num) \ + ${quiet:+>/dev/null} || exit 10 + done + + for num in `seq $OSTCOUNT`; do + echo "Format ost$num: $(ostdevname $num)" + add ost$num $(mkfs_opts ost$num) --reformat \ + $(ostdevname $num) $(ostvdevname ${num}) \ + ${quiet:+>/dev/null} || exit 10 + done } mount_client() { @@ -2303,7 +2931,7 @@ setupall() { echo $WRITECONF | grep -q "writeconf" && \ writeconf_all if ! 
combined_mgs_mds ; then - start mgs $MGSDEV $MGS_MOUNT_OPTS + start mgs $(mgsdevname) $MGS_MOUNT_OPTS fi for num in `seq $MDSCOUNT`; do @@ -2373,45 +3001,70 @@ mounted_lustre_filesystems() { } init_facet_vars () { - [ "$CLIENTONLY" ] && return 0 - local facet=$1 - shift - local device=$1 - - shift - - eval export ${facet}_dev=${device} - eval export ${facet}_opt=\"$@\" - - local dev=${facet}_dev - local label=$(do_facet ${facet} "$E2LABEL ${!dev}") - [ -z "$label" ] && echo no label for ${!dev} && exit 1 - - eval export ${facet}_svc=${label} - - local varname=${facet}failover_HOST - if [ -z "${!varname}" ]; then - eval $varname=$(facet_host $facet) - fi - - # ${facet}failover_dev is set in cfg file - varname=${facet}failover_dev - if [ -n "${!varname}" ] ; then - eval export ${facet}failover_dev=${!varname} - else - eval export ${facet}failover_dev=$device - fi - - # get mount point of already mounted device - # is facet_dev is already mounted then use the real - # mount point of this facet; otherwise use $(facet_mntpt $facet) - # i.e. ${facet}_MOUNT if specified by user or default - local mntpt=$(do_facet ${facet} cat /proc/mounts | \ - awk '"'${!dev}'" == $1 && $3 == "lustre" { print $2 }') - if [ -z $mntpt ]; then - mntpt=$(facet_mntpt $facet) - fi - eval export ${facet}_MOUNT=$mntpt + [ "$CLIENTONLY" ] && return 0 + local facet=$1 + shift + local device=$1 + + shift + + eval export ${facet}_dev=${device} + eval export ${facet}_opt=\"$@\" + + local dev=${facet}_dev + + # We need to loop for the label + # in case its not initialized yet. + for wait_time in {0,1,3,5,10}; do + + if [ $wait_time -gt 0 ]; then + echo "${!dev} not yet initialized,"\ + "waiting ${wait_time} seconds." + sleep $wait_time + fi + + local label=$(devicelabel ${facet} ${!dev}) + + # Check to make sure the label does + # not include ffff at the end of the label. + # This indicates it has not been initialized yet. + + if [[ $label =~ [f|F]{4}$ ]]; then + # label is not initialized, unset the result + # and either try again or fail + unset label + else + break + fi + done + + [ -z "$label" ] && echo no label for ${!dev} && exit 1 + + eval export ${facet}_svc=${label} + + local varname=${facet}failover_HOST + if [ -z "${!varname}" ]; then + eval $varname=$(facet_host $facet) + fi + + # ${facet}failover_dev is set in cfg file + varname=${facet}failover_dev + if [ -n "${!varname}" ] ; then + eval export ${facet}failover_dev=${!varname} + else + eval export ${facet}failover_dev=$device + fi + + # get mount point of already mounted device + # is facet_dev is already mounted then use the real + # mount point of this facet; otherwise use $(facet_mntpt $facet) + # i.e. 
${facet}_MOUNT if specified by user or default + local mntpt=$(do_facet ${facet} cat /proc/mounts | \ + awk '"'${!dev}'" == $1 && $3 == "lustre" { print $2 }') + if [ -z $mntpt ]; then + mntpt=$(facet_mntpt $facet) + fi + eval export ${facet}_MOUNT=$mntpt } init_facets_vars () { @@ -2424,7 +3077,7 @@ init_facets_vars () { done fi - combined_mgs_mds || init_facet_vars mgs $MGSDEV $MGS_MOUNT_OPTS + combined_mgs_mds || init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS remote_ost_nodsh && return @@ -2453,26 +3106,66 @@ osc_ensure_active () { [ $period -lt $timeout ] || log "$count OST are inactive after $timeout seconds, give up" } -init_param_vars () { - remote_mds_nodsh || - TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") - - log "Using TIMEOUT=$TIMEOUT" - - osc_ensure_active $SINGLEMDS $TIMEOUT - osc_ensure_active client $TIMEOUT +set_conf_param_and_check() { + local myfacet=$1 + local TEST=$2 + local PARAM=$3 + local ORIG=$(do_facet $myfacet "$TEST") + if [ $# -gt 3 ]; then + local FINAL=$4 + else + local -i FINAL + FINAL=$((ORIG + 5)) + fi + echo "Setting $PARAM from $ORIG to $FINAL" + do_facet mgs "$LCTL conf_param $PARAM='$FINAL'" || + error "conf_param $PARAM failed" - if [ $QUOTA_AUTO -ne 0 ]; then - if [ "$ENABLE_QUOTA" ]; then - echo "enable quota as required" - setup_quota $MOUNT || return 2 - else - echo "disable quota as required" - $LFS quotaoff -ug $MOUNT > /dev/null 2>&1 - fi - fi + wait_update $(facet_host $myfacet) "$TEST" "$FINAL" || + error "check $PARAM failed!" +} - return 0 +init_param_vars () { + remote_mds_nodsh || + TIMEOUT=$(do_facet $SINGLEMDS "lctl get_param -n timeout") + + log "Using TIMEOUT=$TIMEOUT" + + osc_ensure_active $SINGLEMDS $TIMEOUT + osc_ensure_active client $TIMEOUT + + local jobid_var + if [ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ]; then + jobid_var="none" + elif [ $JOBSTATS_AUTO -ne 0 ]; then + echo "enable jobstats, set job scheduler as $JOBID_VAR" + jobid_var=$JOBID_VAR + else + jobid_var=`$LCTL get_param -n jobid_var` + if [ $jobid_var != "disable" ]; then + echo "disable jobstats as required" + jobid_var="disable" + else + jobid_var="none" + fi + fi + + if [ $jobid_var == $JOBID_VAR -o $jobid_var == "disable" ]; then + do_facet mgs $LCTL conf_param $FSNAME.sys.jobid_var=$jobid_var + wait_update $HOSTNAME "$LCTL get_param -n jobid_var" \ + $jobid_var || return 1 + fi + + if [ $QUOTA_AUTO -ne 0 ]; then + if [ "$ENABLE_QUOTA" ]; then + echo "enable quota as required" + setup_quota $MOUNT || return 2 + else + echo "disable quota as required" + # $LFS quotaoff -ug $MOUNT > /dev/null 2>&1 + fi + fi + return 0 } nfs_client_mode () { @@ -2536,15 +3229,14 @@ check_config_client () { } check_config_clients () { - local clients=${CLIENTS:-$HOSTNAME} - local mntpt=$1 + local clients=${CLIENTS:-$HOSTNAME} + local mntpt=$1 - nfs_client_mode && return + nfs_client_mode && return - do_rpc_nodes $clients check_config_client $mntpt + do_rpc_nodes "$clients" check_config_client $mntpt - sanity_mount_check || - error "environments are insane!" + sanity_mount_check || error "environments are insane!" 
} check_timeout () { @@ -2636,12 +3328,14 @@ check_and_setup_lustre() { set_default_debug_nodes $(comma_list $(nodes_list)) fi - init_gss - set_flavor_all $SEC + init_gss + if $GSS; then + set_flavor_all $SEC + fi - if [ "$ONLY" == "setup" ]; then - exit 0 - fi + if [ "$ONLY" == "setup" ]; then + exit 0 + fi } restore_mount () { @@ -2671,26 +3365,24 @@ cleanup_and_setup_lustre() { # Get all of the server target devices from a given server node and type. get_mnt_devs() { - local node=$1 - local type=$2 - local obd_type - local devs - local dev - - case $type in - mdt) obd_type="osd" ;; - ost) obd_type="obdfilter" ;; # needs to be fixed when OST also uses an OSD - *) echo "invalid server type" && return 1 ;; - esac - - devs=$(do_node $node "lctl get_param -n $obd_type*.*.mntdev") - for dev in $devs; do - case $dev in - *loop*) do_node $node "losetup $dev" | \ - sed -e "s/.*(//" -e "s/).*//" ;; - *) echo $dev ;; - esac - done + local node=$1 + local type=$2 + local devs + local dev + + if [ "$type" == ost ]; then + devs=$(get_obdfilter_param $node "" mntdev) + else + devs=$(do_node $node \ + "lctl get_param -n osd-*.$FSNAME-M*.mntdev") + fi + for dev in $devs; do + case $dev in + *loop*) do_node $node "losetup $dev" | \ + sed -e "s/.*(//" -e "s/).*//" ;; + *) echo $dev ;; + esac + done } # Get all of the server target devices. @@ -2726,12 +3418,12 @@ run_e2fsck() { # verify a directory is shared among nodes. check_shared_dir() { - local dir=$1 + local dir=$1 - [ -z "$dir" ] && return 1 - do_rpc_nodes $(comma_list $(nodes_list)) check_logdir $dir - check_write_access $dir || return 1 - return 0 + [ -z "$dir" ] && return 1 + do_rpc_nodes "$(comma_list $(nodes_list))" check_logdir $dir + check_write_access $dir || return 1 + return 0 } # Run e2fsck on MDT and OST(s) to generate databases used for lfsck. @@ -2740,6 +3432,9 @@ generate_db() { local ostidx local dev + [[ $(lustre_version_code $SINGLEMDS) -ne $(version_code 2.2.0) ]] || + { skip "Lustre 2.2.0 lacks the patch for LU-1255"; exit 0; } + check_shared_dir $SHARED_DIRECTORY || error "$SHARED_DIRECTORY isn't a shared directory" @@ -2783,11 +3478,11 @@ check_and_cleanup_lustre() { run_lfsck fi - if is_mounted $MOUNT; then - [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* || - error "remove sub-test dirs failed" - [ "$ENABLE_QUOTA" ] && restore_quota_type || true - fi + if is_mounted $MOUNT; then + [ -n "$DIR" ] && rm -rf $DIR/[Rdfs][0-9]* || + error "remove sub-test dirs failed" + [ "$ENABLE_QUOTA" ] && restore_quota || true + fi if [ "$I_UMOUNTED2" = "yes" ]; then restore_mount $MOUNT2 || error "restore $MOUNT2 failed" @@ -2852,10 +3547,12 @@ no_dsh() { eval $@ } +# Convert a space-delimited list to a comma-delimited list. If the input is +# only whitespace, ensure the output is empty (i.e. "") so [ -n $list ] works comma_list() { - # the sed converts spaces to commas, but leaves the last space - # alone, so the line doesn't end with a comma. - echo "$*" | tr -s " " "\n" | sort -b -u | tr "\n" " " | sed 's/ \([^$]\)/,\1/g' + # echo is used to convert newlines to spaces, since it doesn't + # introduce a trailing space as using "tr '\n' ' '" does + echo $(tr -s " " "\n" <<< $* | sort -b -u) | tr ' ' ',' } list_member () { @@ -2960,6 +3657,10 @@ at_max_get() { at_get $1 at_max } +at_min_get() { + at_get $1 at_min +} + at_max_set() { local at_max=$1 shift @@ -3019,16 +3720,19 @@ pause_bulk() { } drop_ldlm_cancel() { -#define OBD_FAIL_LDLM_CANCEL 0x304 - RC=0 - do_facet client lctl set_param fail_loc=0x304 - do_facet client "$@" || RC=$? 
- do_facet client lctl set_param fail_loc=0 - return $RC +#define OBD_FAIL_LDLM_CANCEL_NET 0x304 + local RC=0 + local list=$(comma_list $(mdts_nodes) $(osts_nodes)) + do_nodes $list lctl set_param fail_loc=0x304 + + do_facet client "$@" || RC=$? + + do_nodes $list lctl set_param fail_loc=0 + return $RC } drop_bl_callback() { -#define OBD_FAIL_LDLM_BL_CALLBACK 0x305 +#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305 RC=0 do_facet client lctl set_param fail_loc=0x305 do_facet client "$@" || RC=$? @@ -3131,6 +3835,17 @@ stop_full_debug_logging() { debugrestore } +# prints bash call stack +log_trace_dump() { + echo " Trace dump:" + for (( i=1; i < ${#BASH_LINENO[*]} ; i++ )) ; do + local s=${BASH_SOURCE[$i]} + local l=${BASH_LINENO[$i-1]} + local f=${FUNCNAME[$i]} + echo " = $s:$l:$f()" + done +} + ################################## # Test interface ################################## @@ -3146,6 +3861,7 @@ error_noexit() { fi log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ " + log_trace_dump mkdir -p $LOGDIR # We need to dump the logs on all nodes @@ -3153,17 +3869,18 @@ error_noexit() { gather_logs $(comma_list $(nodes_list)) fi - debugrestore - [ "$TESTSUITELOG" ] && echo "$0: ${TYPE}: $TESTNAME $@" >> $TESTSUITELOG - echo "$@" > $LOGDIR/err + debugrestore + [ "$TESTSUITELOG" ] && + echo "$TESTSUITE: $TYPE: $TESTNAME $@" >> $TESTSUITELOG + echo "$@" > $LOGDIR/err } exit_status () { - local status=0 - local log=$TESTSUITELOG + local status=0 + local log=$TESTSUITELOG - [ -f "$log" ] && grep -q FAIL: $log && status=1 - exit $status + [ -f "$log" ] && grep -q FAIL $log && status=1 + exit $status } error() { @@ -3184,17 +3901,29 @@ error_ignore() { error_noexit "$@" } +error_and_remount() { + error_noexit "$@" + remount_client $MOUNT + exit 1 +} + skip_env () { $FAIL_ON_SKIP_ENV && error false $@ || skip $@ } -skip () { - echo - log " SKIP: ${TESTSUITE} ${TESTNAME} $@" - [ "$ALWAYS_SKIPPED" ] && \ - skip_logged ${TESTNAME} "$@" || true - [ "$TESTSUITELOG" ] && \ - echo "${TESTSUITE}: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true +skip() { + echo + log " SKIP: $TESTSUITE $TESTNAME $@" + + if [[ -n "$ALWAYS_SKIPPED" ]]; then + skip_logged $TESTNAME "$@" + else + mkdir -p $LOGDIR + echo "$@" > $LOGDIR/skip + fi + + [[ -n "$TESTSUITELOG" ]] && + echo "$TESTSUITE: SKIP: $TESTNAME $@" >> $TESTSUITELOG || true } build_test_filter() { @@ -3262,41 +3991,36 @@ run_test() { echo -n "." return 0 fi + + LAST_SKIPPED="y" + ALWAYS_SKIPPED="y" testname=EXCEPT_$1 if [ ${!testname}x != x ]; then - LAST_SKIPPED="y" TESTNAME=test_$1 skip "skipping excluded test $1" return 0 fi testname=EXCEPT_$base if [ ${!testname}x != x ]; then - LAST_SKIPPED="y" TESTNAME=test_$1 skip "skipping excluded test $1 (base $base)" return 0 fi testname=EXCEPT_ALWAYS_$1 if [ ${!testname}x != x ]; then - LAST_SKIPPED="y" - ALWAYS_SKIPPED="y" TESTNAME=test_$1 skip "skipping ALWAYS excluded test $1" return 0 fi testname=EXCEPT_ALWAYS_$base if [ ${!testname}x != x ]; then - LAST_SKIPPED="y" - ALWAYS_SKIPPED="y" TESTNAME=test_$1 skip "skipping ALWAYS excluded test $1 (base $base)" return 0 fi testname=EXCEPT_SLOW_$1 if [ ${!testname}x != x ]; then - LAST_SKIPPED="y" TESTNAME=test_$1 skip "skipping SLOW test $1" return 0 fi testname=EXCEPT_SLOW_$base if [ ${!testname}x != x ]; then - LAST_SKIPPED="y" TESTNAME=test_$1 skip "skipping SLOW test $1 (base $base)" return 0 fi @@ -3308,10 +4032,6 @@ run_test() { return $? 
} -equals_msg() { - banner "$*" -} - log() { echo "$*" module_loaded lnet || load_modules @@ -3338,19 +4058,23 @@ trace() { } complete () { - equals_msg $1 test complete, duration $2 sec + local duration=$1 + + banner test complete, duration $duration sec [ -f "$TESTSUITELOG" ] && egrep .FAIL $TESTSUITELOG || true - echo duration $2 >>$TESTSUITELOG + echo duration $duration >>$TESTSUITELOG } pass() { - # Set TEST_STATUS here; will be used for logging the result - if [ -f $LOGDIR/err ]; then - TEST_STATUS="FAIL" - else - TEST_STATUS="PASS" - fi - echo "$TEST_STATUS $@" 2>&1 | tee -a $TESTSUITELOG + # Set TEST_STATUS here. It will be used for logging the result. + TEST_STATUS="PASS" + + if [[ -f $LOGDIR/err ]]; then + TEST_STATUS="FAIL" + elif [[ -f $LOGDIR/skip ]]; then + TEST_STATUS="SKIP" + fi + echo "$TEST_STATUS $@" 2>&1 | tee -a $TESTSUITELOG } check_mds() { @@ -3393,7 +4117,7 @@ banner() { run_one() { local testnum=$1 local message=$2 - tfile=f${testnum} + tfile=f.${TESTSUITE}.${testnum} export tdir=d0.${TESTSUITE}/d${base} export TESTNAME=test_$testnum local SAVE_UMASK=`umask` @@ -3419,42 +4143,49 @@ run_one() { # - test result is saved to data file # run_one_logged() { - local BEFORE=`date +%s` - local TEST_ERROR - local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log - local test_log=$LOGDIR/$name - rm -rf $LOGDIR/err - local SAVE_UMASK=`umask` - umask 0022 + local BEFORE=`date +%s` + local TEST_ERROR + local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log + local test_log=$LOGDIR/$name + rm -rf $LOGDIR/err + rm -rf $LOGDIR/skip + local SAVE_UMASK=`umask` + umask 0022 - echo - log_sub_test_begin test_${1} - (run_one $1 "$2") 2>&1 | tee -i $test_log - local RC=${PIPESTATUS[0]} + echo + log_sub_test_begin test_${1} + (run_one $1 "$2") 2>&1 | tee -i $test_log + local RC=${PIPESTATUS[0]} - [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \ - echo "test_$1 returned $RC" | tee $LOGDIR/err + [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \ + echo "test_$1 returned $RC" | tee $LOGDIR/err - duration=$((`date +%s` - $BEFORE)) - pass "$1" "(${duration}s)" - [ -f $LOGDIR/err ] && TEST_ERROR=$(cat $LOGDIR/err) - log_sub_test_end $TEST_STATUS $duration "$RC" "$TEST_ERROR" + duration=$((`date +%s` - $BEFORE)) + pass "$1" "(${duration}s)" - if [ -f $LOGDIR/err ]; then - $FAIL_ON_ERROR && exit $RC - fi + if [[ -f $LOGDIR/err ]]; then + TEST_ERROR=$(cat $LOGDIR/err) + elif [[ -f $LOGDIR/skip ]]; then + TEST_ERROR=$(cat $LOGDIR/skip) + fi + log_sub_test_end $TEST_STATUS $duration "$RC" "$TEST_ERROR" - umask $SAVE_UMASK + if [ -f $LOGDIR/err ]; then + $FAIL_ON_ERROR && exit $RC + fi - return 0 + umask $SAVE_UMASK + + return 0 } # # Print information of skipped tests to result.yml # skip_logged(){ - log_sub_test_begin $1 - log_sub_test_end "SKIP" "0" "0" "\"$2\"" + log_sub_test_begin $1 + shift + log_sub_test_end "SKIP" "0" "0" "$@" } canonical_path() { @@ -3652,9 +4383,7 @@ nodes_list () { } remote_nodes_list () { - local rnodes=$(nodes_list) - rnodes=$(echo " $rnodes " | sed -re "s/\s+$HOSTNAME\s+/ /g") - echo $rnodes + echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g" } init_clients_lists () { @@ -3752,6 +4481,63 @@ setstripe_nfsserver () { do_nodev $nfsserver lfs setstripe "$@" } +# Check and add a test group. 
+add_group() { + local group_id=$1 + local group_name=$2 + local rc=0 + + local gid=$(getent group $group_name | cut -d: -f3) + if [[ -n "$gid" ]]; then + [[ "$gid" -eq "$group_id" ]] || { + error_noexit "inconsistent group ID:" \ + "new: $group_id, old: $gid" + rc=1 + } + else + groupadd -g $group_id $group_name + rc=${PIPESTATUS[0]} + fi + + return $rc +} + +# Check and add a test user. +add_user() { + local user_id=$1 + shift + local user_name=$1 + shift + local group_name=$1 + shift + local home=$1 + shift + local opts="$@" + local rc=0 + + local uid=$(getent passwd $user_name | cut -d: -f3) + if [[ -n "$uid" ]]; then + if [[ "$uid" -eq "$user_id" ]]; then + local dir=$(getent passwd $user_name | cut -d: -f6) + if [[ "$dir" != "$home" ]]; then + mkdir -p $home + usermod -d $home $user_name + rc=${PIPESTATUS[0]} + fi + else + error_noexit "inconsistent user ID:" \ + "new: $user_id, old: $uid" + rc=1 + fi + else + mkdir -p $home + useradd -M -u $user_id -d $home -g $group_name $opts $user_name + rc=${PIPESTATUS[0]} + fi + + return $rc +} + check_runas_id_ret() { local myRC=0 local myRUNAS_UID=$1 @@ -3816,7 +4602,7 @@ fi" # Run multiop in the background, but wait for it to print # "PAUSING" to its stdout before returning from this function. multiop_bg_pause() { - MULTIOP_PROG=${MULTIOP_PROG:-multiop} + MULTIOP_PROG=${MULTIOP_PROG:-$MULTIOP} FILE=$1 ARGS=$2 @@ -3916,15 +4702,15 @@ restore_lustre_params() { } check_catastrophe() { - local rnodes=${1:-$(comma_list $(remote_nodes_list))} - local C=$CATASTROPHE - [ -f $C ] && [ $(cat $C) -ne 0 ] && return 1 + local rnodes=${1:-$(comma_list $(remote_nodes_list))} + local C=$CATASTROPHE + [ -f $C ] && [ $(cat $C) -ne 0 ] && return 1 + + [ -z "$rnodes" ] && return 0 - if [ $rnodes ]; then - do_nodes $rnodes "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0); -if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi -exit \\\$rc;" - fi + do_nodes "$rnodes" "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0); + if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi + exit \\\$rc;" } # CMD: determine mds index where directory inode presents @@ -4107,10 +4893,7 @@ wait_import_state() { # the value depends on configure options, and it is not stored in /proc. # obd_support.h: # #define CONNECTION_SWITCH_MIN 5U -# #ifndef CRAY_XT3 # #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/20) -# #else -# #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN,obd_timeout/2) request_timeout () { local facet=$1 @@ -4136,13 +4919,13 @@ wait_osc_import_state() { # 2. wait the deadline of client 2nd request local maxtime=$(( 2 * $(request_timeout $facet))) - if ! do_rpc_nodes $(facet_host $facet) \ - _wait_import_state $expected $param $maxtime; then - error "import is not in ${expected} state" - return 1 - fi + if ! do_rpc_nodes "$(facet_host $facet)" \ + _wait_import_state $expected $param $maxtime; then + error "import is not in ${expected} state" + return 1 + fi - return 0 + return 0 } get_clientmdc_proc_path() { @@ -4150,12 +4933,14 @@ get_clientmdc_proc_path() { } do_rpc_nodes () { - local list=$1 - shift + local list=$1 + shift - # Add paths to lustre tests for 32 and 64 bit systems. - local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH" - do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ " + [ -z "$list" ] && return 0 + + # Add paths to lustre tests for 32 and 64 bit systems. 
+	local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
+	do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ "
 }
 
 wait_clients_import_state () {
@@ -4180,27 +4965,37 @@ wait_clients_import_state () {
 		local params=$(expand_list $params $proc_path)
 	done
 
-	if ! do_rpc_nodes $list wait_import_state $expected $params; then
-		error "import is not in ${expected} state"
-		return 1
-	fi
+	if ! do_rpc_nodes "$list" wait_import_state $expected $params; then
+		error "import is not in ${expected} state"
+		return 1
+	fi
 }
 
 oos_full() {
-	local -a AVAILA
-	local -a GRANTA
-	local OSCFULL=1
-	AVAILA=($(do_nodes $(comma_list $(osts_nodes)) \
-		$LCTL get_param obdfilter.*.kbytesavail))
-	GRANTA=($(do_nodes $(comma_list $(osts_nodes)) \
-		$LCTL get_param -n obdfilter.*.tot_granted))
-	for ((i=0; i<${#AVAILA[@]}; i++)); do
-		local -a AVAIL1=(${AVAILA[$i]//=/ })
-		GRANT=$((${GRANTA[$i]}/1024))
-		echo -n $(echo ${AVAIL1[0]} | cut -d"." -f2) avl=${AVAIL1[1]} grnt=$GRANT diff=$((AVAIL1[1] - GRANT))
-		[ $((AVAIL1[1] - GRANT)) -lt 400 ] && OSCFULL=0 && echo " FULL" || echo
-	done
-	return $OSCFULL
+	local -a AVAILA
+	local -a GRANTA
+	local -a TOTALA
+	local OSCFULL=1
+	AVAILA=($(do_nodes $(comma_list $(osts_nodes)) \
+		$LCTL get_param obdfilter.*.kbytesavail))
+	GRANTA=($(do_nodes $(comma_list $(osts_nodes)) \
+		$LCTL get_param -n obdfilter.*.tot_granted))
+	TOTALA=($(do_nodes $(comma_list $(osts_nodes)) \
+		$LCTL get_param -n obdfilter.*.kbytestotal))
+	for ((i=0; i<${#AVAILA[@]}; i++)); do
+		local -a AVAIL1=(${AVAILA[$i]//=/ })
+		local -a TOTAL=(${TOTALA[$i]//=/ })
+		GRANT=$((${GRANTA[$i]}/1024))
+		# allow 1% of total space in bavail because of delayed
+		# allocation with ZFS which might release some free space after
+		# txg commit. For small devices, we set a minimum of 8MB
+		local LIMIT=$((${TOTAL} / 100 + 8000))
+		echo -n $(echo ${AVAIL1[0]} | cut -d"." -f2) avl=${AVAIL1[1]} \
+			grnt=$GRANT diff=$((AVAIL1[1] - GRANT)) limit=${LIMIT}
+		[ $((AVAIL1[1] - GRANT)) -lt $LIMIT ] && OSCFULL=0 && \
+			echo " FULL" || echo
+	done
+	return $OSCFULL
 }
 
 pool_list () {
@@ -4773,32 +5568,44 @@ run_llverfs()
 }
 
 remove_mdt_files() {
-	local facet=$1
-	local mdtdev=$2
-	shift 2
-	local files="$@"
-	local mntpt=$(facet_mntpt $facet)
-
-	echo "removing files from $mdtdev on $facet: $files"
-	mount -t $FSTYPE $MDS_MOUNT_OPTS $mdtdev $mntpt || return $?
-	rc=0;
-	for f in $files; do
-		rm $mntpt/ROOT/$f || { rc=$?; break; }
-	done
-	umount -f $mntpt || return $?
-	return $rc
+	local facet=$1
+	local mdtdev=$2
+	shift 2
+	local files="$@"
+	local mntpt=$(facet_mntpt $facet)
+	local opts=$MDS_MOUNT_OPTS
+
+	echo "removing files from $mdtdev on $facet: $files"
+	if [ $(facet_fstype $facet) == ldiskfs ] &&
+	   ! do_facet $facet test -b ${!dev}; then
+		opts=$(csa_add "$opts" -o loop)
+	fi
+	mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
+		return $?
+	rc=0;
+	for f in $files; do
+		rm $mntpt/ROOT/$f || { rc=$?; break; }
+	done
+	umount -f $mntpt || return $?
+	return $rc
 }
 
 duplicate_mdt_files() {
-	local facet=$1
-	local mdtdev=$2
-	shift 2
-	local files="$@"
-	local mntpt=$(facet_mntpt $facet)
-
-	echo "duplicating files on $mdtdev on $facet: $files"
-	mkdir -p $mntpt || return $?
-	mount -t $FSTYPE $MDS_MOUNT_OPTS $mdtdev $mntpt || return $?
+	local facet=$1
+	local mdtdev=$2
+	shift 2
+	local files="$@"
+	local mntpt=$(facet_mntpt $facet)
+	local opts=$MDS_MOUNT_OPTS
+
+	echo "duplicating files on $mdtdev on $facet: $files"
+	mkdir -p $mntpt || return $?
+	if [ $(facet_fstype $facet) == ldiskfs ] &&
+	   ! do_facet $facet test -b ${!dev}; then
+		opts=$(csa_add "$opts" -o loop)
+	fi
+	mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
+		return $?
 
 	do_umount() {
 		trap 0
@@ -4915,3 +5722,112 @@ generate_string() {
 	echo "$(head -c $size < /dev/zero | tr '\0' y)"
 }
+
+reformat_external_journal() {
+	if [ ! -z ${EJOURNAL} ]; then
+		local rcmd="do_facet ${SINGLEMDS}"
+
+		echo "reformat external journal on ${SINGLEMDS}:${EJOURNAL}"
+		${rcmd} mke2fs -O journal_dev ${EJOURNAL} || return 1
+	fi
+}
+
+# MDT file-level backup/restore
+mds_backup_restore() {
+	local devname=$(mdsdevname ${SINGLEMDS//mds/})
+	local mntpt=$(facet_mntpt brpt)
+	local rcmd="do_facet ${SINGLEMDS}"
+	local metaea=${TMP}/backup_restore.ea
+	local metadata=${TMP}/backup_restore.tgz
+	local opts=${MDS_MOUNT_OPTS}
+	local svc=${SINGLEMDS}_svc
+
+	if ! ${rcmd} test -b ${devname}; then
+		opts=$(csa_add "$opts" -o loop)
+	fi
+
+	echo "file-level backup/restore on ${SINGLEMDS}:${devname}"
+
+	# step 1: build mount point
+	${rcmd} mkdir -p $mntpt
+	# step 2: cleanup old backup
+	${rcmd} rm -f $metaea $metadata
+	# step 3: mount dev
+	${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 1
+	# step 4: backup metaea
+	echo "backup EA"
+	${rcmd} "cd $mntpt && getfattr -R -d -m '.*' -P . > $metaea && cd -" ||
+		return 2
+	# step 5: backup metadata
+	echo "backup data"
+	${rcmd} tar zcf $metadata -C $mntpt/ . > /dev/null 2>&1 || return 3
+	# step 6: umount
+	${rcmd} umount -d $mntpt || return 4
+	# step 7: reformat external journal if needed
+	reformat_external_journal || return 5
+	# step 8: reformat dev
+	echo "reformat new device"
+	add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS}) --backfstype ldiskfs \
+		--reformat $devname > /dev/null || return 6
+	# step 9: mount dev
+	${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 7
+	# step 10: restore metadata
+	echo "restore data"
+	${rcmd} tar zxfp $metadata -C $mntpt > /dev/null 2>&1 || return 8
+	# step 11: restore metaea
+	echo "restore EA"
+	${rcmd} "cd $mntpt && setfattr --restore=$metaea && cd - " || return 9
+	# step 12: remove recovery logs
+	echo "remove recovery logs"
+	${rcmd} rm -fv $mntpt/OBJECTS/* $mntpt/CATALOGS
+	# step 13: umount dev
+	${rcmd} umount -d $mntpt || return 10
+	# step 14: cleanup tmp backup
+	${rcmd} rm -f $metaea $metadata
+	# step 15: reset device label - it's not virgin on
+	${rcmd} e2label $devname ${!svc}
+}
+
+# remove OI files
+mds_remove_ois() {
+	local devname=$(mdsdevname ${SINGLEMDS//mds/})
+	local mntpt=$(facet_mntpt brpt)
+	local rcmd="do_facet ${SINGLEMDS}"
+	local idx=$1
+	local opts=${MDS_MOUNT_OPTS}
+
+	if ! ${rcmd} test -b ${devname}; then
+		opts=$(csa_add "$opts" -o loop)
+	fi
+
+	echo "remove OI files: idx=${idx}"
+
+	# step 1: build mount point
+	${rcmd} mkdir -p $mntpt
+	# step 2: mount dev
+	${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 1
+	if [ -z $idx ]; then
+		# step 3: remove all OI files
+		${rcmd} rm -fv $mntpt/oi.16*
+	elif [ $idx -lt 2 ]; then
+		${rcmd} rm -fv $mntpt/oi.16.${idx}
+	else
+		local i
+
+		# others, rm oi.16.[idx, idx * idx, idx ** ...]
+		for ((i=${idx}; i<64; i=$((i * idx)))); do
+			${rcmd} rm -fv $mntpt/oi.16.${i}
+		done
+	fi
+	# step 4: umount
+	${rcmd} umount -d $mntpt || return 2
+	# OI files will be recreated when mounted as lustre next time.
+}
+
+# generate maloo upload-able log file name
+# \param logname specify unique part of file name
+generate_logname() {
+	local logname=${1:-"default_logname"}
+
+	echo "$TESTLOG_PREFIX.$TESTNAME.$logname.$(hostname -s).log"
+}
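
The helpers introduced above (add_group, add_user, generate_logname) are meant to be called from individual test scripts once test-framework.sh has been sourced. The sketch below is illustrative only and is not part of the patch: the group/user names, the numeric IDs, the home directory, and the test number are hypothetical example values, and the usual setup/cleanup calls are abbreviated.

	# source the framework the way the existing test scripts do
	LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
	. $LUSTRE/tests/test-framework.sh
	init_test_env $@

	# create an example test group and user; 60000, quota_usrs, quota_usr
	# and /tmp/quota_usr are arbitrary values chosen for illustration
	add_group 60000 quota_usrs || error "add_group failed"
	add_user 60000 quota_usr quota_usrs /tmp/quota_usr -s /bin/bash ||
		error "add_user failed"

	test_100() {
		# per-test log named so it can be collected with the results
		local log=$(generate_logname "dd")

		dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 > $log 2>&1 ||
			error "dd failed, see $log"
	}
	run_test 100 "example use of add_group/add_user/generate_logname"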