X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Ftest-framework.sh;h=332f74065d10773bf3d28eff1cd8196f9325cea1;hp=4771bfbb130868c3b12bb712e299d7b8c5fab0dd;hb=a98d44e0439ad5b20d0d7f273b29e6ea990312f0;hpb=54fe9796ec837698a27420c8a92d9493c733b6a9 diff --git a/lustre/tests/test-framework.sh b/lustre/tests/test-framework.sh index 4771bfb..332f740 100644 --- a/lustre/tests/test-framework.sh +++ b/lustre/tests/test-framework.sh @@ -5,6 +5,7 @@ trap 'print_summary && touch $TF_FAIL && \ set -e #set -x +export LANG=en_US export EJOURNAL=${EJOURNAL:-""} export REFORMAT=${REFORMAT:-""} export WRITECONF=${WRITECONF:-""} @@ -15,8 +16,8 @@ export GSS_KRB5=false export GSS_PIPEFS=false export IDENTITY_UPCALL=default export QUOTA_AUTO=1 -export JOBSTATS_AUTO=${JOBSTATS_AUTO:-1} -export JOBID_VAR=${JOBID_VAR:-"procname_uid"} +# specify environment variable containing batch job name for server statistics +export JOBID_VAR=${JOBID_VAR:-"procname_uid"} # or "existing" or "disable" # LOAD_LLOOP: LU-409: only load llite_lloop module if kernel < 2.6.32 or # LOAD_LLOOP is true. LOAD_LLOOP is false by default. @@ -118,10 +119,11 @@ print_summary () { } init_test_env() { - export LUSTRE=`absolute_path $LUSTRE` - export TESTSUITE=`basename $0 .sh` - export TEST_FAILED=false - export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} + export LUSTRE=$(absolute_path $LUSTRE) + export TESTSUITE=$(basename $0 .sh) + export TEST_FAILED=false + export FAIL_ON_SKIP_ENV=${FAIL_ON_SKIP_ENV:-false} + export RPC_MODE=${RPC_MODE:-false} export MKE2FS=$MKE2FS if [ -z "$MKE2FS" ]; then @@ -177,6 +179,15 @@ init_test_env() { fi fi + export RESIZE2FS=$RESIZE2FS + if [ -z "$RESIZE2FS" ]; then + if which resizefs.ldiskfs >/dev/null 2>&1; then + export RESIZE2FS=resizefs.ldiskfs + else + export RESIZE2FS=resize2fs + fi + fi + export LFSCK_BIN=${LFSCK_BIN:-lfsck} export LFSCK_ALWAYS=${LFSCK_ALWAYS:-"no"} # check fs after each test suite export FSCK_MAX_ERR=4 # File system errors left uncorrected @@ -184,6 +195,7 @@ init_test_env() { export ZFS=${ZFS:-zfs} export ZPOOL=${ZPOOL:-zpool} export ZDB=${ZDB:-zdb} + export PARTPROBE=${PARTPROBE:-partprobe} #[ -d /r ] && export ROOT=${ROOT:-/r} export TMP=${TMP:-$ROOT/tmp} @@ -282,8 +294,6 @@ init_test_env() { IDENTITY_UPCALL=false ;; esac - USE_OFD=${USE_OFD:-yes} - [ "$USE_OFD" = "yes" ] && LOAD_MODULES_REMOTE=true export LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false} @@ -291,13 +301,15 @@ init_test_env() { export RLUSTRE=${RLUSTRE:-$LUSTRE} export RPWD=${RPWD:-$PWD} export I_MOUNTED=${I_MOUNTED:-"no"} - if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mds.ko -a \ - ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mds.ko -a \ - ! -f `dirname $0`/../mds/mds.ko ]; then - export CLIENTMODSONLY=yes - fi + if [ ! -f /lib/modules/$(uname -r)/kernel/fs/lustre/mdt.ko -a \ + ! -f /lib/modules/$(uname -r)/updates/kernel/fs/lustre/mdt.ko -a \ + ! -f /lib/modules/$(uname -r)/extra/kernel/fs/lustre/mdt.ko -a \ + ! 
-f $LUSTRE/mdt/mdt.ko ]; then + export CLIENTMODSONLY=yes + fi - export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3} + export SHUTDOWN_ATTEMPTS=${SHUTDOWN_ATTEMPTS:-3} + export OSD_TRACK_DECLARES_LBUG=${OSD_TRACK_DECLARES_LBUG:-"yes"} # command line @@ -314,10 +326,12 @@ init_test_env() { shift $((OPTIND - 1)) ONLY=${ONLY:-$*} - # print the durations of each test if "true" - DDETAILS=${DDETAILS:-false} - [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true - rm -f $TMP/*active + # print the durations of each test if "true" + DDETAILS=${DDETAILS:-false} + [ "$TESTSUITELOG" ] && rm -f $TESTSUITELOG || true + if ! $RPC_MODE; then + rm -f $TMP/*active + fi } check_cpt_number() { @@ -338,7 +352,7 @@ version_code() { # split arguments like "1.8.6-wc3" into "1", "8", "6", "wc3" eval set -- $(tr "[:punct:]" " " <<< $*) - echo -n $((($1 << 16) | ($2 << 8) | $3)) + echo -n "$((($1 << 16) | ($2 << 8) | $3))" } export LINUX_VERSION=$(uname -r | sed -e "s/[-.]/ /3" -e "s/ .*//") @@ -373,20 +387,24 @@ load_module() { optvar="MODOPTS_$(basename $module | tr a-z A-Z)" eval set -- \$$optvar if [ $# -eq 0 -a -n "$MODPROBECONF" ]; then - # Nothing in $MODOPTS_; try modprobe.conf - set -- $(grep -P "^options\\s+${BASE}" $MODPROBECONF) - # Get rid of "options $module" - (($# > 0)) && shift 2 - - # Ensure we have accept=all for lnet - if [ $(basename $module) = lnet ]; then - # OK, this is a bit wordy... - local arg accept_all_present=false - for arg in "$@"; do - [ "$arg" = accept=all ] && accept_all_present=true - done - $accept_all_present || set -- "$@" accept=all - fi + # Nothing in $MODOPTS_; try modprobe.conf + local opt + opt=$(awk -v var="^options $BASE" '$0 ~ var \ + {gsub("'"options $BASE"'",""); print}' $MODPROBECONF) + set -- $(echo -n $opt) + + # Ensure we have accept=all for lnet + if [ $(basename $module) = lnet ]; then + # OK, this is a bit wordy... + local arg accept_all_present=false + + for arg in "$@"; do + [ "$arg" = accept=all ] && \ + accept_all_present=true + done + $accept_all_present || set -- "$@" accept=all + fi + export $optvar="$*" fi fi @@ -425,8 +443,6 @@ llite_lloop_enabled() { } load_modules_local() { - [ $(facet_fstype ost1) == "zfs" ] && export USE_OFD=yes - if [ -n "$MODPROBE" ]; then # use modprobe echo "Using modprobe to load modules" @@ -470,7 +486,6 @@ load_modules_local() { load_module ../lnet/lnet/lnet LNETLND=${LNETLND:-"socklnd/ksocklnd"} load_module ../lnet/klnds/$LNETLND - load_module lvfs/lvfs load_module obdclass/obdclass load_module ptlrpc/ptlrpc load_module ptlrpc/gss/ptlrpc_gss @@ -487,25 +502,21 @@ load_modules_local() { grep -q crc16 $SYMLIST || { modprobe crc16 2>/dev/null || true; } grep -q -w jbd $SYMLIST || { modprobe jbd 2>/dev/null || true; } grep -q -w jbd2 $SYMLIST || { modprobe jbd2 2>/dev/null || true; } + load_module lfsck/lfsck [ "$LQUOTA" != "no" ] && load_module quota/lquota $LQUOTAOPTS if [[ $(node_fstypes $HOSTNAME) == *zfs* ]]; then modprobe zfs load_module osd-zfs/osd_zfs fi - load_module mgs/mgs - load_module mds/mds - load_module mdd/mdd if [[ $(node_fstypes $HOSTNAME) == *ldiskfs* ]]; then - # - # This block shall be moved up beside osd-zfs as soon - # as osd-ldiskfs stops using mdd symbols. 
- # grep -q exportfs_decode_fh $SYMLIST || { modprobe exportfs 2> /dev/null || true; } - load_module ../ldiskfs/ldiskfs/ldiskfs - load_module lvfs/fsfilt_ldiskfs + load_module ../ldiskfs/ldiskfs load_module osd-ldiskfs/osd_ldiskfs fi + load_module nodemap/nodemap + load_module mgs/mgs + load_module mdd/mdd load_module mdt/mdt load_module ost/ost load_module lod/lod @@ -583,6 +594,18 @@ unload_modules() { return 0 } +fs_log_size() { + local facet=${1:-$SINGLEMDS} + local fstype=$(facet_fstype $facet) + local size=0 + case $fstype in + ldiskfs) size=50;; # largest seen is 44, leave some headroom + zfs) size=400;; # largest seen is 384 + esac + + echo -n $size +} + check_gss_daemon_nodes() { local list=$1 dname=$2 @@ -695,10 +718,17 @@ cleanup_gss() { fi } +facet_svc() { + local facet=$1 + local var=${facet}_svc + + echo -n ${!var} +} + facet_type() { local facet=$1 - echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9]\+//' | + echo -n $facet | sed -e 's/^fs[0-9]\+//' -e 's/[0-9_]\+//' | tr '[:lower:]' '[:upper:]' } @@ -733,6 +763,11 @@ facet_fstype() { return fi + if [[ $facet == mgs ]] && combined_mgs_mds; then + facet_fstype mds1 + return + fi + return 1 } @@ -789,7 +824,184 @@ ostdevlabel() { } # -# This and set_obdfilter_param() shall be used to access OSD parameters +# Get the device of a facet. +# +facet_device() { + local facet=$1 + local device + + case $facet in + mgs) device=$(mgsdevname) ;; + mds*) device=$(mdsdevname $(facet_number $facet)) ;; + ost*) device=$(ostdevname $(facet_number $facet)) ;; + fs2mds) device=$(mdsdevname 1_2) ;; + fs2ost) device=$(ostdevname 1_2) ;; + fs3ost) device=$(ostdevname 2_2) ;; + *) ;; + esac + + echo -n $device +} + +# +# Get the virtual device of a facet. +# +facet_vdevice() { + local facet=$1 + local device + + case $facet in + mgs) device=$(mgsvdevname) ;; + mds*) device=$(mdsvdevname $(facet_number $facet)) ;; + ost*) device=$(ostvdevname $(facet_number $facet)) ;; + fs2mds) device=$(mdsvdevname 1_2) ;; + fs2ost) device=$(ostvdevname 1_2) ;; + fs3ost) device=$(ostvdevname 2_2) ;; + *) ;; + esac + + echo -n $device +} + +# +# Re-read the partition table on failover partner host. +# After a ZFS storage pool is created on a shared device, the partition table +# on the device may change. However, the operating system on the failover +# host may not notice the change automatically. Without the up-to-date partition +# block devices, 'zpool import ..' cannot find the labels, whose positions are +# relative to partition rather than disk beginnings. +# +# This function performs partprobe on the failover host to make it re-read the +# partition table. +# +refresh_partition_table() { + local facet=$1 + local device=$2 + local host + + host=$(facet_passive_host $facet) + if [[ -n "$host" ]]; then + do_node $host "$PARTPROBE $device" + fi +} + +# +# Get ZFS storage pool name. +# +zpool_name() { + local facet=$1 + local device + local poolname + + device=$(facet_device $facet) + # poolname is string before "/" + poolname="${device%%/*}" + + echo -n $poolname +} + +# +# Create ZFS storage pool. +# +create_zpool() { + local facet=$1 + local poolname=$2 + local vdev=$3 + shift 3 + local opts=${@:-"-o cachefile=none"} + + do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 || + $ZPOOL create -f $opts $poolname $vdev" +} + +# +# Create ZFS file system. 
+# +create_zfs() { + local facet=$1 + local dataset=$2 + shift 2 + local opts=${@:-"-o mountpoint=legacy"} + + do_facet $facet "$ZFS list -H $dataset >/dev/null 2>&1 || + $ZFS create $opts $dataset" +} + +# +# Export ZFS storage pool. +# Before exporting the pool, all datasets within the pool should be unmounted. +# +export_zpool() { + local facet=$1 + shift + local opts="$@" + local poolname + + poolname=$(zpool_name $facet) + + if [[ -n "$poolname" ]]; then + do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 || + grep -q ^$poolname/ /proc/mounts || + $ZPOOL export $opts $poolname" + fi +} + +# +# Destroy ZFS storage pool. +# Destroy the given pool and free up any devices for other use. This command +# tries to unmount any active datasets before destroying the pool. +# -f Force any active datasets contained within the pool to be unmounted. +# +destroy_zpool() { + local facet=$1 + local poolname=${2:-$(zpool_name $facet)} + + if [[ -n "$poolname" ]]; then + do_facet $facet "! $ZPOOL list -H $poolname >/dev/null 2>&1 || + $ZPOOL destroy -f $poolname" + fi +} + +# +# Import ZFS storage pool. +# Force importing, even if the pool appears to be potentially active. +# +import_zpool() { + local facet=$1 + shift + local opts=${@:-"-o cachefile=none"} + local poolname + + poolname=$(zpool_name $facet) + + if [[ -n "$poolname" ]]; then + opts+=" -d $(dirname $(facet_vdevice $facet))" + do_facet $facet "$ZPOOL list -H $poolname >/dev/null 2>&1 || + $ZPOOL import -f $opts $poolname" + fi +} + +# +# Set the "cachefile=none" property on ZFS storage pool so that the pool +# is not automatically imported on system startup. +# +# In a failover environment, this will provide resource level fencing which +# will ensure that the same ZFS storage pool will not be imported concurrently +# on different nodes. +# +disable_zpool_cache() { + local facet=$1 + local poolname + + poolname=$(zpool_name $facet) + + if [[ -n "$poolname" ]]; then + do_facet $facet "$ZPOOL set cachefile=none $poolname" + fi +} + +# +# This and set_osd_param() shall be used to access OSD parameters # once existed under "obdfilter": # # mntdev @@ -797,7 +1009,7 @@ ostdevlabel() { # read_cache_enable # writethrough_cache_enable # -get_obdfilter_param() { +get_osd_param() { local nodes=$1 local device=${2:-$FSNAME-OST*} local name=$3 @@ -806,7 +1018,7 @@ get_obdfilter_param() { osd-*.$device.$name 2>&1" | grep -v 'Found no match' } -set_obdfilter_param() { +set_osd_param() { local nodes=$1 local device=${2:-$FSNAME-OST*} local name=$3 @@ -916,6 +1128,11 @@ mount_facet() { opts=$(csa_add "$opts" -o loop) fi + if [[ $(facet_fstype $facet) == zfs ]]; then + # import ZFS storage pool + import_zpool $facet || return ${PIPESTATUS[0]} + fi + echo "Starting ${facet}: $opts ${!dev} $mntpt" # for testing LU-482 error handling in mount_facets() and test_0a() if [ -f $TMP/test-lu482-trigger ]; then @@ -962,39 +1179,6 @@ start() { return $RC } -# -# When a ZFS OSD is made read-only by replay_barrier(), its pool is "freezed". -# Because stopping corresponding target may not clear this in-memory state, we -# need to zap the pool from memory by exporting and reimporting the pool. -# -# Although the uberblocks are not updated when a pool is freezed, transactions -# are still written to the disks. Modified blocks may be cached in memory when -# tests try reading them back. The export-and-reimport process also evicts any -# cached pool data from memory to provide the correct "data loss" semantics. 
-# -refresh_disk() { - local facet=$1 - local fstype=$(facet_fstype $facet) - local _dev - local dev - local poolname - - if [ "${fstype}" == "zfs" ]; then - _dev=$(facet_active $facet)_dev - dev=${!_dev} # expand _dev to its value, e.g. ${mds1_dev} - poolname="${dev%%/*}" # poolname is string before "/" - - if [ "${poolname}" == "" ]; then - echo "invalid dataset name: $dev" - return - fi - do_facet $facet "cp /etc/zfs/zpool.cache /tmp/zpool.cache.back" - do_facet $facet "$ZPOOL export ${poolname}" - do_facet $facet "$ZPOOL import -f -c /tmp/zpool.cache.back \ - ${poolname}" - fi -} - stop() { local running local facet=$1 @@ -1009,9 +1193,14 @@ stop() { do_facet ${facet} umount -d $@ $mntpt fi - # umount should block, but we should wait for unrelated obd's - # like the MGS or MGC to also stop. - wait_exit_ST ${facet} + # umount should block, but we should wait for unrelated obd's + # like the MGS or MGC to also stop. + wait_exit_ST ${facet} || return ${PIPESTATUS[0]} + + if [[ $(facet_fstype $facet) == zfs ]]; then + # export ZFS storage pool + export_zpool $facet + fi } # save quota version (both administrative and operational quotas) @@ -1044,13 +1233,14 @@ quota_save_version() { # XXX This function is kept for interoperability with old server (< 2.3.50), # it should be removed whenever we drop the interoperability for such # server. -quota_type () { - local fsname=${1:-$FSNAME} - local rc=0 - do_facet mgs lctl get_param mdd.${fsname}-MDT*.quota_type || rc=$? - do_nodes $(comma_list $(osts_nodes)) \ - lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$? - return $rc +quota_type() { + local fsname=${1:-$FSNAME} + local rc=0 + do_facet $SINGLEMDS lctl get_param mdd.${fsname}-MDT*.quota_type || + rc=$? + do_nodes $(comma_list $(osts_nodes)) \ + lctl get_param obdfilter.${fsname}-OST*.quota_type || rc=$? + return $rc } # XXX This function is kept for interoperability with old server (< 2.3.50), @@ -1092,11 +1282,11 @@ setup_quota_old(){ local quota_usrs=$QUOTA_USERS # get_filesystem_size - local disksz=$(lfs df $mntpt | grep "filesystem summary:" | awk '{print $3}') + local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}') local blk_soft=$((disksz + 1024)) local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over - local Inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | awk '{print $3}') + local Inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}') local i_soft=$Inodes local i_hard=$((i_soft + i_soft / 20)) @@ -1121,7 +1311,7 @@ setup_quota_old(){ mdt_quota_type() { local varsvc=${SINGLEMDS}_svc do_facet $SINGLEMDS $LCTL get_param -n \ - osd-$FSTYPE.${!varsvc}.quota_slave.enabled + osd-$(facet_fstype $SINGLEMDS).${!varsvc}.quota_slave.enabled } # get ost quota type @@ -1129,7 +1319,7 @@ ost_quota_type() { # All OSTs should have same quota type local varsvc=ost1_svc do_facet ost1 $LCTL get_param -n \ - osd-$FSTYPE.${!varsvc}.quota_slave.enabled + osd-$(facet_fstype ost1).${!varsvc}.quota_slave.enabled } # restore old quota type settings @@ -1149,6 +1339,30 @@ restore_quota() { fi } +# Handle the case when there is a space in the lfs df +# "filesystem summary" line the same as when there is no space. +# This will allow fixing the "lfs df" summary line in the future. +lfs_df() { + $LFS df $* | sed -e 's/filesystem /filesystem_/' +} + +# Get free inodes on the MDT specified by mdt index, free indoes on +# the whole filesystem will be returned when index == -1. 
+mdt_free_inodes() { + local index=$1 + local free_inodes + local mdt_uuid + + if [ $index -eq -1 ]; then + mdt_uuid="summary" + else + mdt_uuid=$(mdtuuid_from_index $index) + fi + + free_inodes=$(lfs_df -i $MOUNT | grep $mdt_uuid | awk '{print $4}') + echo $free_inodes +} + setup_quota(){ if [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.50) ]; then setup_quota_old $1 @@ -1175,13 +1389,11 @@ setup_quota(){ local quota_usrs=$QUOTA_USERS # get_filesystem_size - local disksz=$(lfs df $mntpt | grep "filesystem summary:" | - awk '{print $3}') + local disksz=$(lfs_df $mntpt | grep "summary" | awk '{print $2}') local blk_soft=$((disksz + 1024)) local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over - local inodes=$(lfs df -i $mntpt | grep "filesystem summary:" | - awk '{print $3}') + local inodes=$(lfs_df -i $mntpt | grep "summary" | awk '{print $2}') local i_soft=$inodes local i_hard=$((i_soft + i_soft / 20)) @@ -1463,7 +1675,6 @@ reboot_facet() { if [ "$FAILURE_MODE" = HARD ]; then reboot_node $(facet_active_host $facet) else - refresh_disk ${facet} sleep 10 fi } @@ -1526,6 +1737,8 @@ TESTLOG_PREFIX=$TESTLOG_PREFIX \ TESTNAME=$TESTNAME \ DBENCH_LIB=$DBENCH_LIB \ DBENCH_SRC=$DBENCH_SRC \ +CLIENT_COUNT=$((CLIENTCOUNT - 1)) \ +LFS=$LFS \ run_${load}.sh" & local ppid=$! log "Started client load: ${load} on $client" @@ -1734,30 +1947,44 @@ cleanup_check() { } wait_update () { - local node=$1 - local TEST=$2 - local FINAL=$3 - local MAX=${4:-90} - - local RESULT - local WAIT=0 - local sleep=1 - local print=10 - while [ true ]; do - RESULT=$(do_node $node "$TEST") - if [ "$RESULT" == "$FINAL" ]; then - [ -z "$RESULT" -o $WAIT -le $sleep ] || - echo "Updated after ${WAIT}s: wanted '$FINAL' got '$RESULT'" - return 0 - fi - [ $WAIT -ge $MAX ] && break - [ $((WAIT % print)) -eq 0 ] && - echo "Waiting $((MAX - WAIT)) secs for update" - WAIT=$((WAIT + sleep)) - sleep $sleep - done - echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'" - return 3 + local verbose=false + if [[ "$1" == "--verbose" ]]; then + shift + verbose=true + fi + + local node=$1 + local TEST=$2 + local FINAL=$3 + local MAX=${4:-90} + local RESULT + local PREV_RESULT + local WAIT=0 + local sleep=1 + local print=10 + + PREV_RESULT=$(do_node $node "$TEST") + while [ true ]; do + RESULT=$(do_node $node "$TEST") + if [[ "$RESULT" == "$FINAL" ]]; then + [[ -z "$RESULT" || $WAIT -le $sleep ]] || + echo "Updated after ${WAIT}s: wanted '$FINAL'"\ + "got '$RESULT'" + return 0 + fi + if [[ $verbose && "$RESULT" != "$PREV_RESULT" ]]; then + echo "Changed after ${WAIT}s: from '$PREV_RESULT'"\ + "to '$RESULT'" + PREV_RESULT=$RESULT + fi + [[ $WAIT -ge $MAX ]] && break + [[ $((WAIT % print)) -eq 0 ]] && + echo "Waiting $((MAX - WAIT)) secs for update" + WAIT=$((WAIT + sleep)) + sleep $sleep + done + echo "Update not seen after ${MAX}s: wanted '$FINAL' got '$RESULT'" + return 3 } wait_update_facet() { @@ -1767,7 +1994,10 @@ wait_update_facet() { } sync_all_data() { - do_node $(osts_nodes) "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 | + do_nodes $(comma_list $(mdts_nodes)) \ + "lctl set_param -n osd*.*MDT*.force_sync 1" + do_nodes $(comma_list $(osts_nodes)) \ + "lctl set_param -n osd*.*OS*.force_sync 1" 2>&1 | grep -v 'Found no match' } @@ -1883,30 +2113,50 @@ wait_recovery_complete () { } wait_mds_ost_sync () { - # just because recovery is done doesn't mean we've finished - # orphan cleanup. Wait for llogs to get synchronized. - echo "Waiting for orphan cleanup..." 
- # MAX value includes time needed for MDS-OST reconnection - local MAX=$(( TIMEOUT * 2 )) - local WAIT=0 - while [ $WAIT -lt $MAX ]; do - local -a sync=($(do_nodes $(comma_list $(osts_nodes)) \ - "$LCTL get_param -n obdfilter.*.mds_sync")) - local con=1 - local i - for ((i=0; i<${#sync[@]}; i++)); do - [ ${sync[$i]} -eq 0 ] && continue - # there is a not finished MDS-OST synchronization - con=0 - break; - done - sleep 2 # increase waiting time and cover statfs cache - [ ${con} -eq 1 ] && return 0 - echo "Waiting $WAIT secs for $facet mds-ost sync done." - WAIT=$((WAIT + 2)) - done - echo "$facet recovery not done in $MAX sec. $STATUS" - return 1 + # just because recovery is done doesn't mean we've finished + # orphan cleanup. Wait for llogs to get synchronized. + echo "Waiting for orphan cleanup..." + # MAX value includes time needed for MDS-OST reconnection + local MAX=$(( TIMEOUT * 2 )) + local WAIT_TIMEOUT=${1:-$MAX} + local WAIT=0 + local new_wait=true + local list=$(comma_list $(mdts_nodes)) + local cmd="$LCTL get_param -n osp.*osc*.old_sync_processed" + if ! do_facet $SINGLEMDS \ + "$LCTL list_param osp.*osc*.old_sync_processed 2> /dev/null" + then + # old way, use mds_sync + new_wait=false + list=$(comma_list $(osts_nodes)) + cmd="$LCTL get_param -n obdfilter.*.mds_sync" + fi + + echo "wait $WAIT_TIMEOUT secs maximumly for $list mds-ost sync done." + while [ $WAIT -lt $WAIT_TIMEOUT ]; do + local -a sync=($(do_nodes $list "$cmd")) + local con=1 + local i + for ((i=0; i<${#sync[@]}; i++)); do + if $new_wait; then + [ ${sync[$i]} -eq 1 ] && continue + else + [ ${sync[$i]} -eq 0 ] && continue + fi + # there is a not finished MDS-OST synchronization + con=0 + break; + done + sleep 2 # increase waiting time and cover statfs cache + [ ${con} -eq 1 ] && return 0 + echo "Waiting $WAIT secs for $list $i mds-ost sync done." + WAIT=$((WAIT + 2)) + done + + # show which nodes are not finished. + do_nodes $list "$cmd" + echo "$facet recovery node $i not done in $WAIT_TIMEOUT sec. 
$STATUS" + return 1 } wait_destroy_complete () { @@ -2045,32 +2295,58 @@ affected_facets () { } facet_failover() { - local facet=$1 - local sleep_time=$2 - local host=$(facet_active_host $facet) - - echo "Failing $facet on node $host" - - local affected=$(affected_facets $facet) + local facets=$1 + local sleep_time=$2 + local -a affecteds + local facet + local total=0 + local index=0 + local skip + + #Because it will only get up facets, we need get affected + #facets before shutdown + #For HARD Failure mode, it needs make sure facets on the same + #HOST will only be shutdown and reboot once + for facet in ${facets//,/ }; do + local affected_facet + skip=0 + #check whether facet has been included in other affected facets + for ((index=0; index<$total; index++)); do + [[ *,$facet,* == ,${affecteds[index]}, ]] && skip=1 + done - shutdown_facet $facet + if [ $skip -eq 0 ]; then + affecteds[$total]=$(affected_facets $facet) + total=$((total+1)) + fi + done - echo affected facets: $affected + for ((index=0; index<$total; index++)); do + facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1) + local host=$(facet_active_host $facet) + echo "Failing ${affecteds[index]} on $host" + shutdown_facet $facet + done - [ -n "$sleep_time" ] && sleep $sleep_time + for ((index=0; index<$total; index++)); do + facet=$(echo ${affecteds[index]} | tr -s " " | cut -d"," -f 1) + echo reboot facets: ${affecteds[index]} - reboot_facet $facet + reboot_facet $facet - change_active $affected + change_active ${affecteds[index]} - wait_for_facet $affected - # start mgs first if it is affected - if ! combined_mgs_mds && list_member $affected mgs; then - mount_facet mgs || error "Restart of mgs failed" - fi - # FIXME; has to be changed to mount all facets concurrently - affected=$(exclude_items_from_list $affected mgs) - mount_facets $affected + wait_for_facet ${affecteds[index]} + # start mgs first if it is affected + if ! combined_mgs_mds && + list_member ${affecteds[index]} mgs; then + mount_facet mgs || error "Restart of mgs failed" + fi + # FIXME; has to be changed to mount all facets concurrently + affected=$(exclude_items_from_list ${affecteds[index]} mgs) + echo mount facets: ${affecteds[index]} + mount_facets ${affecteds[index]} + done } obd_name() { @@ -2078,57 +2354,94 @@ obd_name() { } replay_barrier() { - local facet=$1 - do_facet $facet "sync; sync; sync" - df $MOUNT - - # make sure there will be no seq change - local clients=${CLIENTS:-$HOSTNAME} - local f=fsa-\\\$\(hostname\) - do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f" - do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi" + local facet=$1 + do_facet $facet "sync; sync; sync" + df $MOUNT - local svc=${facet}_svc - do_facet $facet $LCTL --device %${!svc} notransno - do_facet $facet $LCTL --device %${!svc} readonly - do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" - $LCTL mark "local REPLAY BARRIER on ${!svc}" + # make sure there will be no seq change + local clients=${CLIENTS:-$HOSTNAME} + local f=fsa-\\\$\(hostname\) + do_nodes $clients "mcreate $MOUNT/$f; rm $MOUNT/$f" + do_nodes $clients "if [ -d $MOUNT2 ]; then mcreate $MOUNT2/$f; rm $MOUNT2/$f; fi" + + local svc=${facet}_svc + do_facet $facet $LCTL --device ${!svc} notransno + # + # If a ZFS OSD is made read-only here, its pool is "freezed". This + # in-memory state has to be cleared by either rebooting the host or + # exporting and reimporting the pool. 
+ # + # Although the uberblocks are not updated when a pool is freezed, + # transactions are still written to the disks. Modified blocks may be + # cached in memory when tests try reading them back. The + # export-and-reimport process also evicts any cached pool data from + # memory to provide the correct "data loss" semantics. + # + # In the test framework, the exporting and importing operations are + # handled by stop() and mount_facet() separately, which are used + # inside fail() and fail_abort(). + # + do_facet $facet $LCTL --device ${!svc} readonly + do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" + $LCTL mark "local REPLAY BARRIER on ${!svc}" } replay_barrier_nodf() { - local facet=$1 echo running=${running} - do_facet $facet "sync; sync; sync" - local svc=${facet}_svc - echo Replay barrier on ${!svc} - do_facet $facet $LCTL --device %${!svc} notransno - do_facet $facet $LCTL --device %${!svc} readonly - do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" - $LCTL mark "local REPLAY BARRIER on ${!svc}" + local facet=$1 echo running=${running} + do_facet $facet "sync; sync; sync" + local svc=${facet}_svc + echo Replay barrier on ${!svc} + do_facet $facet $LCTL --device ${!svc} notransno + do_facet $facet $LCTL --device ${!svc} readonly + do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" + $LCTL mark "local REPLAY BARRIER on ${!svc}" } replay_barrier_nosync() { - local facet=$1 echo running=${running} - local svc=${facet}_svc - echo Replay barrier on ${!svc} - do_facet $facet $LCTL --device %${!svc} notransno - do_facet $facet $LCTL --device %${!svc} readonly - do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" - $LCTL mark "local REPLAY BARRIER on ${!svc}" + local facet=$1 echo running=${running} + local svc=${facet}_svc + echo Replay barrier on ${!svc} + do_facet $facet $LCTL --device ${!svc} notransno + do_facet $facet $LCTL --device ${!svc} readonly + do_facet $facet $LCTL mark "$facet REPLAY BARRIER on ${!svc}" + $LCTL mark "local REPLAY BARRIER on ${!svc}" +} + +# +# Get Lustre client uuid for a given Lustre mount point. +# +get_client_uuid() { + local mntpnt=${1:-$MOUNT} + + local name=$($LFS getname $mntpnt | cut -d' ' -f1) + local uuid=$($LCTL get_param -n llite.$name.uuid) + + echo -n $uuid } mds_evict_client() { - UUID=`lctl get_param -n mdc.${mds1_svc}-mdc-*.uuid` - do_facet mds1 "lctl set_param -n mdt.${mds1_svc}.evict_client $UUID" + local mntpnt=${1:-$MOUNT} + local uuid=$(get_client_uuid $mntpnt) + + do_facet $SINGLEMDS \ + "$LCTL set_param -n mdt.${mds1_svc}.evict_client $uuid" } ost_evict_client() { - UUID=`lctl get_param -n devices| grep ${ost1_svc}-osc- | egrep -v 'MDT' | awk '{print $5}'` - do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.evict_client $UUID" + local mntpnt=${1:-$MOUNT} + local uuid=$(get_client_uuid $mntpnt) + + do_facet ost1 \ + "$LCTL set_param -n obdfilter.${ost1_svc}.evict_client $uuid" } fail() { - facet_failover $* || error "failover: $?" - clients_up || error "post-failover df: $?" + local facets=$1 + local clients=${CLIENTS:-$HOSTNAME} + + facet_failover $* || error "failover: $?" + wait_clients_import_state "$clients" "$facets" FULL + clients_up || error "post-failover df: $?" 
} fail_nodf() { @@ -2139,7 +2452,6 @@ fail_nodf() { fail_abort() { local facet=$1 stop $facet - refresh_disk ${facet} change_active $facet wait_for_facet $facet mount_facet $facet -o abort_recovery @@ -2165,42 +2477,43 @@ host_nids_address() { } h2name_or_ip() { - if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else - echo $1"@$2" - fi + if [ "$1" = "'*'" ]; then echo \'*\'; else + echo $1"@$2" + fi } h2ptl() { - if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else - ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | awk '{print $1}'` - if [ -z "$ID" ]; then - echo "Could not get a ptl id for $1..." - exit 1 - fi - echo $ID"@ptl" - fi + if [ "$1" = "'*'" ]; then echo \'*\'; else + ID=`xtprocadmin -n $1 2>/dev/null | egrep -v 'NID' | \ + awk '{print $1}'` + if [ -z "$ID" ]; then + echo "Could not get a ptl id for $1..." + exit 1 + fi + echo $ID"@ptl" + fi } declare -fx h2ptl h2tcp() { - h2name_or_ip "$1" "tcp" + h2name_or_ip "$1" "tcp" } declare -fx h2tcp h2elan() { - if [ "$1" = "client" -o "$1" = "'*'" ]; then echo \'*\'; else - if type __h2elan >/dev/null 2>&1; then - ID=$(__h2elan $1) - else - ID=`echo $1 | sed 's/[^0-9]*//g'` - fi - echo $ID"@elan" - fi + if [ "$1" = "'*'" ]; then echo \'*\'; else + if type __h2elan >/dev/null 2>&1; then + ID=$(__h2elan $1) + else + ID=`echo $1 | sed 's/[^0-9]*//g'` + fi + echo $ID"@elan" + fi } declare -fx h2elan h2o2ib() { - h2name_or_ip "$1" "o2ib" + h2name_or_ip "$1" "o2ib" } declare -fx h2o2ib @@ -2294,21 +2607,30 @@ hostlist_expand() { } facet_host() { - local facet=$1 + local facet=$1 + local varname - [ "$facet" == client ] && echo -n $HOSTNAME && return - varname=${facet}_HOST - if [ -z "${!varname}" ]; then - if [ "${facet:0:3}" == "ost" ]; then - eval ${facet}_HOST=${ost_HOST} - fi - fi - echo -n ${!varname} + [ "$facet" == client ] && echo -n $HOSTNAME && return + varname=${facet}_HOST + if [ -z "${!varname}" ]; then + if [ "${facet:0:3}" == "ost" ]; then + local fh=${facet%failover}_HOST + eval export ${facet}_HOST=${!fh} + if [ -z "${!varname}" ]; then + eval export ${facet}_HOST=${ost_HOST} + fi + elif [ "${facet:0:3}" == "mdt" -o \ + "${facet:0:3}" == "mds" -o \ + "${facet:0:3}" == "mgs" ]; then + eval export ${facet}_HOST=${mds_HOST} + fi + fi + echo -n ${!varname} } facet_failover_host() { local facet=$1 - local var + local varname var=${facet}failover_HOST if [ -n "${!var}" ]; then @@ -2316,12 +2638,18 @@ facet_failover_host() { return fi + if [ "${facet:0:3}" == "mdt" -o "${facet:0:3}" == "mds" -o \ + "${facet:0:3}" == "mgs" ]; then + + eval export ${facet}failover_host=${mds_HOST} + echo ${mds_HOST} + return + fi + if [[ $facet == ost* ]]; then - var=ostfailover_HOST - if [ -n "${!var}" ]; then - echo ${!var} - return - fi + eval export ${facet}failover_host=${ost_HOST} + echo ${ost_HOST} + return fi } @@ -2351,6 +2679,24 @@ facet_active_host() { fi } +# Get the passive failover partner host of facet. 
+facet_passive_host() { + local facet=$1 + [[ $facet = client ]] && return + + local host=${facet}_HOST + local failover_host=${facet}failover_HOST + local active_host=$(facet_active_host $facet) + + [[ -z ${!failover_host} || ${!failover_host} = ${!host} ]] && return + + if [[ $active_host = ${!host} ]]; then + echo -n ${!failover_host} + else + echo -n ${!host} + fi +} + change_active() { local facetlist=$1 local facet @@ -2444,8 +2790,6 @@ get_env_vars() { echo -n " ${var}=\"$value\"" done - echo -n " USE_OFD=$USE_OFD" - for facet in ${facets//,/ }; do var=${facet}_FSTYPE if [ -n "${!var}" ]; then @@ -2538,18 +2882,32 @@ do_nodesv() { } add() { - local facet=$1 - shift - # make sure its not already running - stop ${facet} -f - rm -f $TMP/${facet}active - [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive - do_facet ${facet} $MKFS $* + local facet=$1 + shift + # make sure its not already running + stop ${facet} -f + rm -f $TMP/${facet}active + [[ $facet = mds1 ]] && combined_mgs_mds && rm -f $TMP/mgsactive + do_facet ${facet} $MKFS $* || return ${PIPESTATUS[0]} + + if [[ $(facet_fstype $facet) == zfs ]]; then + # + # After formatting a ZFS target, "cachefile=none" property will + # be set on the ZFS storage pool so that the pool is not + # automatically imported on system startup. And then the pool + # will be exported so as to leave the importing and exporting + # operations handled by mount_facet() and stop() separately. + # + refresh_partition_table $facet $(facet_vdevice $facet) + disable_zpool_cache $facet + export_zpool $facet + fi } +# Device formatted as ost ostdevname() { - num=$1 - DEVNAME=OSTDEV$num + local num=$1 + local DEVNAME=OSTDEV$num local fstype=$(facet_fstype ost$num) @@ -2558,8 +2916,9 @@ ostdevname() { #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num eval DEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};; zfs ) - #dataset name is independent of vdev device names - eval DEVPTR=${FSNAME}-ost${num}/ost${num};; + #try $OSTZFSDEVn - independent of vdev + DEVNAME=OSTZFSDEV$num + eval DEVPTR=${!DEVNAME:=${FSNAME}-ost${num}/ost${num}};; * ) error "unknown fstype!";; esac @@ -2567,9 +2926,11 @@ ostdevname() { echo -n $DEVPTR } +# Physical device location of data ostvdevname() { - num=$1 - DEVNAME=OSTDEV$num + local num=$1 + local DEVNAME + local VDEVPTR local fstype=$(facet_fstype ost$num) @@ -2578,7 +2939,9 @@ ostvdevname() { # vdevs are not supported by ldiskfs eval VDEVPTR="";; zfs ) - #if $OSTDEVn isn't defined, default is $OSTDEVBASE + num + #if $OSTDEVn isn't defined, default is $OSTDEVBASE{n} + # Device formated by zfs + DEVNAME=OSTDEV$num eval VDEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};; * ) error "unknown fstype!";; @@ -2587,19 +2950,21 @@ ostvdevname() { echo -n $VDEVPTR } +# Logical device formated for lustre mdsdevname() { - num=$1 - DEVNAME=MDSDEV$num + local num=$1 + local DEVNAME=MDSDEV$num local fstype=$(facet_fstype mds$num) case $fstype in ldiskfs ) - #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num + #if $MDSDEVn isn't defined, default is $MDSDEVBASE{n} eval DEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};; zfs ) - #dataset name is independent of vdev device names - eval DEVPTR=${FSNAME}-mdt${num}/mdt${num};; + # try $MDSZFSDEVn - independent of vdev + DEVNAME=MDSZFSDEV$num + eval DEVPTR=${!DEVNAME:=${FSNAME}-mdt${num}/mdt${num}};; * ) error "unknown fstype!";; esac @@ -2607,10 +2972,10 @@ mdsdevname() { echo -n $DEVPTR } +# Physical location of data mdsvdevname() { - num=$1 - DEVNAME=MDSDEV$num - + local VDEVPTR="" + local num=$1 local 
fstype=$(facet_fstype mds$num) case $fstype in @@ -2618,7 +2983,9 @@ mdsvdevname() { # vdevs are not supported by ldiskfs eval VDEVPTR="";; zfs ) - #if $MDSDEVn isn't defined, default is $MDSDEVBASE + num + # if $MDSDEVn isn't defined, default is $MDSDEVBASE{n} + # Device formated by ZFS + local DEVNAME=MDSDEV$num eval VDEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};; * ) error "unknown fstype!";; @@ -2628,38 +2995,51 @@ mdsvdevname() { } mgsdevname() { - DEVNAME=MGSDEV - - local fstype=$(facet_fstype mds$num) + local DEVPTR + local fstype=$(facet_fstype mgs) case $fstype in - ldiskfs ) - #if $MGSDEV isn't defined, default is $MDSDEV1 - eval DEVPTR=${!DEVNAME:=${MDSDEV1}};; - zfs ) - #dataset name is independent of vdev device names - eval DEVPTR=${FSNAME}-mgs/mgs;; - * ) - error "unknown fstype!";; + ldiskfs ) + if [ $(facet_host mgs) = $(facet_host mds1) ] && + ( [ -z "$MGSDEV" ] || [ $MGSDEV = $(mdsdevname 1) ] ); then + DEVPTR=$(mdsdevname 1) + else + DEVPTR=$MGSDEV + fi;; + zfs ) + if [ $(facet_host mgs) = $(facet_host mds1) ] && + ( [ -z "$MGSZFSDEV" ] && + [ -z "$MGSDEV" -o "$MGSDEV" = $(mdsvdevname 1) ] ); then + DEVPTR=$(mdsdevname 1) + else + DEVPTR=${MGSZFSDEV:-${FSNAME}-mgs/mgs} + fi;; + * ) + error "unknown fstype!";; esac - echo -n $DEVPTR + echo -n $DEVPTR } mgsvdevname() { - DEVNAME=MGSDEV + local VDEVPTR="" - local fstype=$(facet_fstype mds$num) + local fstype=$(facet_fstype mgs) case $fstype in - ldiskfs ) - # vdevs are not supported by ldiskfs - eval VDEVPTR="";; - zfs ) - #if $MGSDEV isn't defined, default is $MGSDEV1 - eval VDEVPTR=${!DEVNAME:=${MDSDEV1}};; - * ) - error "unknown fstype!";; + ldiskfs ) + # vdevs are not supported by ldiskfs + ;; + zfs ) + if [ $(facet_host mgs) = $(facet_host mds1) ] && + ( [ -z "$MGSDEV" ] && + [ -z "$MGSZFSDEV" -o "$MGSZFSDEV" = $(mdsdevname 1) ]); then + VDEVPTR=$(mdsvdevname 1) + elif [ -n "$MGSDEV" ]; then + VDEVPTR=$MGSDEV + fi;; + * ) + error "unknown fstype!";; esac echo -n $VDEVPTR @@ -2675,6 +3055,84 @@ facet_mntpt () { echo -n $mntpt } +mount_ldiskfs() { + local facet=$1 + local dev=$(facet_device $facet) + local mnt=$(facet_mntpt $facet) + local opts + + if ! do_facet $facet test -b $dev; then + opts="-o loop" + fi + do_facet $facet mount -t ldiskfs $opts $dev $mnt +} + +unmount_ldiskfs() { + local facet=$1 + local dev=$(facet_device $facet) + local mnt=$(facet_mntpt $facet) + + do_facet $facet umount -d $mnt +} + +var_name() { + echo -n "$1" | tr -c '[:alnum:]\n' '_' +} + +mount_zfs() { + local facet=$1 + local ds=$(facet_device $facet) + local mnt=$(facet_mntpt $facet) + local canmnt + local mntpt + + import_zpool $facet + canmnt=$(do_facet $facet $ZFS get -H -o value canmount $ds) + mntpt=$(do_facet $facet $ZFS get -H -o value mountpoint $ds) + do_facet $facet $ZFS set canmount=noauto $ds + # + # The "legacy" mount method is used here because "zfs unmount $mnt" + # calls stat(2) on $mnt/../*, which may include $MOUNT. If certain + # targets are not available at the time, the stat(2) on $MOUNT will + # hang. 
+ # + do_facet $facet $ZFS set mountpoint=legacy $ds + do_facet $facet mount -t zfs $ds $mnt + eval export mz_$(var_name ${facet}_$ds)_canmount=$canmnt + eval export mz_$(var_name ${facet}_$ds)_mountpoint=$mntpt +} + +unmount_zfs() { + local facet=$1 + local ds=$(facet_device $facet) + local mnt=$(facet_mntpt $facet) + local var_mntpt=mz_$(var_name ${facet}_$ds)_mountpoint + local var_canmnt=mz_$(var_name ${facet}_$ds)_canmount + local mntpt=${!var_mntpt} + local canmnt=${!var_canmnt} + + unset $var_mntpt + unset $var_canmnt + do_facet $facet umount $mnt + do_facet $facet $ZFS set mountpoint=$mntpt $ds + do_facet $facet $ZFS set canmount=$canmnt $ds + export_zpool $facet +} + +mount_fstype() { + local facet=$1 + local fstype=$(facet_fstype $facet) + + mount_$fstype $facet +} + +unmount_fstype() { + local facet=$1 + local fstype=$(facet_fstype $facet) + + unmount_$fstype $facet +} + ######## ## MountConf setup @@ -2734,7 +3192,8 @@ cleanupall() { } combined_mgs_mds () { - [[ $MDSDEV1 = $MGSDEV ]] && [[ $mds1_HOST = $mgs_HOST ]] + [[ "$(mdsdevname 1)" = "$(mgsdevname)" ]] && + [[ "$(facet_host mds1)" = "$(facet_host mgs)" ]] } lower() { @@ -2747,9 +3206,12 @@ upper() { mkfs_opts() { local facet=$1 + local dev=$2 + local fsname=${3:-"$FSNAME"} local type=$(facet_type $facet) local index=$(($(facet_number $facet) - 1)) local fstype=$(facet_fstype $facet) + local host=$(facet_host $facet) local opts local fs_mkfs_opts local var @@ -2758,14 +3220,17 @@ mkfs_opts() { return 1 fi - if [ $type == MGS ] || ( [ $type == MDS ] && combined_mgs_mds ); then + if [ $type == MGS ] || ( [ $type == MDS ] && + [ "$dev" == $(mgsdevname) ] && + [ "$host" == "$(facet_host mgs)" ] ); then opts="--mgs" else opts="--mgsnode=$MGSNID" fi if [ $type != MGS ]; then - opts+=" --fsname=$FSNAME --$(lower ${type/MDS/MDT}) --index=$index" + opts+=" --fsname=$fsname --$(lower ${type/MDS/MDT}) \ + --index=$index" fi var=${facet}failover_HOST @@ -2785,6 +3250,9 @@ mkfs_opts() { if [ $fstype == ldiskfs ]; then fs_mkfs_opts+=${MDSJOURNALSIZE:+" -J size=$MDSJOURNALSIZE"} + if [ ! -z $EJOURNAL ]; then + fs_mkfs_opts+=${MDSJOURNALSIZE:+" device=$EJOURNAL"} + fi fs_mkfs_opts+=${MDSISIZE:+" -i $MDSISIZE"} fi fi @@ -2835,23 +3303,24 @@ formatall() { echo Formatting mgs, mds, osts if ! 
combined_mgs_mds ; then echo "Format mgs: $(mgsdevname)" - add mgs $(mkfs_opts mgs) --reformat $(mgsdevname) \ - $(mgsvdevname) ${quiet:+>/dev/null} || exit 10 - fi + add mgs $(mkfs_opts mgs $(mgsdevname)) --reformat \ + $(mgsdevname) $(mgsvdevname) ${quiet:+>/dev/null} || + exit 10 + fi - for num in `seq $MDSCOUNT`; do - echo "Format mds$num: $(mdsdevname $num)" - add mds$num $(mkfs_opts mds$num) --reformat \ - $(mdsdevname $num) $(mdsvdevname $num) \ + for num in $(seq $MDSCOUNT); do + echo "Format mds$num: $(mdsdevname $num)" + add mds$num $(mkfs_opts mds$num $(mdsdevname ${num})) \ + --reformat $(mdsdevname $num) $(mdsvdevname $num) \ ${quiet:+>/dev/null} || exit 10 - done + done - for num in `seq $OSTCOUNT`; do - echo "Format ost$num: $(ostdevname $num)" - add ost$num $(mkfs_opts ost$num) --reformat \ - $(ostdevname $num) $(ostvdevname ${num}) \ + for num in $(seq $OSTCOUNT); do + echo "Format ost$num: $(ostdevname $num)" + add ost$num $(mkfs_opts ost$num $(ostdevname ${num})) \ + --reformat $(ostdevname $num) $(ostvdevname ${num}) \ ${quiet:+>/dev/null} || exit 10 - done + done } mount_client() { @@ -2899,23 +3368,31 @@ remount_client() zconf_mount `hostname` $1 || error "mount failed" } -writeconf_facet () { - local facet=$1 - local dev=$2 +writeconf_facet() { + local facet=$1 + local dev=$2 - do_facet $facet "$TUNEFS --writeconf $dev" + stop ${facet} -f + rm -f $TMP/${facet}active + do_facet ${facet} "$TUNEFS --quiet --writeconf $dev" || return 1 + return 0 } writeconf_all () { - for num in `seq $MDSCOUNT`; do - DEVNAME=$(mdsdevname $num) - writeconf_facet mds$num $DEVNAME - done + local mdt_count=${1:-$MDSCOUNT} + local ost_count=${2:-$OSTCOUNT} + local rc=0 - for num in `seq $OSTCOUNT`; do - DEVNAME=$(ostdevname $num) - writeconf_facet ost$num $DEVNAME - done + for num in $(seq $mdt_count); do + DEVNAME=$(mdsdevname $num) + writeconf_facet mds$num $DEVNAME || rc=$? + done + + for num in $(seq $ost_count); do + DEVNAME=$(ostdevname $num) + writeconf_facet ost$num $DEVNAME || rc=$? + done + return $rc } setupall() { @@ -3044,9 +3521,14 @@ init_facet_vars () { local varname=${facet}failover_HOST if [ -z "${!varname}" ]; then - eval $varname=$(facet_host $facet) + eval export $varname=$(facet_host $facet) fi + varname=${facet}_HOST + if [ -z "${!varname}" ]; then + eval export $varname=$(facet_host $facet) + fi + # ${facet}failover_dev is set in cfg file varname=${facet}failover_dev if [ -n "${!varname}" ] ; then @@ -3068,23 +3550,23 @@ init_facet_vars () { } init_facets_vars () { - local DEVNAME + local DEVNAME - if ! remote_mds_nodsh; then - for num in `seq $MDSCOUNT`; do - DEVNAME=`mdsdevname $num` - init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS - done - fi + if ! remote_mds_nodsh; then + for num in $(seq $MDSCOUNT); do + DEVNAME=`mdsdevname $num` + init_facet_vars mds$num $DEVNAME $MDS_MOUNT_OPTS + done + fi combined_mgs_mds || init_facet_vars mgs $(mgsdevname) $MGS_MOUNT_OPTS - remote_ost_nodsh && return - - for num in `seq $OSTCOUNT`; do - DEVNAME=`ostdevname $num` - init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS - done + if ! 
remote_ost_nodsh; then + for num in $(seq $OSTCOUNT); do + DEVNAME=$(ostdevname $num) + init_facet_vars ost$num $DEVNAME $OST_MOUNT_OPTS + done + fi } osc_ensure_active () { @@ -3134,26 +3616,20 @@ init_param_vars () { osc_ensure_active $SINGLEMDS $TIMEOUT osc_ensure_active client $TIMEOUT - local jobid_var - if [ -z "$(lctl get_param -n mdc.*.connect_flags | grep jobstats)" ]; then - jobid_var="none" - elif [ $JOBSTATS_AUTO -ne 0 ]; then - echo "enable jobstats, set job scheduler as $JOBID_VAR" - jobid_var=$JOBID_VAR - else - jobid_var=`$LCTL get_param -n jobid_var` - if [ $jobid_var != "disable" ]; then - echo "disable jobstats as required" - jobid_var="disable" - else - jobid_var="none" - fi - fi + if [ -n "$(lctl get_param -n mdc.*.connect_flags|grep jobstats)" ]; then + local current_jobid_var=$($LCTL get_param -n jobid_var) - if [ $jobid_var == $JOBID_VAR -o $jobid_var == "disable" ]; then - do_facet mgs $LCTL conf_param $FSNAME.sys.jobid_var=$jobid_var - wait_update $HOSTNAME "$LCTL get_param -n jobid_var" \ - $jobid_var || return 1 + if [ $JOBID_VAR = "existing" ]; then + echo "keeping jobstats as $current_jobid_var" + elif [ $current_jobid_var != $JOBID_VAR ]; then + echo "seting jobstats to $JOBID_VAR" + + set_conf_param_and_check client \ + "$LCTL get_param -n jobid_var" \ + "$FSNAME.sys.jobid_var" $JOBID_VAR + fi + else + echo "jobstats not supported by server" fi if [ $QUOTA_AUTO -ne 0 ]; then @@ -3257,17 +3733,24 @@ is_mounted () { } is_empty_dir() { - [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0 - return 1 + [ $(find $1 -maxdepth 1 -print | wc -l) = 1 ] && return 0 + return 1 } # empty lustre filesystem may have empty directories lost+found and .lustre is_empty_fs() { - [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \ - -print | wc -l) = 1 ] || return 1 - [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found && return 0 - [ ! -d $1/.lustre ] || is_empty_dir $1/.lustre && return 0 - return 1 + # exclude .lustre & lost+found + [ $(find $1 -maxdepth 1 -name lost+found -o -name .lustre -prune -o \ + -print | wc -l) = 1 ] || return 1 + [ ! -d $1/lost+found ] || is_empty_dir $1/lost+found || return 1 + if [ $(lustre_version_code $SINGLEMDS) -gt $(version_code 2.4.0) ]; then + # exclude .lustre/fid (LU-2780) + [ $(find $1/.lustre -maxdepth 1 -name fid -prune -o \ + -print | wc -l) = 1 ] || return 1 + else + [ ! 
-d $1/.lustre ] || is_empty_dir $1/.lustre || return 1 + fi + return 0 } check_and_setup_lustre() { @@ -3328,6 +3811,21 @@ check_and_setup_lustre() { set_default_debug_nodes $(comma_list $(nodes_list)) fi + if [ $(lower $OSD_TRACK_DECLARES_LBUG) == 'yes' ] ; then + local facets="" + [ "$(facet_fstype ost1)" = "ldiskfs" ] && + facets="$(get_facets OST)" + [ "$(facet_fstype mds1)" = "ldiskfs" ] && + facets="$facets,$(get_facets MDS)" + [ "$(facet_fstype mgs)" = "ldiskfs" ] && + facets="$facets,mgs" + local nodes="$(facets_hosts ${facets})" + if [ -n "$nodes" ] ; then + do_nodes $nodes "$LCTL set_param \ + osd-ldiskfs.track_declares_assert=1 || true" + fi + fi + init_gss if $GSS; then set_flavor_all $SEC @@ -3371,7 +3869,7 @@ get_mnt_devs() { local dev if [ "$type" == ost ]; then - devs=$(get_obdfilter_param $node "" mntdev) + devs=$(get_osd_param $node "" mntdev) else devs=$(do_node $node \ "lctl get_param -n osd-*.$FSNAME-M*.mntdev") @@ -3407,7 +3905,7 @@ run_e2fsck() { local extra_opts=$3 df > /dev/null # update statfs data on disk - local cmd="$E2FSCK -d -v -t -t -f -n $extra_opts $target_dev" + local cmd="$E2FSCK -d -v -t -t -f $extra_opts $target_dev" echo $cmd local rc=0 do_node $node $cmd || rc=$? @@ -3416,13 +3914,27 @@ run_e2fsck() { return 0 } +# +# Run resize2fs on MDT or OST device. +# +run_resize2fs() { + local facet=$1 + local device=$2 + local size=$3 + shift 3 + local opts="$@" + + do_facet $facet "$RESIZE2FS $opts $device $size" +} + # verify a directory is shared among nodes. check_shared_dir() { local dir=$1 + local list=${2:-$(comma_list $(nodes_list))} [ -z "$dir" ] && return 1 - do_rpc_nodes "$(comma_list $(nodes_list))" check_logdir $dir - check_write_access $dir || return 1 + do_rpc_nodes "$list" check_logdir $dir + check_write_access $dir "$list" || return 1 return 0 } @@ -3443,14 +3955,14 @@ generate_db() { [ $MDSCOUNT -eq 1 ] || error "CMD is not supported" - run_e2fsck $(mdts_nodes) $MDTDEV "--mdsdb $MDSDB" + run_e2fsck $(mdts_nodes) $MDTDEV "-n --mdsdb $MDSDB" i=0 ostidx=0 OSTDB_LIST="" for node in $(osts_nodes); do for dev in ${OSTDEVS[i]}; do - run_e2fsck $node $dev "--mdsdb $MDSDB --ostdb $OSTDB-$ostidx" + run_e2fsck $node $dev "-n --mdsdb $MDSDB --ostdb $OSTDB-$ostidx" OSTDB_LIST="$OSTDB_LIST $OSTDB-$ostidx" ostidx=$((ostidx + 1)) done @@ -3458,17 +3970,55 @@ generate_db() { done } +# Run lfsck on server node if lfsck can't be found on client (LU-2571) +run_lfsck_remote() { + local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT" + local client=$1 + local mounted=true + local rc=0 + + #Check if lustre is already mounted + do_rpc_nodes $client is_mounted $MOUNT || mounted=false + if ! $mounted; then + zconf_mount $client $MOUNT || + error "failed to mount Lustre on $client" + fi + #Run lfsck + echo $cmd + do_node $node $cmd || rc=$? + #Umount if necessary + if ! $mounted; then + zconf_umount $client $MOUNT || + error "failed to unmount Lustre on $client" + fi + + [ $rc -le $FSCK_MAX_ERR ] || + error "$cmd returned $rc, should be <= $FSCK_MAX_ERR" + echo "lfsck finished with rc=$rc" + + return $rc +} + run_lfsck() { - local cmd="$LFSCK_BIN -c -l --mdsdb $MDSDB --ostdb $OSTDB_LIST $MOUNT" - echo $cmd - local rc=0 - eval $cmd || rc=$? 
- [ $rc -le $FSCK_MAX_ERR ] || \ - error "$cmd returned $rc, should be <= $FSCK_MAX_ERR" - echo "lfsck finished with rc=$rc" + local facets="client $SINGLEMDS" + local found=false + local facet + local node + local rc=0 - rm -rvf $MDSDB* $OSTDB* || true - return 0 + for facet in $facets; do + node=$(facet_active_host $facet) + if check_progs_installed $node $LFSCK_BIN; then + found=true + break + fi + done + ! $found && error "None of \"$facets\" supports lfsck" + + run_lfsck_remote $node || rc=$? + + rm -rvf $MDSDB* $OSTDB* || true + return $rc } check_and_cleanup_lustre() { @@ -3619,7 +4169,7 @@ get_facets () { case $type in MGS ) list="$list $name";; - MDS|OST ) local count=${type}COUNT + MDS|OST|AGT ) local count=${type}COUNT for ((i=1; i<=${!count}; i++)) do list="$list ${name}$i" done;; @@ -3709,14 +4259,29 @@ drop_reint_reply() { return $RC } +drop_update_reply() { +# OBD_FAIL_UPDATE_OBJ_NET_REP + local index=$1 + shift 1 + RC=0 + do_facet mds${index} lctl set_param fail_loc=0x1701 + do_facet client "$@" || RC=$? + do_facet mds${index} lctl set_param fail_loc=0 + return $RC +} + pause_bulk() { #define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214 - RC=0 - do_facet ost1 lctl set_param fail_loc=0x214 - do_facet client "$1" || RC=$? - do_facet client "sync" - do_facet ost1 lctl set_param fail_loc=0 - return $RC + RC=0 + + local timeout=${2:-0} + # default is (obd_timeout / 4) if unspecified + echo "timeout is $timeout/$2" + do_facet ost1 lctl set_param fail_val=$timeout fail_loc=0x80000214 + do_facet client "$1" || RC=$? + do_facet client "sync" + do_facet ost1 lctl set_param fail_loc=0 + return $RC } drop_ldlm_cancel() { @@ -3733,11 +4298,11 @@ drop_ldlm_cancel() { drop_bl_callback() { #define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305 - RC=0 - do_facet client lctl set_param fail_loc=0x305 - do_facet client "$@" || RC=$? - do_facet client lctl set_param fail_loc=0 - return $RC + RC=0 + do_facet client lctl set_param fail_loc=0x80000305 + do_facet client "$@" || RC=$? 
+ do_facet client lctl set_param fail_loc=0 + return $RC } drop_ldlm_reply() { @@ -3758,7 +4323,7 @@ clear_failloc() { } set_nodes_failloc () { - do_nodes $(comma_list $1) lctl set_param fail_loc=$2 + do_nodes $(comma_list $1) lctl set_param fail_val=0 fail_loc=$2 } cancel_lru_locks() { @@ -3851,28 +4416,37 @@ log_trace_dump() { ################################## error_noexit() { - local TYPE=${TYPE:-"FAIL"} + local TYPE=${TYPE:-"FAIL"} - local dump=true - # do not dump logs if $1=false - if [ "x$1" = "xfalse" ]; then - shift - dump=false - fi + local dump=true + # do not dump logs if $1=false + if [ "x$1" = "xfalse" ]; then + shift + dump=false + fi - log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ " - log_trace_dump - mkdir -p $LOGDIR - # We need to dump the logs on all nodes - if $dump; then - gather_logs $(comma_list $(nodes_list)) - fi + log " ${TESTSUITE} ${TESTNAME}: @@@@@@ ${TYPE}: $@ " + log_trace_dump + + mkdir -p $LOGDIR + # We need to dump the logs on all nodes + if $dump; then + gather_logs $(comma_list $(nodes_list)) + fi debugrestore [ "$TESTSUITELOG" ] && echo "$TESTSUITE: $TYPE: $TESTNAME $@" >> $TESTSUITELOG - echo "$@" > $LOGDIR/err + if [ -z "$*" ]; then + echo "error() without useful message, please fix" > $LOGDIR/err + else + if [[ `echo $TYPE | grep ^IGNORE` ]]; then + echo "$@" > $LOGDIR/ignore + else + echo "$@" > $LOGDIR/err + fi + fi } exit_status () { @@ -3884,21 +4458,22 @@ exit_status () { } error() { - error_noexit "$@" - exit 1 + error_noexit "$@" + exit 1 } error_exit() { - error "$@" + error "$@" } # use only if we are ignoring failures for this test, bugno required. # (like ALWAYS_EXCEPT, but run the test and ignore the results.) -# e.g. error_ignore 5494 "your message" +# e.g. error_ignore bz5494 "your message" or +# error_ignore LU-5494 "your message" error_ignore() { - local TYPE="IGNORE (bz$1)" - shift - error_noexit "$@" + local TYPE="IGNORE ($1)" + shift + error_noexit "$@" } error_and_remount() { @@ -3908,7 +4483,7 @@ error_and_remount() { } skip_env () { - $FAIL_ON_SKIP_ENV && error false $@ || skip $@ + $FAIL_ON_SKIP_ENV && error false $@ || skip $@ } skip() { @@ -3988,7 +4563,6 @@ run_test() { return $? fi LAST_SKIPPED="y" - echo -n "." return 0 fi @@ -4109,31 +4683,35 @@ banner() { } # -# Run a single test function and cleanup after it. +# Run a single test function and cleanup after it. # # This function should be run in a subshell so the test func can # exit() without stopping the whole script. # run_one() { - local testnum=$1 - local message=$2 - tfile=f.${TESTSUITE}.${testnum} - export tdir=d0.${TESTSUITE}/d${base} - export TESTNAME=test_$testnum - local SAVE_UMASK=`umask` - umask 0022 - - banner "test $testnum: $message" - test_${testnum} || error "test_$testnum failed with $?" - cd $SAVE_PWD - reset_fail_loc - check_grant ${testnum} || error "check_grant $testnum failed with $?" - check_catastrophe || error "LBUG/LASSERT detected" - ps auxww | grep -v grep | grep -q multiop && error "multiop still running" - unset TESTNAME - unset tdir - umask $SAVE_UMASK - return 0 + local testnum=$1 + local message=$2 + export tfile=f${testnum}.${TESTSUITE} + export tdir=d${testnum}.${TESTSUITE} + export TESTNAME=test_$testnum + local SAVE_UMASK=`umask` + umask 0022 + + banner "test $testnum: $message" + test_${testnum} || error "test_$testnum failed with $?" + cd $SAVE_PWD + reset_fail_loc + check_grant ${testnum} || error "check_grant $testnum failed with $?" 
+ check_catastrophe || error "LBUG/LASSERT detected" + if [ "$PARALLEL" != "yes" ]; then + ps auxww | grep -v grep | grep -q multiop && + error "multiop still running" + fi + unset TESTNAME + unset tdir + unset tfile + umask $SAVE_UMASK + return 0 } # @@ -4148,6 +4726,7 @@ run_one_logged() { local name=${TESTSUITE}.test_${1}.test_log.$(hostname -s).log local test_log=$LOGDIR/$name rm -rf $LOGDIR/err + rm -rf $LOGDIR/ignore rm -rf $LOGDIR/skip local SAVE_UMASK=`umask` umask 0022 @@ -4157,7 +4736,7 @@ run_one_logged() { (run_one $1 "$2") 2>&1 | tee -i $test_log local RC=${PIPESTATUS[0]} - [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && \ + [ $RC -ne 0 ] && [ ! -f $LOGDIR/err ] && echo "test_$1 returned $RC" | tee $LOGDIR/err duration=$((`date +%s` - $BEFORE)) @@ -4165,11 +4744,17 @@ run_one_logged() { if [[ -f $LOGDIR/err ]]; then TEST_ERROR=$(cat $LOGDIR/err) + elif [[ -f $LOGDIR/ignore ]]; then + TEST_ERROR=$(cat $LOGDIR/ignore) elif [[ -f $LOGDIR/skip ]]; then TEST_ERROR=$(cat $LOGDIR/skip) fi log_sub_test_end $TEST_STATUS $duration "$RC" "$TEST_ERROR" + if [[ "$TEST_STATUS" != "SKIP" ]] && [[ -f $TF_SKIP ]]; then + rm -f $TF_SKIP + fi + if [ -f $LOGDIR/err ]; then $FAIL_ON_ERROR && exit $RC fi @@ -4264,9 +4849,45 @@ mdtuuid_from_index() $LFS mdts $2 | sed -ne "/^$1: /s/.* \(.*\) .*$/\1/p" } +# Description: +# Return unique identifier for given hostname +host_id() { + local host_name=$1 + echo $host_name | md5sum | cut -d' ' -f1 +} + +# Description: +# Returns list of ip addresses for each interface +local_addr_list() { + ip addr | awk '/inet\ / {print $2}' | awk -F\/ '{print $1}' +} + +is_local_addr() { + local addr=$1 + # Cache address list to avoid mutiple execution of local_addr_list + LOCAL_ADDR_LIST=${LOCAL_ADDR_LIST:-$(local_addr_list)} + local i + for i in $LOCAL_ADDR_LIST ; do + [[ "$i" == "$addr" ]] && return 0 + done + return 1 +} + +local_node() { + local host_name=$1 + local is_local="IS_LOCAL_$(host_id $host_name)" + if [ -z "${!is_local-}" ] ; then + eval $is_local=0 + local host_ip=$($LUSTRE/tests/resolveip $host_name) + is_local_addr "$host_ip" && eval $is_local=1 + fi + [[ "${!is_local}" == "1" ]] +} + remote_node () { - local node=$1 - [ "$node" != "$(hostname)" ] + local node=$1 + local_node $node && return 1 + return 0 } remote_mds () @@ -4326,64 +4947,126 @@ local_mode () $(single_local_node $(comma_list $(nodes_list))) } -mdts_nodes () { - local MDSNODES - local NODES_sort - for num in `seq $MDSCOUNT`; do - MDSNODES="$MDSNODES $(facet_host mds$num)" - done - NODES_sort=$(for i in $MDSNODES; do echo $i; done | sort -u) - - echo $NODES_sort -} - remote_servers () { remote_ost && remote_mds } +# Get the active nodes for facets. facets_nodes () { - local facets=$1 - local nodes - local NODES_sort + local facets=$1 + local facet + local nodes + local nodes_sort + local i - for facet in ${facets//,/ }; do - if [ "$FAILURE_MODE" = HARD ]; then - nodes="$nodes $(facet_active_host $facet)" - else - nodes="$nodes $(facet_host $facet)" - fi - done - NODES_sort=$(for i in $nodes; do echo $i; done | sort -u) + for facet in ${facets//,/ }; do + nodes="$nodes $(facet_active_host $facet)" + done + + nodes_sort=$(for i in $nodes; do echo $i; done | sort -u) + echo -n $nodes_sort +} - echo $NODES_sort +# Get all of the active MDS nodes. +mdts_nodes () { + echo -n $(facets_nodes $(get_facets MDS)) } +# Get all of the active OSS nodes. 
 osts_nodes () {
-        local facets=$(get_facets OST)
-        local nodes=$(facets_nodes $facets)
+        echo -n $(facets_nodes $(get_facets OST))
+}
 
-        echo $nodes
+# Get all of the active AGT (HSM agent) nodes.
+agts_nodes () {
+        echo -n $(facets_nodes $(get_facets AGT))
 }
 
+# Get all of the client nodes and active server nodes.
 nodes_list () {
-        # FIXME. We need a list of clients
-        local myNODES=$HOSTNAME
-        local myNODES_sort
-
-        # CLIENTS (if specified) contains the local client
-        [ -n "$CLIENTS" ] && myNODES=${CLIENTS//,/ }
+        local nodes=$HOSTNAME
+        local nodes_sort
+        local i
 
-        if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
-                myNODES="$myNODES $(facets_nodes $(get_facets))"
-        fi
+        # CLIENTS (if specified) contains the local client
+        [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
 
-        myNODES_sort=$(for i in $myNODES; do echo $i; done | sort -u)
+        if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+                nodes="$nodes $(facets_nodes $(get_facets))"
+        fi
 
-        echo $myNODES_sort
+        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+        echo -n $nodes_sort
 }
 
+# Get all of the remote client nodes and remote active server nodes.
 remote_nodes_list () {
-        echo $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+        echo -n $(nodes_list) | sed -re "s/\<$HOSTNAME\>//g"
+}
+
+# Get all of the MDS nodes, including active and passive nodes.
+all_mdts_nodes () {
+        local host
+        local failover_host
+        local nodes
+        local nodes_sort
+        local i
+
+        for i in $(seq $MDSCOUNT); do
+                host=mds${i}_HOST
+                failover_host=mds${i}failover_HOST
+                nodes="$nodes ${!host} ${!failover_host}"
+        done
+
+        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+        echo -n $nodes_sort
+}
+
+# Get all of the OSS nodes, including active and passive nodes.
+all_osts_nodes () {
+        local host
+        local failover_host
+        local nodes
+        local nodes_sort
+        local i
+
+        for i in $(seq $OSTCOUNT); do
+                host=ost${i}_HOST
+                failover_host=ost${i}failover_HOST
+                nodes="$nodes ${!host} ${!failover_host}"
+        done
+
+        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+        echo -n $nodes_sort
+}
+
+# Get all of the server nodes, including active and passive nodes.
+all_server_nodes () {
+        local nodes
+        local nodes_sort
+        local i
+
+        nodes="$mgs_HOST $mgsfailover_HOST $(all_mdts_nodes) $(all_osts_nodes)"
+
+        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+        echo -n $nodes_sort
+}
+
+# Get all of the client and server nodes, including active and passive nodes.
+all_nodes () {
+        local nodes=$HOSTNAME
+        local nodes_sort
+        local i
+
+        # CLIENTS (if specified) contains the local client
+        [ -n "$CLIENTS" ] && nodes=${CLIENTS//,/ }
+
+        if [ "$PDSH" -a "$PDSH" != "no_dsh" ]; then
+                nodes="$nodes $(all_server_nodes)"
+        fi
+
+        nodes_sort=$(for i in $nodes; do echo $i; done | sort -u)
+        echo -n $nodes_sort
 }
 
 init_clients_lists () {
@@ -4464,10 +5147,11 @@ generate_machine_file() {
 }
 
 get_stripe () {
-        local file=$1/stripe
-        touch $file
-        $LFS getstripe -v $file || error
-        rm -f $file
+        local file=$1/stripe
+
+        touch $file
+        $LFS getstripe -v $file || error "getstripe $file failed"
+        rm -f $file
 }
 
 setstripe_nfsserver () {
@@ -4684,21 +5368,33 @@ calc_osc_kbytes () {
         $LCTL get_param -n osc.*[oO][sS][cC][-_][0-9a-f]*.$1 | calc_sum
 }
 
-# save_lustre_params(node, parameter_mask)
-# generate a stream of formatted strings (<node> <param name>=<param value>)
+# save_lustre_params(comma separated facet list, parameter_mask)
+# generate a stream of formatted strings (<facet> <param name>=<param value>)
 save_lustre_params() {
-        local s
-        do_nodesv $1 "lctl get_param $2 | while read s; do echo \\\$s; done"
+        local facets=$1
+        local facet
+        local nodes
+        local node
+
+        for facet in ${facets//,/ }; do
+                node=$(facet_active_host $facet)
+                [[ *\ $node\ * = " $nodes " ]] && continue
+                nodes="$nodes $node"
+
+                do_node $node "$LCTL get_param $2 |
+                        while read s; do echo $facet \\\$s; done"
+        done
}
 
 # restore lustre parameters from input stream, produces by save_lustre_params
 restore_lustre_params() {
-        local node
-        local name
-        local val
-        while IFS=" =" read node name val; do
-                do_node ${node//:/} "lctl set_param -n $name $val"
-        done
+        local facet
+        local name
+        local val
+
+        while IFS=" =" read facet name val; do
+                do_facet $facet "$LCTL set_param -n $name $val"
+        done
 }
 
 check_catastrophe() {
@@ -4708,9 +5404,17 @@ check_catastrophe() {
 
         [ -z "$rnodes" ] && return 0
 
-        do_nodes "$rnodes" "rc=\\\$([ -f $C ] && echo \\\$(< $C) || echo 0);
+        local data
+        data=$(do_nodes "$rnodes" "rc=\\\$([ -f $C ] &&
+                echo \\\$(< $C) || echo 0);
                 if [ \\\$rc -ne 0 ]; then echo \\\$(hostname): \\\$rc; fi
-                exit \\\$rc;"
+                exit \\\$rc")
+        local rc=$?
+        if [ -n "$data" ]; then
+                echo $data
+                return $rc
+        fi
+        return 0
 }
 
 # CMD: determine mds index where directory inode presents
@@ -4741,10 +5445,11 @@ get_mds_dir () {
 }
 
 mdsrate_cleanup () {
-        if [ -d $4 ]; then
-                mpi_run -np $1 -machinefile $2 ${MDSRATE} --unlink --nfiles $3 --dir $4 --filefmt $5 $6
-                rmdir $4
-        fi
+        if [ -d $4 ]; then
+                mpi_run ${MACHINEFILE_OPTION} $2 -np $1 ${MDSRATE} --unlink \
+                        --nfiles $3 --dir $4 --filefmt $5 $6
+                rmdir $4
+        fi
 }
 
 delayed_recovery_enabled () {
@@ -4754,7 +5459,7 @@ delayed_recovery_enabled () {
 
 ########################
 
-convert_facet2label() {
+convert_facet2label() {
         local facet=$1
 
         if [ x$facet = xost ]; then
@@ -4765,13 +5470,13 @@ convert_facet2label() {
 
                 if [ -n ${!varsvc} ]; then
                         echo ${!varsvc}
-                else
+                else
                         error "No lablel for $facet!"
                 fi
 }
 
 get_clientosc_proc_path() {
-        echo "${1}-osc-[^M]*"
+        echo "${1}-osc-*"
 }
 
 get_lustre_version () {
@@ -4835,7 +5540,7 @@ _wait_import_state () {
         local CONN_STATE
         local i=0
 
-        CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+        CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
         while [ "${CONN_STATE}" != "${expected}" ]; do
                 if [ "${expected}" == "DISCONN" ]; then
                         # for disconn we can check after proc entry is removed
@@ -4848,7 +5553,8 @@ _wait_import_state () {
                 error "can't put import for $CONN_PROC into ${expected} state after $i sec, have ${CONN_STATE}" && \
                         return 1
                 sleep 1
-                CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2)
+                # Add uniq for multi-mount case
+                CONN_STATE=$($LCTL get_param -n $CONN_PROC 2>/dev/null | cut -f2 | uniq)
                 i=$(($i + 1))
         done
 
@@ -4867,6 +5573,14 @@ wait_import_state() {
         done
 }
 
+wait_import_state_mount() {
+        if ! is_mounted $MOUNT && ! is_mounted $MOUNT2; then
+                return 0
+        fi
+
+        wait_import_state $*
+}
+
 # One client request could be timed out because server was not ready
 # when request was sent by client.
 # The request timeout calculation details :
@@ -4907,20 +5621,42 @@ request_timeout () {
         echo $(( init_connect_timeout + at_min ))
 }
 
-wait_osc_import_state() {
+_wait_osc_import_state() {
         local facet=$1
         local ost_facet=$2
         local expected=$3
         local ost=$(get_osc_import_name $facet $ost_facet)
-        local param="osc.${ost}.ost_server_uuid"
+        local param="osc.${ost}.ost_server_uuid"
+        local i=0
 
         # 1. wait the deadline of client 1st request (it could be skipped)
         # 2. wait the deadline of client 2nd request
         local maxtime=$(( 2 * $(request_timeout $facet)))
 
-        if ! do_rpc_nodes "$(facet_host $facet)" \
-                        _wait_import_state $expected $param $maxtime; then
+        # During setup time the osc might not be set up yet, so wait until
+        # list_param returns a valid value.  Also, if there are multiple
+        # osc entries, list all of them before starting to wait.
+        local params=$($LCTL list_param $param 2>/dev/null || true)
+        while [ -z "$params" ]; do
+                if [ $i -ge $maxtime ]; then
+                        echo "can't get $param by list_param in $maxtime secs"
+                        if [[ $facet != client* ]]; then
+                                echo "Go with $param directly"
+                                params=$param
+                                break
+                        else
+                                return 1
+                        fi
+                fi
+                sleep 1
+                i=$((i + 1))
+                params=$($LCTL list_param $param 2>/dev/null || true)
+        done
+
+        if ! do_rpc_nodes "$(facet_active_host $facet)" \
+                        wait_import_state $expected "$params" $maxtime; then
                 error "import is not in ${expected} state"
                 return 1
         fi
@@ -4928,6 +5664,21 @@ wait_osc_import_state() {
         return 0
 }
 
+wait_osc_import_state() {
+        local facet=$1
+        local ost_facet=$2
+        local expected=$3
+        local num
+
+        if [[ $facet = mds ]]; then
+                for num in $(seq $MDSCOUNT); do
+                        _wait_osc_import_state mds$num "$ost_facet" "$expected"
+                done
+        else
+                _wait_osc_import_state "$facet" "$ost_facet" "$expected"
+        fi
+}
+
 get_clientmdc_proc_path() {
         echo "${1}-mdc-*"
 }
@@ -4939,7 +5690,9 @@ do_rpc_nodes () {
         [ -z "$list" ] && return 0
 
         # Add paths to lustre tests for 32 and 64 bit systems.
-        local RPATH="PATH=$RLUSTRE/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:$PATH"
+        local LIBPATH="/usr/lib/lustre/tests:/usr/lib64/lustre/tests:"
+        local TESTPATH="$RLUSTRE/tests:"
+        local RPATH="PATH=${TESTPATH}${LIBPATH}${PATH}:/sbin:/bin:/usr/sbin:"
         do_nodesv $list "${RPATH} NAME=${NAME} sh rpc.sh $@ "
 }
 
@@ -4965,7 +5718,7 @@ wait_clients_import_state () {
                 local params=$(expand_list $params $proc_path)
         done
 
-        if ! do_rpc_nodes "$list" wait_import_state $expected $params; then
+        if ! do_rpc_nodes "$list" wait_import_state_mount $expected $params; then
                 error "import is not in ${expected} state"
                 return 1
         fi
@@ -5477,19 +6230,20 @@ check_logdir() {
 }
 
 check_write_access() {
-        local dir=$1
-        local node
-        local file
+        local dir=$1
+        local list=${2:-$(comma_list $(nodes_list))}
+        local node
+        local file
 
-        for node in $(nodes_list); do
-                file=$dir/check_file.$(short_hostname $node)
-                if [[ ! -f "$file" ]]; then
-                        # Logdir not accessible/writable from this node.
-                        return 1
-                fi
-                rm -f $file || return 1
-        done
-        return 0
+        for node in ${list//,/ }; do
+                file=$dir/check_file.$(short_nodename $node)
+                if [[ ! -f "$file" ]]; then
+                        # Logdir not accessible/writable from this node.
+                        return 1
+                fi
+                rm -f $file || return 1
+        done
+        return 0
 }
 
 init_logging() {
@@ -5557,16 +6311,44 @@ run_llverfs() {
         local dir=$1
         local llverfs_opts=$2
+        local use_partial_arg=$3
         local partial_arg=""
         local size=$(df -B G $dir |tail -n 1 |awk '{print $2}' |sed 's/G//') #GB
 
         # Run in partial (fast) mode if the size
         # of a partition > 1 GB
-        [ $size -gt 1 ] && partial_arg="-p"
+        [ "x$use_partial_arg" != "xno" ] && [ $size -gt 1 ] && partial_arg="-p"
 
         llverfs $partial_arg $llverfs_opts $dir
 }
 
+#Remove objects from OST
+remove_ost_objects() {
+        local facet=$1
+        local ostdev=$2
+        local group=$3
+        shift 3
+        local objids="$@"
+        local mntpt=$(facet_mntpt $facet)
+        local opts=$OST_MOUNT_OPTS
+        local i
+        local rc
+
+        echo "removing objects from $ostdev on $facet: $objids"
+        if ! test -b $ostdev; then
+                opts=$(csa_add "$opts" -o loop)
+        fi
+        mount -t $(facet_fstype $facet) $opts $ostdev $mntpt ||
+                return $?
+        rc=0
+        for i in $objids; do
+                rm $mntpt/O/$group/d$((i % 32))/$i || { rc=$?; break; }
+        done
+        umount -f $mntpt || return $?
+        return $rc
+}
+
+#Remove files from MDT
 remove_mdt_files() {
         local facet=$1
         local mdtdev=$2
@@ -5577,12 +6359,12 @@ remove_mdt_files() {
         echo "removing files from $mdtdev on $facet: $files"
         if [ $(facet_fstype $facet) == ldiskfs ] &&
-           ! do_facet $facet test -b ${!dev}; then
+           ! do_facet $facet test -b $mdtdev; then
                 opts=$(csa_add "$opts" -o loop)
         fi
         mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
                 return $?
-        rc=0;
+        rc=0
         for f in $files; do
                 rm $mntpt/ROOT/$f || { rc=$?; break; }
         done
@@ -5601,7 +6383,7 @@ duplicate_mdt_files() {
         echo "duplicating files on $mdtdev on $facet: $files"
         mkdir -p $mntpt || return $?
         if [ $(facet_fstype $facet) == ldiskfs ] &&
-           ! do_facet $facet test -b ${!dev}; then
+           ! do_facet $facet test -b $mdtdev; then
                 opts=$(csa_add "$opts" -o loop)
         fi
         mount -t $(facet_fstype $facet) $opts $mdtdev $mntpt ||
@@ -5670,6 +6452,45 @@ min_ost_size () {
         $LCTL get_param -n osc.*.kbytesavail | sort -n | head -n1
 }
 
+#
+# Get the available size (KB) of a given obd target.
+#
+get_obd_size() {
+        local facet=$1
+        local obd=$2
+        local size
+
+        [[ $facet != client ]] || return 0
+
+        size=$(do_facet $facet $LCTL get_param -n *.$obd.kbytesavail | head -n1)
+        echo -n $size
+}
+
+#
+# Get the page size (bytes) on a given facet node.
+#
+get_page_size() {
+        local facet=$1
+        local size
+
+        size=$(do_facet $facet getconf PAGE_SIZE)
+        [[ ${PIPESTATUS[0]} = 0 && -n "$size" ]] || size=4096
+        echo -n $size
+}
+
+#
+# Get the block count of the filesystem.
+#
+get_block_count() {
+        local facet=$1
+        local device=$2
+        local count
+
+        count=$(do_facet $facet "$DUMPE2FS -h $device 2>&1" |
+                awk '/^Block count:/ {print $3}')
+        echo -n $count
+}
+
 # Get the block size of the filesystem.
 get_block_size() {
         local facet=$1
@@ -5683,10 +6504,13 @@ get_block_size() {
 
 # Check whether the "large_xattr" feature is enabled or not.
 large_xattr_enabled() {
-        local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
+        [[ $(facet_fstype $SINGLEMDS) == zfs ]] && return 0
 
-        do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 | grep -q large_xattr"
-        return ${PIPESTATUS[0]}
+        local mds_dev=$(mdsdevname ${SINGLEMDS//mds/})
+
+        do_facet $SINGLEMDS "$DUMPE2FS -h $mds_dev 2>&1 |
+                grep -E -q '(ea_inode|large_xattr)'"
+        return ${PIPESTATUS[0]}
 }
 
 # Get the maximum xattr size supported by the filesystem.
@@ -5724,28 +6548,33 @@ generate_string() {
 }
 
 reformat_external_journal() {
+        local facet=$1
+
         if [ ! -z ${EJOURNAL} ]; then
-                local rcmd="do_facet ${SINGLEMDS}"
+                local rcmd="do_facet $facet"
 
-                echo "reformat external journal on ${SINGLEMDS}:${EJOURNAL}"
+                echo "reformat external journal on $facet:${EJOURNAL}"
                 ${rcmd} mke2fs -O journal_dev ${EJOURNAL} || return 1
         fi
 }
 
 # MDT file-level backup/restore
 mds_backup_restore() {
-        local devname=$(mdsdevname ${SINGLEMDS//mds/})
+        local facet=$1
+        local igif=$2
+        local devname=$(mdsdevname $(facet_number $facet))
         local mntpt=$(facet_mntpt brpt)
-        local rcmd="do_facet ${SINGLEMDS}"
+        local rcmd="do_facet $facet"
         local metaea=${TMP}/backup_restore.ea
         local metadata=${TMP}/backup_restore.tgz
         local opts=${MDS_MOUNT_OPTS}
+        local svc=${facet}_svc
 
         if ! ${rcmd} test -b ${devname}; then
                 opts=$(csa_add "$opts" -o loop)
         fi
 
-        echo "file-level backup/restore on ${SINGLEMDS}:${devname}"
+        echo "file-level backup/restore on $facet:${devname}"
 
         # step 1: build mount point
         ${rcmd} mkdir -p $mntpt
@@ -5753,6 +6582,10 @@ mds_backup_restore() {
         ${rcmd} rm -f $metaea $metadata
         # step 3: mount dev
         ${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 1
+        if [ ! -z $igif ]; then
+                # step 3.5: rm .lustre
+                ${rcmd} rm -rf $mntpt/ROOT/.lustre || return 1
+        fi
         # step 4: backup metaea
         echo "backup EA"
         ${rcmd} "cd $mntpt && getfattr -R -d -m '.*' -P . > $metaea && cd -" ||
@@ -5763,11 +6596,12 @@ mds_backup_restore() {
         # step 6: umount
         ${rcmd} umount -d $mntpt || return 4
         # step 7: reformat external journal if needed
-        reformat_external_journal || return 5
+        reformat_external_journal $facet || return 5
         # step 8: reformat dev
         echo "reformat new device"
-        add ${SINGLEMDS} $(mkfs_opts ${SINGLEMDS}) --backfstype ldiskfs \
-                --reformat $devname > /dev/null || return 6
+        add $facet $(mkfs_opts $facet ${devname}) --backfstype ldiskfs \
+                --reformat ${devname} $(mdsvdevname $(facet_number $facet)) \
+                > /dev/null || exit 6
         # step 9: mount dev
         ${rcmd} mount -t ldiskfs $opts $devname $mntpt || return 7
         # step 10: restore metadata
@@ -5783,21 +6617,24 @@ mds_backup_restore() {
         ${rcmd} umount -d $mntpt || return 10
         # step 14: cleanup tmp backup
         ${rcmd} rm -f $metaea $metadata
+        # step 15: reset device label - it's not virgin on
+        ${rcmd} e2label $devname ${!svc}
 }
 
 # remove OI files
 mds_remove_ois() {
-        local devname=$(mdsdevname ${SINGLEMDS//mds/})
+        local facet=$1
+        local idx=$2
+        local devname=$(mdsdevname $(facet_number $facet))
         local mntpt=$(facet_mntpt brpt)
-        local rcmd="do_facet ${SINGLEMDS}"
-        local idx=$1
+        local rcmd="do_facet $facet"
         local opts=${MDS_MOUNT_OPTS}
 
         if ! ${rcmd} test -b ${devname}; then
                 opts=$(csa_add "$opts" -o loop)
         fi
 
-        echo "remove OI files: idx=${idx}"
+        echo "removing OI files on $facet: idx=${idx}"
 
         # step 1: build mount point
         ${rcmd} mkdir -p $mntpt
@@ -5828,3 +6665,62 @@ generate_logname() {
         echo "$TESTLOG_PREFIX.$TESTNAME.$logname.$(hostname -s).log"
 }
+
+# make directory on different MDTs
+test_mkdir() {
+        local option
+        local parent
+        local child
+        local path
+        local rc=0
+
+        case $# in
+                1) path=$1;;
+                2) option=$1
+                   path=$2;;
+                *) error "Only creating single directory is supported";;
+        esac
+
+        child=$(basename $path)
+        parent=$(dirname $path)
+
+        if [ "$option" == "-p" -a -d $parent/$child ]; then
+                return $rc
+        fi
+
+        if [ ! -d ${parent} ]; then
+                if [ "$option" == "-p" ]; then
+                        mkdir -p ${parent}
+                else
+                        return 1
+                fi
+        fi
+
+        if [ $MDSCOUNT -le 1 ]; then
+                mkdir $option $parent/$child || rc=$?
+        else
+                local mdt_idx=$($LFS getstripe -M $parent)
+                local test_num=$(echo $testnum | sed -e 's/[^0-9]*//g')
+
+                if [ "$mdt_idx" -ne 0 ]; then
+                        mkdir $option $parent/$child || rc=$?
+                else
+                        mdt_idx=$((test_num % MDSCOUNT))
+                        echo "mkdir $mdt_idx for $parent/$child"
+                        $LFS setdirstripe -i $mdt_idx $parent/$child || rc=$?
+                fi
+        fi
+        return $rc
+}
+
+# find the smallest and not in use file descriptor
+free_fd()
+{
+        local max_fd=$(ulimit -n)
+        local fd=3
+        while [[ $fd -le $max_fd && -e /proc/self/fd/$fd ]]; do
+                ((++fd))
+        done
+        [ $fd -lt $max_fd ] || error "finding free file descriptor failed"
+        echo $fd
+}
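
Editor's note: the reworked save_lustre_params()/restore_lustre_params() pair above keys its output by facet rather than by node, so a test can bracket parameter changes and put everything back afterwards. A minimal usage sketch, not part of the patch, assuming test-framework.sh has been sourced with the usual mds1/ost1 facets configured; "some.param.mask" is a placeholder for whatever parameter mask a test actually changes:

        # save the current values as "<facet> <name>=<value>" lines
        save_lustre_params mds1,ost1 "some.param.mask" > $TMP/params.saved
        # ... change parameters and run the workload under test here ...
        # feed the saved stream back; restore_lustre_params reads stdin
        restore_lustre_params < $TMP/params.saved
        rm -f $TMP/params.saved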
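Editor's note: the new all_mdts_nodes()/all_osts_nodes()/all_server_nodes() helpers differ from mdts_nodes()/osts_nodes() in that they also include passive failover hosts, not just the currently active ones. A sketch of how the two families might be combined with the existing do_nodes()/comma_list() helpers (illustrative only, not part of the patch):

        # run a command on every configured server node, active or passive
        do_nodes $(comma_list $(all_server_nodes)) "uname -r"
        # the same command restricted to the currently active MDS nodes
        do_nodes $(comma_list $(mdts_nodes)) "uname -r"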
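Editor's note: test_mkdir() and free_fd() are small client-side utilities; the former spreads per-test directories across MDTs when MDSCOUNT is greater than one, the latter prints the lowest unused file descriptor number. A usage sketch (illustrative only, not part of the patch; $DIR and $tdir are the usual framework variables):

        # create the per-test directory; with several MDTs the target MDT
        # index is derived from the test number via lfs setdirstripe
        test_mkdir -p $DIR/$tdir
        # pick a free descriptor, open the new directory on it, then close it
        fd=$(free_fd)
        eval "exec $fd<$DIR/$tdir"
        eval "exec $fd<&-"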